repo_name (string, 6–112 chars) | path (string, 4–204 chars) | copies (string, 1–3 chars) | size (string, 4–6 chars) | content (string, 714–810k chars) | license (string, 15 classes)
---|---|---|---|---|---
billy-inn/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs needs cblas, fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be built
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
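For reference, a minimal sketch of the same `numpy.distutils` configuration pattern used in the file above; the package name `mypkg` and extension `_fast` are hypothetical, and `numpy.distutils` is assumed available (it ships with the NumPy versions this setup.py targets):

```python
# Minimal numpy.distutils setup.py sketch (hypothetical package 'mypkg'
# with one pre-generated C extension '_fast').
import os

def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    import numpy

    config = Configuration('mypkg', parent_package, top_path)
    config.add_subpackage('tests')  # plain Python subpackage
    config.add_extension(
        '_fast',                             # built as mypkg._fast
        sources=['_fast.c'],                 # e.g. generated by Cython
        include_dirs=[numpy.get_include()],
        libraries=['m'] if os.name == 'posix' else [],
    )
    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
```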
khkaminska/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
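A short usage sketch for the loaders and generators re-exported above, using APIs as defined in this scikit-learn version:

```python
from sklearn.datasets import load_iris, make_classification

iris = load_iris()
print(iris.data.shape, iris.target.shape)  # (150, 4) (150,)

# Synthetic binary classification problem with 20 features.
X, y = make_classification(n_samples=100, n_features=20, random_state=0)
print(X.shape, y.shape)  # (100, 20) (100,)
```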
dyoung418/tensorflow | tensorflow/examples/learn/wide_n_deep_tutorial.py | 4 | 8355 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example code for TensorFlow Wide & Deep Tutorial using TF.Learn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import shutil
import sys
import tempfile
import pandas as pd
from six.moves import urllib
import tensorflow as tf
CSV_COLUMNS = [
"age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"
]
gender = tf.feature_column.categorical_column_with_vocabulary_list(
"gender", ["Female", "Male"])
education = tf.feature_column.categorical_column_with_vocabulary_list(
"education", [
"Bachelors", "HS-grad", "11th", "Masters", "9th",
"Some-college", "Assoc-acdm", "Assoc-voc", "7th-8th",
"Doctorate", "Prof-school", "5th-6th", "10th", "1st-4th",
"Preschool", "12th"
])
marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
"marital_status", [
"Married-civ-spouse", "Divorced", "Married-spouse-absent",
"Never-married", "Separated", "Married-AF-spouse", "Widowed"
])
relationship = tf.feature_column.categorical_column_with_vocabulary_list(
"relationship", [
"Husband", "Not-in-family", "Wife", "Own-child", "Unmarried",
"Other-relative"
])
workclass = tf.feature_column.categorical_column_with_vocabulary_list(
"workclass", [
"Self-emp-not-inc", "Private", "State-gov", "Federal-gov",
"Local-gov", "?", "Self-emp-inc", "Without-pay", "Never-worked"
])
# To show an example of hashing:
occupation = tf.feature_column.categorical_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.feature_column.categorical_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# Continuous base columns.
age = tf.feature_column.numeric_column("age")
education_num = tf.feature_column.numeric_column("education_num")
capital_gain = tf.feature_column.numeric_column("capital_gain")
capital_loss = tf.feature_column.numeric_column("capital_loss")
hours_per_week = tf.feature_column.numeric_column("hours_per_week")
# Transformations.
age_buckets = tf.feature_column.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
# Wide columns and deep columns.
base_columns = [
gender, education, marital_status, relationship, workclass, occupation,
native_country, age_buckets,
]
crossed_columns = [
tf.feature_column.crossed_column(
["education", "occupation"], hash_bucket_size=1000),
tf.feature_column.crossed_column(
[age_buckets, "education", "occupation"], hash_bucket_size=1000),
tf.feature_column.crossed_column(
["native_country", "occupation"], hash_bucket_size=1000)
]
deep_columns = [
tf.feature_column.indicator_column(workclass),
tf.feature_column.indicator_column(education),
tf.feature_column.indicator_column(gender),
tf.feature_column.indicator_column(relationship),
# To show an example of embedding
tf.feature_column.embedding_column(native_country, dimension=8),
tf.feature_column.embedding_column(occupation, dimension=8),
age,
education_num,
capital_gain,
capital_loss,
hours_per_week,
]
FLAGS = None
def maybe_download(train_data, test_data):
"""Maybe downloads training data and returns train and test file names."""
if train_data:
train_file_name = train_data
else:
train_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
train_file.name) # pylint: disable=line-too-long
train_file_name = train_file.name
train_file.close()
print("Training data is downloaded to %s" % train_file_name)
if test_data:
test_file_name = test_data
else:
test_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test",
test_file.name) # pylint: disable=line-too-long
test_file_name = test_file.name
test_file.close()
print("Test data is downloaded to %s"% test_file_name)
return train_file_name, test_file_name
def build_estimator(model_dir, model_type):
"""Build an estimator."""
if model_type == "wide":
m = tf.estimator.LinearClassifier(
model_dir=model_dir, feature_columns=base_columns + crossed_columns)
elif model_type == "deep":
m = tf.estimator.DNNClassifier(
model_dir=model_dir,
feature_columns=deep_columns,
hidden_units=[100, 50])
else:
m = tf.estimator.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=crossed_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[100, 50])
return m
def input_fn(data_file, num_epochs, shuffle):
"""Returns an `input_fn` required by Estimator train/evaluate.
Args:
data_file: The file path to the dataset.
num_epochs: Number of epochs to iterate over data. If `None`, `input_fn`
will generate an infinite stream of data.
shuffle: bool, whether to read the data in random order.
"""
df_data = pd.read_csv(
tf.gfile.Open(data_file),
names=CSV_COLUMNS,
skipinitialspace=True,
engine="python",
skiprows=1)
# remove NaN elements
df_data = df_data.dropna(how="any", axis=0)
labels = df_data["income_bracket"].apply(lambda x: ">50K" in x).astype(int)
return tf.estimator.inputs.pandas_input_fn(
x=df_data,
y=labels,
batch_size=100,
num_epochs=num_epochs,
shuffle=shuffle,
num_threads=1)
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
train_file_name, test_file_name = maybe_download(FLAGS.train_data,
FLAGS.test_data)
# Specify file path below if you want to find the output easily
model_dir = FLAGS.model_dir if FLAGS.model_dir else tempfile.mkdtemp()
estimator = build_estimator(model_dir, FLAGS.model_type)
# `tf.estimator.TrainSpec`, `tf.estimator.EvalSpec`, and
# `tf.estimator.train_and_evaluate` API are available in TF 1.4.
train_spec = tf.estimator.TrainSpec(
input_fn=input_fn(train_file_name, num_epochs=None, shuffle=True),
max_steps=FLAGS.train_steps)
eval_spec = tf.estimator.EvalSpec(
input_fn=input_fn(test_file_name, num_epochs=1, shuffle=False),
# set steps to None to run evaluation until all data is consumed.
steps=None)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
# Manual cleanup
shutil.rmtree(model_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--model_dir",
type=str,
default="",
help="Base directory for output models."
)
parser.add_argument(
"--model_type",
type=str,
default="wide_n_deep",
help="Valid model types: {'wide', 'deep', 'wide_n_deep'}."
)
parser.add_argument(
"--train_steps",
type=int,
default=2000,
help="Number of training steps."
)
parser.add_argument(
"--train_data",
type=str,
default="",
help="Path to the training data."
)
parser.add_argument(
"--test_data",
type=str,
default="",
help="Path to the test data."
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
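The core idea of the wide part — bucketize a continuous column, then cross it with a categorical one — in isolation, as a sketch using only the `tf.feature_column` calls already seen above:

```python
import tensorflow as tf

age = tf.feature_column.numeric_column("age")
age_buckets = tf.feature_column.bucketized_column(
    age, boundaries=[18, 25, 35, 50, 65])
education = tf.feature_column.categorical_column_with_vocabulary_list(
    "education", ["Bachelors", "HS-grad", "Masters"])
# The crossed column learns one weight per (age bucket, education) pair,
# which is what lets the linear ("wide") part memorize interactions.
cross = tf.feature_column.crossed_column(
    [age_buckets, "education"], hash_bucket_size=100)
```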
bgris/ODL_bgris | lib/python3.5/site-packages/matplotlib/stackplot.py | 6 | 4198 | """
Stacked area plot for 1D arrays inspired by Douglas Y'barbo's stackoverflow
answer:
http://stackoverflow.com/questions/2225995/how-can-i-create-stacked-line-graph-with-matplotlib
(http://stackoverflow.com/users/66549/doug)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
from cycler import cycler
import numpy as np
__all__ = ['stackplot']
def stackplot(axes, x, *args, **kwargs):
"""Draws a stacked area plot.
*x* : 1d array of dimension N
*y* : 2d array of dimension MxN, OR any number of 1d arrays each of dimension
1xN. The data is assumed to be unstacked. Each of the following
calls is legal::
stackplot(x, y) # where y is MxN
stackplot(x, y1, y2, y3, y4) # where y1, y2, y3, y4 are all 1xN
Keyword arguments:
*baseline* : ['zero', 'sym', 'wiggle', 'weighted_wiggle']
Method used to calculate the baseline. 'zero' is just a
simple stacked plot. 'sym' is symmetric around zero and
is sometimes called `ThemeRiver`. 'wiggle' minimizes the
sum of the squared slopes. 'weighted_wiggle' does the
same but weights to account for size of each layer.
It is also called `Streamgraph`-layout. More details
can be found at http://leebyron.com/streamgraph/.
*labels* : A list or tuple of labels to assign to each data series.
*colors* : A list or tuple of colors. These will be cycled through and
used to colour the stacked areas.
All other keyword arguments are passed to
:func:`~matplotlib.Axes.fill_between`
Returns *r* : A list of
:class:`~matplotlib.collections.PolyCollection`, one for each
element in the stacked area plot.
"""
if len(args) == 1:
y = np.atleast_2d(*args)
elif len(args) > 1:
y = np.row_stack(args)
labels = iter(kwargs.pop('labels', []))
colors = kwargs.pop('colors', None)
if colors is not None:
axes.set_prop_cycle(cycler('color', colors))
baseline = kwargs.pop('baseline', 'zero')
# Assume data passed has not been 'stacked', so stack it here.
stack = np.cumsum(y, axis=0)
if baseline == 'zero':
first_line = 0.
elif baseline == 'sym':
first_line = -np.sum(y, 0) * 0.5
stack += first_line[None, :]
elif baseline == 'wiggle':
m = y.shape[0]
first_line = (y * (m - 0.5 - np.arange(0, m)[:, None])).sum(0)
first_line /= -m
stack += first_line
elif baseline == 'weighted_wiggle':
m, n = y.shape
center = np.zeros(n)
total = np.sum(y, 0)
# multiply by 1/total (or zero) to avoid infinities in the division:
inv_total = np.zeros_like(total)
mask = total > 0
inv_total[mask] = 1.0 / total[mask]
increase = np.hstack((y[:, 0:1], np.diff(y)))
below_size = total - stack
below_size += 0.5 * y
move_up = below_size * inv_total
move_up[:, 0] = 0.5
center = (move_up - 0.5) * increase
center = np.cumsum(center.sum(0))
first_line = center - 0.5 * total
stack += first_line
else:
errstr = "Baseline method %s not recognised. " % baseline
errstr += "Expected 'zero', 'sym', 'wiggle' or 'weighted_wiggle'"
raise ValueError(errstr)
# Color between x = 0 and the first array.
color = axes._get_lines.get_next_color()
coll = axes.fill_between(x, first_line, stack[0, :],
facecolor=color, label=six.next(labels, None),
**kwargs)
coll.sticky_edges.y[:] = [0]
r = [coll]
# Color between array i-1 and array i
for i in xrange(len(y) - 1):
color = axes._get_lines.get_next_color()
r.append(axes.fill_between(x, stack[i, :], stack[i + 1, :],
facecolor=color,
label=six.next(labels, None),
**kwargs))
return r
| gpl-3.0 |
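Usage sketch; in practice this function is reached through `matplotlib.pyplot.stackplot`, which forwards to the `stackplot()` defined above:

```python
import numpy as np
import matplotlib.pyplot as plt

x = np.arange(10)
y1, y2, y3 = np.random.rand(3, 10)
plt.stackplot(x, y1, y2, y3,
              labels=['a', 'b', 'c'],
              baseline='weighted_wiggle')
plt.legend(loc='upper left')
plt.show()
```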
drasmuss/numpy | numpy/core/function_base.py | 3 | 7301 | from __future__ import division, absolute_import, print_function
import warnings
import operator
__all__ = ['logspace', 'linspace']
from . import numeric as _nx
from .numeric import result_type, NaN, shares_memory, MAY_SHARE_BOUNDS, TooHardError
def _index_deprecate(i, stacklevel=2):
try:
i = operator.index(i)
except TypeError:
msg = ("object of type {} cannot be safely interpreted as "
"an integer.".format(type(i)))
i = int(i)
stacklevel += 1
warnings.warn(msg, DeprecationWarning, stacklevel=stacklevel)
return i
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop`].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
dtype : dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
.. versionadded:: 1.9.0
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float
Only returned if `retstep` is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
# 2016-02-25, 1.12
num = _index_deprecate(num)
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
div = (num - 1) if endpoint else num
# Convert float/complex array scalars to float, gh-3504
start = start * 1.
stop = stop * 1.
dt = result_type(start, stop, float(num))
if dtype is None:
dtype = dt
y = _nx.arange(0, num, dtype=dt)
delta = stop - start
if num > 1:
step = delta / div
if step == 0:
# Special handling for denormal numbers, gh-5437
y /= div
y = y * delta
else:
# One might be tempted to use faster, in-place multiplication here,
# but this prevents step from overriding what class is produced,
# and thus prevents, e.g., use of Quantities; see gh-7142.
y = y * step
else:
# 0 and 1 item long sequences have an undefined step
step = NaN
# Multiply with delta to allow possible override of output class.
y = y * delta
y += start
if endpoint and num > 1:
y[-1] = stop
if retstep:
return y.astype(dtype, copy=False), step
else:
return y.astype(dtype, copy=False)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y).astype(dtype)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start, stop, num=num, endpoint=endpoint)
if dtype is None:
return _nx.power(base, y)
return _nx.power(base, y).astype(dtype)
| bsd-3-clause |
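A quick check of the Notes-section equivalence between `logspace` and `power(base, linspace(...))`:

```python
import numpy as np

# logspace(start, stop) should equal base ** linspace(start, stop).
a = np.logspace(2.0, 3.0, num=4)
b = np.power(10.0, np.linspace(2.0, 3.0, num=4))
assert np.allclose(a, b)
```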
qiwsir/vincent | examples/grouped_bar_examples.py | 11 | 2923 | # -*- coding: utf-8 -*-
"""
Vincent Grouped Bar Examples
"""
#Build a Grouped Bar Chart from scratch
import pandas as pd
from vincent import *
from vincent.core import KeyedList
farm_1 = {'apples': 10, 'berries': 32, 'squash': 21, 'melons': 13, 'corn': 18}
farm_2 = {'apples': 15, 'berries': 40, 'squash': 17, 'melons': 10, 'corn': 22}
farm_3 = {'apples': 6, 'berries': 24, 'squash': 22, 'melons': 16, 'corn': 30}
farm_4 = {'apples': 12, 'berries': 30, 'squash': 15, 'melons': 9, 'corn': 15}
farm_5 = {'apples': 20, 'berries': 35, 'squash': 19, 'melons': 17, 'corn': 19}
farm_6 = {'apples': 3, 'berries': 28, 'squash': 21, 'melons': 11, 'corn': 23}
data = [farm_1, farm_2, farm_3, farm_4, farm_5, farm_6]
index = ['Farm 1', 'Farm 2', 'Farm 3', 'Farm 4', 'Farm 5', 'Farm 6']
df = pd.DataFrame(data, index=index)
vis = Visualization(width=500, height=300)
vis.padding = {'top': 10, 'left': 50, 'bottom': 50, 'right': 100}
data = Data.from_pandas(df, grouped=True)
vis.data['table'] = data
vis.scales['x'] = Scale(name='x', type='ordinal', range='width',
domain=DataRef(data='table', field="data.idx"),
padding=0.2)
vis.scales['y'] = Scale(name='y', range='height', nice=True,
domain=DataRef(data='table', field="data.val"))
vis.scales['color'] = Scale(name='color', type='ordinal',
domain=DataRef(data='table', field='data.col'),
range='category20')
vis.axes.extend([Axis(type='x', scale='x'),
Axis(type='y', scale='y')])
enter_props = PropertySet(x=ValueRef(scale='pos', field="data.group"),
y=ValueRef(scale='y', field="data.val"),
width=ValueRef(scale='pos', band=True, offset=-1),
y2=ValueRef(value=0, scale='y'),
fill=ValueRef(scale='color', field='data.col'))
facet = Transform(type='facet', keys=['data.idx'])
transform = MarkRef(data='table', transform=[facet])
mark = Mark(type='group', from_=transform,
            marks=[Mark(type='rect',
                        properties=MarkProperties(enter=enter_props))])
vis.marks.append(mark)
#Mark group properties
group_props = PropertySet(x=ValueRef(scale='x', field="key"),
width=ValueRef(scale='x', band=True))
vis.marks[0].properties = MarkProperties(enter=group_props)
vis.marks[0].scales = KeyedList()
vis.marks[0].scales['pos'] = Scale(name='pos', type='ordinal',
range='width',
domain=DataRef(field='data.group'))
vis.axis_titles(x='Farms', y='Total Produce')
vis.legend(title='Produce Type')
vis.to_json('vega.json')
#Convenience method
vis = GroupedBar(df)
vis.axis_titles(x='Farms', y='Total Produce')
vis.width = 700
vis.legend(title='Produce Type')
vis.colors(brew='Pastel1')
vis.to_json('vega.json')
| mit |
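If run in a notebook, the chart built above can also be rendered inline instead of (or in addition to) writing `vega.json`; this assumes a vincent release that provides the notebook helpers:

```python
import vincent

vincent.core.initialize_notebook()  # loads the Vega JS scaffolding once
vis.display()                       # renders the grouped bar chart inline
```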
NNPDF/reportengine | src/reportengine/figure.py | 1 | 3481 | # -*- coding: utf-8 -*-
"""
Save generated figures in the correct path. Use::
@figure
def provider(arg):
return plt.figure(...)
to have the figure be automatically saved in the correct path, once it is
constructed. Similarly use::
@figuregen
def provider(arg):
for ...:
yield plt.figure(...)
to have the action applied to each element of a generator.
The figures will be automatically closed.
Created on Thu Mar 10 00:59:31 2016
@author: Zahari Kassabov
"""
import logging
import numpy as np
from reportengine.formattingtools import spec_to_nice_name
from reportengine.utils import add_highlight, normalize_name
__all__ = ['figure', 'figuregen']
log = logging.getLogger(__name__)
def _generate_markdown_link(path, caption=None):
if caption is None:
caption = path.suffix
return f"[{caption}]({path})"
class Figure():
def __init__(self, paths):
self.paths = paths
@property
def as_markdown(self):
# Prepare the anchor
anchor_link_target = f'#{self.paths[0].stem}'
# Prepare the link to the actual figures
links = ' '.join(_generate_markdown_link(path) for path in self.paths) + ' '
links += _generate_markdown_link(anchor_link_target, "#")
retmd = f'{links}{{{anchor_link_target}}} \n'
return retmd
def prepare_paths(*,spec, namespace, environment ,**kwargs):
paths = environment.get_figure_paths(spec_to_nice_name(namespace, spec))
#list is important here. The generator gives a hard to trace bug when
#running in parallel
return {'paths':list(paths), 'output':environment.output_path}
def savefig(fig, *, paths, output ,suffix=''):
"""Final action to save figures, with a nice filename"""
#Import here to avoid problems with use()
import matplotlib.pyplot as plt
outpaths = []
for path in paths:
if suffix:
suffix = normalize_name(suffix)
path = path.with_name('_'.join((path.stem, suffix)) + path.suffix)
log.debug("Writing figure file %s" % path)
#Numpy can produce a lot of warnings while working on producing figures
with np.errstate(invalid='ignore'):
fig.savefig(str(path), bbox_inches='tight')
outpaths.append(path.relative_to(output))
plt.close(fig)
return Figure(outpaths)
def savefiglist(figures, paths, output):
"""Final action to save lists of figures. It adds a numerical index as
a suffix, for each figure in the generator."""
res = []
res.append('<div class="figiterwrapper">')
for i, fig in enumerate(figures):
#Support tuples with (figure, suffix)
if isinstance(fig, tuple):
fig, suffix = fig
else:
suffix = str(i)
suffix = normalize_name(suffix)
p_base = [paths[i].relative_to(output) for i in range(len(paths))]
p_full = [
str(p.with_name('_'.join((p.stem, suffix)) + p.suffix)) for p in p_base
]
ref = savefig(fig, paths=paths, output=output, suffix=suffix)
html = (
f'\n<div>'
f'{ref.as_markdown}'
'</div>\n'
)
res.append(html)
res.append("</div>")
return res
@add_highlight
def figure(f):
f.prepare = prepare_paths
f.final_action = savefig
return f
@add_highlight
def figuregen(f):
f.prepare = prepare_paths
f.final_action = savefiglist
return f
| gpl-2.0 |
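A usage sketch of the two decorators inside a hypothetical reportengine provider module (`plot_data` and `plot_each_column` are made-up provider names):

```python
import matplotlib.pyplot as plt
from reportengine.figure import figure, figuregen

@figure
def plot_data(data):
    fig, ax = plt.subplots()
    ax.plot(data)
    return fig  # saved (and closed) automatically by the savefig action

@figuregen
def plot_each_column(table):
    for name, column in table.items():
        fig, ax = plt.subplots()
        ax.plot(column)
        ax.set_title(name)
        yield fig, name  # (figure, suffix) tuples set the filename suffix
```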
kristianfoerster/melodist | melodist/station.py | 1 | 16151 | # -*- coding: utf-8 -*-
###############################################################################################################
# This file is part of MELODIST - MEteoroLOgical observation time series DISaggregation Tool #
# a program to disaggregate daily values of meteorological variables to hourly values #
# #
# Copyright (C) 2016 Florian Hanzer (1,2), Kristian Förster (1,2), Benjamin Winter (1,2), Thomas Marke (1) #
# #
# (1) Institute of Geography, University of Innsbruck, Austria #
# (2) alpS - Centre for Climate Change Adaptation, Innsbruck, Austria #
# #
# MELODIST is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# MELODIST is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################################################
from __future__ import print_function, division, absolute_import
import melodist
import melodist.util
import pandas as pd
class Station(object):
"""
Class representing meteorological stations including all relevant
information such as metadata and meteorological time series (observed
and disaggregated)
"""
_columns_daily = [
'tmean',
'tmin',
'tmax',
'precip',
'glob',
'ssd',
'hum',
'wind',
]
_columns_hourly = [
'temp',
'precip',
'glob',
'hum',
'wind',
]
def __init__(self, id=None, name=None, lon=None, lat=None, timezone=None, data_daily=None):
self._lon = None
self._lat = None
self._timezone = None
self._statistics = None
self._data_daily = None
self._data_disagg = None
self.statistics = melodist.StationStatistics(lon=lon, lat=lat)
self.id = id
self.name = name
self.lon = lon
self.lat = lat
self.timezone = timezone
self.sun_times = None
if data_daily is not None:
self.data_daily = data_daily
@property
def data_daily(self):
"""
Daily meteorological time series either derived through observations
or aggregation of hourly data for testing purposes.
"""
return self._data_daily
@data_daily.setter
def data_daily(self, df):
assert isinstance(df, pd.DataFrame)
assert df.index.is_all_dates
# for col in df:
# assert col in Station._columns_daily
assert df.index.resolution == 'day'
assert df.index.is_monotonic_increasing
if df.index.freq is None: # likely some days are missing
df = df.reindex(pd.date_range(start=df.index[0], end=df.index[-1], freq='D'))
for var in 'tmin', 'tmax', 'tmean':
if var in df:
assert not any(df[var] < 200), 'Implausible temperature values detected - temperatures must be in K'
self._data_daily = df.copy()
# create data frame for disaggregated data:
index = melodist.util.hourly_index(df.index)
df = pd.DataFrame(index=index, columns=Station._columns_hourly, dtype=float)
self._data_disagg = df
if self.timezone is not None:
self.calc_sun_times()
@property
def lon(self):
"""
Longitude of the station
"""
return self._lon
@lon.setter
def lon(self, lon):
self._lon = lon
self.statistics._lon = lon
@property
def lat(self):
"""
Latitude of the station
"""
return self._lat
@lat.setter
def lat(self, lat):
self._lat = lat
self.statistics._lat = lat
@property
def timezone(self):
"""
Timezone indicates the difference in hours calculated from UTC.
Negative values indicate timezones behind UTC, i.e., west of 0 deg
longitude; positive values indicate the reverse.
"""
return self._timezone
@timezone.setter
def timezone(self, timezone):
self._timezone = timezone
self.statistics._timezone = timezone
@property
def statistics(self):
"""
The associated StationStatistics object
"""
return self._statistics
@statistics.setter
def statistics(self, s):
assert isinstance(s, melodist.StationStatistics)
s._lon = self.lon
s._lat = self.lat
s._timezone = self.timezone
self._statistics = s
@property
def data_disagg(self):
"""
All results derived through disaggregation will be stored in this
property.
"""
return self._data_disagg
def calc_sun_times(self):
"""
Computes the times of sunrise, solar noon, and sunset for each day.
"""
self.sun_times = melodist.util.get_sun_times(self.data_daily.index, self.lon, self.lat, self.timezone)
def disaggregate_wind(self, method='equal'):
"""
Disaggregate wind speed.
Parameters
----------
method : str, optional
Disaggregation method.
``equal``
Mean daily wind speed is duplicated for the 24 hours of the day. (Default)
``cosine``
Distributes daily mean wind speed using a cosine function derived from hourly
observations.
``random``
Draws random numbers to distribute wind speed (usually not conserving the
daily average).
"""
self.data_disagg.wind = melodist.disaggregate_wind(self.data_daily.wind, method=method, **self.statistics.wind)
def disaggregate_humidity(self, method='equal', preserve_daily_mean=False):
"""
Disaggregate relative humidity.
Parameters
----------
method : str, optional
Disaggregation method.
``equal``
Mean daily humidity is duplicated for the 24 hours of the day. (Default)
``minimal``:
Calculates humidity from daily dew point temperature by setting the dew point temperature
equal to the daily minimum temperature.
``dewpoint_regression``:
Calculates humidity from daily dew point temperature by calculating dew point temperature
using ``Tdew = a * Tmin + b``, where ``a`` and ``b`` are determined by calibration.
``linear_dewpoint_variation``:
Calculates humidity from hourly dew point temperature by assuming a linear dew point
temperature variation between consecutive days.
``min_max``:
Calculates hourly humidity from observations of daily minimum and maximum humidity.
``month_hour_precip_mean``:
Calculates hourly humidity from categorical [month, hour, precip(y/n)] mean values
derived from observations.
preserve_daily_mean : bool, optional
If True, correct the daily mean values of the disaggregated data with the observed daily means.
"""
self.data_disagg.hum = melodist.disaggregate_humidity(
self.data_daily,
temp=self.data_disagg.temp,
method=method,
preserve_daily_mean=preserve_daily_mean,
**self.statistics.hum
)
def disaggregate_temperature(self, method='sine_min_max', min_max_time='fix', mod_nighttime=False):
"""
Disaggregate air temperature.
Parameters
----------
method : str, optional
Disaggregation method.
``sine_min_max``
Hourly temperatures follow a sine function preserving daily minimum
and maximum values. (Default)
``sine_mean``
Hourly temperatures follow a sine function preserving the daily mean
value and the diurnal temperature range.
``sine``
Same as ``sine_min_max``.
``mean_course_min_max``
Hourly temperatures follow an observed average course (calculated for each month),
preserving daily minimum and maximum values.
``mean_course_mean``
Hourly temperatures follow an observed average course (calculated for each month),
preserving the daily mean value and the diurnal temperature range.
min_max_time : str, optional
Method to determine the time of minimum and maximum temperature.
``fix``:
Minimum/maximum temperature are assumed to occur at 07:00/14:00 local time.
``sun_loc``:
Minimum/maximum temperature are assumed to occur at sunrise / solar noon + 2 h.
``sun_loc_shift``:
Minimum/maximum temperature are assumed to occur at sunrise / solar noon + monthly mean shift.
mod_nighttime : bool, optional
Use linear interpolation between minimum and maximum temperature.
"""
self.data_disagg.temp = melodist.disaggregate_temperature(
self.data_daily,
method=method,
min_max_time=min_max_time,
max_delta=self.statistics.temp.max_delta,
mean_course=self.statistics.temp.mean_course,
sun_times=self.sun_times,
mod_nighttime=mod_nighttime
)
def disaggregate_precipitation(self, method='equal', zerodiv='uniform', shift=0, master_precip=None):
"""
Disaggregate precipitation.
Parameters
----------
method : str, optional
Disaggregation method.
``equal``
Daily precipitation is distributed equally over the 24 hours of the day. (Default)
``cascade``
Hourly precipitation values are obtained using a cascade model set up using
hourly observations.
zerodiv : str, optional
Method to deal with zero division, relevant for ``method='masterstation'``.
``uniform``
Use uniform distribution. (Default)
master_precip : Series, optional
Hourly precipitation records from a representative station
(required for ``method='masterstation'``).
"""
if method == 'equal':
precip_disagg = melodist.disagg_prec(self.data_daily, method=method, shift=shift)
elif method == 'cascade':
precip_disagg = pd.Series(index=self.data_disagg.index, dtype=float)
for months, stats in zip(self.statistics.precip.months, self.statistics.precip.stats):
precip_daily = melodist.seasonal_subset(self.data_daily.precip, months=months)
if len(precip_daily) > 1:
data = melodist.disagg_prec(precip_daily, method=method, cascade_options=stats,
shift=shift, zerodiv=zerodiv)
precip_disagg.loc[data.index] = data
elif method == 'masterstation':
precip_disagg = melodist.precip_master_station(self.data_daily.precip, master_precip, zerodiv)
self.data_disagg.precip = precip_disagg
def disaggregate_radiation(self, method='pot_rad', pot_rad=None):
"""
Disaggregate solar radiation.
Parameters
----------
method : str, optional
Disaggregation method.
``pot_rad``
Calculates potential clear-sky hourly radiation and scales it according to the
mean daily radiation. (Default)
``pot_rad_via_ssd``
Calculates potential clear-sky hourly radiation and scales it according to the
observed daily sunshine duration.
``pot_rad_via_bc``
Calculates potential clear-sky hourly radiation and scales it according to daily
minimum and maximum temperature.
``mean_course``
Hourly radiation follows an observed average course (calculated for each month).
pot_rad : Series, optional
Hourly values of potential solar radiation. If ``None``, calculated internally.
"""
if self.sun_times is None:
self.calc_sun_times()
if pot_rad is None and method != 'mean_course':
pot_rad = melodist.potential_radiation(self.data_disagg.index, self.lon, self.lat, self.timezone)
self.data_disagg.glob = melodist.disaggregate_radiation(
self.data_daily,
sun_times=self.sun_times,
pot_rad=pot_rad,
method=method,
angstr_a=self.statistics.glob.angstroem.a,
angstr_b=self.statistics.glob.angstroem.b,
bristcamp_a=self.statistics.glob.bristcamp.a,
bristcamp_c=self.statistics.glob.bristcamp.c,
mean_course=self.statistics.glob.mean_course
)
def interpolate(self, column_hours, method='linear', limit=24, limit_direction='both', **kwargs):
"""
Wrapper function for ``pandas.Series.interpolate`` that can be used to
"disaggregate" values using various interpolation methods.
Parameters
----------
column_hours : dict
Dictionary containing column names in ``data_daily`` and the hour
values they should be associated to.
method, limit, limit_direction, **kwargs
These parameters are passed on to ``pandas.Series.interpolate``.
Examples
--------
Assume that ``mystation.data_daily.T7``, ``mystation.data_daily.T14``,
and ``mystation.data_daily.T19`` contain air temperature measurements
taken at 07:00, 14:00, and 19:00.
We can use the interpolation functions provided by pandas/scipy to derive
hourly values:
>>> mystation.data_hourly.temp = mystation.interpolate({'T7': 7, 'T14': 14, 'T19': 19}) # linear interpolation (default)
>>> mystation.data_hourly.temp = mystation.interpolate({'T7': 7, 'T14': 14, 'T19': 19}, method='cubic') # cubic spline
"""
kwargs = dict(kwargs, method=method, limit=limit, limit_direction=limit_direction)
data = melodist.util.prepare_interpolation_data(self.data_daily, column_hours)
return data.interpolate(**kwargs)
| gpl-3.0 |
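A usage sketch with synthetic daily data (temperatures in K, per the assertion in the `data_daily` setter); methods that rely on calibrated statistics (e.g. `mean_course`, `cascade`) additionally need a fitted `StationStatistics`:

```python
import numpy as np
import pandas as pd
import melodist

index = pd.date_range('2016-01-01', periods=30, freq='D')
daily = pd.DataFrame({
    'tmin':   272.0 + 3.0 * np.random.rand(30),   # Kelvin
    'tmax':   282.0 + 3.0 * np.random.rand(30),
    'tmean':  277.0 + 3.0 * np.random.rand(30),
    'precip': np.random.rand(30),
    'wind':   2.0 + np.random.rand(30),
}, index=index)

station = melodist.Station(lon=11.4, lat=47.3, timezone=1, data_daily=daily)
station.disaggregate_temperature(method='sine_min_max')
station.disaggregate_wind(method='equal')
hourly = station.data_disagg  # hourly results land here
```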
vitaliykomarov/NEUCOGAR | nest/noradrenaline/nest-2.10.0/topology/examples/test_3d_exp.py | 13 | 2642 | # -*- coding: utf-8 -*-
#
# test_3d_exp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module
EXPERIMENTAL example of 3d layer.
3d layers are currently not supported, use at your own risk!
Hans Ekkehard Plesser, UMB
'''
import nest
import pylab
import random
import nest.topology as topo
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
pylab.ion()
nest.ResetKernel()
# generate list of 1000 (x,y,z) triplets
pos = [[random.uniform(-0.5,0.5), random.uniform(-0.5,0.5), random.uniform(-0.5,0.5)]
for j in range(1000)]
l1 = topo.CreateLayer({'extent': [1.5, 1.5, 1.5], # must specify 3d extent AND center
'center': [0., 0., 0.],
'positions': pos,
'elements': 'iaf_neuron'})
# visualize
#xext, yext = nest.GetStatus(l1, 'topology')[0]['extent']
#xctr, yctr = nest.GetStatus(l1, 'topology')[0]['center']
# extract position information, transpose to list of x, y and z positions
xpos, ypos, zpos = zip(*topo.GetPosition(nest.GetChildren(l1)[0]))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xpos, ypos, zpos, s=15, facecolor='b', edgecolor='none')
# Gaussian connections in full volume [-0.75,0.75]**3
topo.ConnectLayers(l1, l1,
{'connection_type': 'divergent', 'allow_autapses': False,
'mask': {'volume': {'lower_left': [-0.75,-0.75,-0.75], 'upper_right': [0.75,0.75,0.75]}},
'kernel':{'exponential': {'c': 0., 'a': 1., 'tau': 0.25}}})
# show connections from center element
# sender shown in red, targets in green
ctr=topo.FindCenterElement(l1)
xtgt, ytgt, ztgt = zip(*topo.GetTargetPositions(ctr,l1)[0])
xctr, yctr, zctr = topo.GetPosition(ctr)[0]
ax.scatter([xctr],[yctr],[zctr],s=40, facecolor='r', edgecolor='none')
ax.scatter(xtgt,ytgt,ztgt,s=40, facecolor='g', edgecolor='g')
tgts=topo.GetTargetNodes(ctr,l1)[0]
d=topo.Distance(ctr,tgts)
plt.figure()
plt.hist(d, 25)
#plt.show()
| gpl-2.0 |
stefanpeidli/GoNet | Analysis/errorfuns.py | 1 | 4439 | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 10 23:06:54 2017
@author: Stefan
"""
import random
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
def KLD(suggested, target): #Compute Kullback-Leibler divergence, now stable!
t=target[target!=0] #->we'd divide by 0 else, does not have inpact on error anyway ->Problem: We don't punish the NN for predicting non-zero values on zero target!
s=suggested[target!=0]
difference=s/t #this is stable
Error = - np.inner(t*np.log(difference),np.ones(len(t)))
return Error
def KLD2(suggested, target): #Compute Kullback-Leibler divergence, now stable!
t=target
t=t+1e-100 #->we'd divide by 0 else; does not have an impact on the error anyway ->Problem: we don't punish the NN for predicting non-zero values on zero targets!
s=suggested
s=s+1e-100
difference=s/t #this is stable
Error = - np.inner(t*np.log(difference),np.ones(len(t)))
return Error
def KLDGRAD(sug, targ):
g=np.zeros(len(targ))
sug=sug+1e-100
for i in range(0,len(targ)):
if sug[i]!=0:
g[i]=-targ[i]/sug[i]
return g
# error fct Number 1
def MSE (suggested, target): #Returns the total mean square error
difference = np.absolute(suggested - target)
Error = 0.5*np.inner(difference,difference)
return Error
# error fct Number 2
def HELDIST (suggested, target):
return np.linalg.norm(np.sqrt(suggested)-np.sqrt(target), ord=2) /np.sqrt(2)
# error fct Number 3
def CROSSENTRO (suggested, target):
return ENTRO(target) + KLD(suggested,target) #wie rum kldiv?
# error fct Number 4
def EXPE (suggested, target, gamma):
alpha = 1/gamma
beta = np.log(gamma)
error = alpha*np.sum(np.exp((suggested - target)*beta))
return error
def EXPEGRAD(suggested, target, gamma=1000):
alpha = 1/gamma
beta = np.log(gamma)
gradient = alpha*beta*np.exp((suggested - target)*beta)
return gradient
# error fct Number x, actually not a good one. Only for statistics
def MAE (suggested, target): #compare the prediction with the answer/target, absolute error
difference = np.absolute(suggested - target)
Error = np.inner(difference,np.ones(len(target)))
return Error
def ENTRO (distribution):
return -np.inner(distribution[distribution!=0],np.log(distribution[distribution!=0]))
le=5
y1=np.zeros(le)
y1[1]=1
y3=np.zeros(le)
y3[3]=1-1/le
y3[1]=1/le
yunif=np.ones(le)/le
w=1/5
ali='center'
for i in [y3,yunif]:
print(np.round(i,2))
print(np.round(y1,2))
print("KLD",KLD(i,y1))
print("KLD2",KLD2(i,y1))
print("MSE",MSE(i,y1))
print("HELDIST",HELDIST(i,y1))
print("CROSSENTRO",CROSSENTRO(i,y1))
print("EXPE",EXPE(i,y1,1000))
print("EXPE2",EXPE(y1,i,1000))
print("MAE",MAE(i,y1))
print(" ")
y0=yunif
eta=0.01
y1=np.array([0,2,4,2,0])
y1=y1/np.sum(y1)
plt.grid(True)
plt.bar(np.arange(le)-w,y1,width=w,align=ali,color='b')
bp = mpatches.Patch(color='blue', label='Target')
plt.bar(np.arange(le),y0,width=w,align=ali,color='r')
rp = mpatches.Patch(color='red', label='Start')
gp = mpatches.Patch(color='green', label='Stop')
plt.legend(handles=[bp,rp,gp])
for j in range(0, 8):
yn=KLDGRAD(y0,y1)
#yn=y0-y1
y0=np.abs(y0-eta*yn)
y0=y0/np.inner(y0,np.ones(le))
#print(KLD(y0,y1))
print(np.round(y1,2))
print(np.round(y0,2))
plt.bar(np.arange(le)+2*w,y0,width=w,align=ali,color='y')
#plt.show()
for i in [y0]:
print("KLD",KLD(i,y1))
print("KLD2",KLD2(i,y1))
print("MSE",MSE(i,y1))
print("HELDIST",HELDIST(i,y1))
print("CROSSENTRO",CROSSENTRO(i,y1))
print("EXPE",EXPE(i,y1,1000))
print("EXPE2",EXPE(y1,i,1000))
print("MAE",MAE(i,y1))
print(" ")
#########
y0=yunif
y=[[0,1,0,0,0],[0,1,0,0,0],[0,0,1,0,0],[0,0,1,0,0],[0,0,1,0,0],[0,0,1,0,0],[0,0,0,1,0],[0,0,0,1,0]]
for i in range(100):
random.shuffle(y)
for j in y:
y1=j
yn=KLDGRAD(y0,y1)
#yn=y0-y1
y0=np.abs(y0-eta*yn)
y0=y0/np.inner(y0,np.ones(le))
#print(np.round(y1,2))
#print(np.round(y0,2))
#plt.bar(np.arange(le)+w,y0,width=w,align=ali,color='g')
#plt.show()
for i in [y0]:
print("KLD",KLD(i,y1))
print("KLD2",KLD2(i,y1))
print("MSE",MSE(i,y1))
print("HELDIST",HELDIST(i,y1))
print("CROSSENTRO",CROSSENTRO(i,y1))
print("EXPE",EXPE(i,y1,1000))
print("EXPE2",EXPE(y1,i,1000))
print("MAE",MAE(i,y1))
print(" ") | mit |
scramblingbalam/Alta_Real | labeling.py | 1 | 14465 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 06 14:40:17 2017
@author: scram
"""
import json
import cPickle as pickle
from pymongo import MongoClient
from feature_creation_mongo import translatelabel
import feature_functions as feature
from sklearn.externals import joblib
from collections import Counter
import time
### open browser
import webbrowser
import subprocess
import requests
from selenium import webdriver
from bs4 import BeautifulSoup
import re
feature_list = []
event_feature_dic ={}
#with open("event_model_dic","rb")as modelfile:
# feature_list =pickle.load(modelfile)
feature_list = joblib.load("event_model_dic.jonlib")
#print type(feature_list)
#print len(feature_list)
feature_string = feature_list[0]
event_feature_dic = feature_list[1]
doc2vec_dir ="Data/doc2vec/not_trump"
classifier = 'treecrf'
featurename = feature_string.split("_")
token_type = featurename[0]
### get pos tags # commented out since I'm getting urls from tweet json
#POS_dir ="Data\\twitIE_pos\\"
#pos_file_path1 = POS_dir+token_type+"_semeval2017"+"_twitIE_POS"
#pos_file_path2 = POS_dir+token_type+"_Alta_Real_New"+"_twitIE_POS"
#pos_file_path = [pos_file_path1, pos_file_path2]
#id_pos_dic, index_pos_dic = feature.pos_extract(pos_file_path)
event_target_dic ={}
#with open("event_target_dic","rb")as modelfile:
# event_target_dic = pickle.load(modelfile)
event_target_dic = joblib.load("event_target_dic.jonlib")
event_ID_dic = {}
#with open("event_ID_dic","rb")as modelfile:
# event_ID_dic = pickle.load(modelfile)
event_ID_dic = joblib.load("event_ID_dic.jonlib")
with open(doc2vec_dir+token_type+"_"+"id_text_dic.json",'r')as corpfile:
sent_dic = json.load(corpfile)
import httplib
import urlparse
def unshorten_url(url):
parsed = urlparse.urlparse(url)
h = httplib.HTTPConnection(parsed.netloc)
h.request('HEAD', parsed.path)
response = h.getresponse()
if response.status/100 == 3 and response.getheader('Location'):
return response.getheader('Location')
else:
return url
DBname = 'Alta_Real_New'
DBhost = 'localhost'
DBport = 27017
DBname_t = 'semeval2017'
# initiate Mongo Client
client = MongoClient()
client = MongoClient(DBhost, DBport)
DB_trump = client[DBname]
DB_train = client[DBname_t]
def image_lookup(photo_url):
filePath =photo_url
# filePath = '/mnt/Images/test.png'
searchUrl = "https://www.google.com/searchbyimage?&image_url="
# searchUrl = 'http://www.google.hr/searchbyimage/upload'
# multipart = {'encoded_image': (filePath, open(filePath, 'rb')), 'image_content': ''}
# response = requests.post(searchUrl, files=multipart, allow_redirects=False)
request = searchUrl+photo_url
print(request)
response = requests.get(request, allow_redirects=False)
fetchUrl = response.headers['Location']
webbrowser.open(fetchUrl)
def print_tweet(Tweet):
tweet_info = str(Tweet['_id'])+" "+Tweet['user']['screen_name']+" "+str(Tweet.get('label',''))
print(tweet_info)
text = Tweet['text']
print(text)
# pos = map(lambda x:index_pos_dic[x],id_pos_dic[Tweet['_id']])
if Tweet.get('entities',None):
# time.sleep(3)
if Tweet['entities'].get('media',None):
for num,media in enumerate(Tweet['entities']['media']):
print(media['type'])
if num == 0:
time.sleep(2)
else:
time.sleep(4)
webbrowser.open_new(media['media_url_https'])
if Tweet['entities'].get('urls',None):
for num,URL in enumerate(Tweet['entities']['urls']):
print(URL['display_url'])
if num == 0:
time.sleep(2)
else:
time.sleep(4)
webbrowser.open_new(URL['expanded_url'])
# if u'URL' in pos:
# url_count = Counter(pos)['URL']
# index = pos.index(u'URL')
# url = text.split()[index]
# print(unshorten_url(url))
# webbrowser.open_new(url)
# print("URLS")
# print Tweet['entities']['urls']
# print("media")
# if Tweet['entities']['media']:
# for media in Tweet['entities']['media']:
# print("MEDIA_TYPE",media['type'])
# if media['type'] == 'photo':
# image_lookup(media['media_url_https'])
#
# print Tweet['entities']['media']
# p = subprocess.Popen(["firefox", url])
# time.sleep(5) #delay of 10 seconds
# p.kill()
# driver = webdriver.Chrome()
# driver.get(url)
# time.sleep(3)
# driver.close()
model = joblib.load("tCRF_"+"_"+classifier+"_"+"_".join(featurename)+".crf_model")
test_id = 856172056932700164L#862135824745467905L
def label_tweet(tweet,root_tweet,pred,db,Done_prec):
# print(tweet.get('predicted',None),"predicted")
# print(tweet.get('label',None),"label")
# print(tweet.get('label_parent',None),"label_parent")
sID = tweet['_id']
if root_tweet == tweet:
print("\n________________________________________")
print("Tweet is Root stance to claim")
collection = db.trump_tweets
else:
collection = db.replies_to_trump
print("\n________________________________________")
print_tweet(root_tweet)
parent_id = tweet.get('in_reply_to_status_id',None)
parent_not_root = parent_id != root_tweet['_id'] and tweet['user']['screen_name'] != 'realDonaldTrump'
if parent_not_root:
parent_tweet = list(db.replies_to_trump.find({'_id':parent_id}))[0]
if parent_tweet["in_reply_to_status_id"] != root_tweet['id']:
print("|\n||||||||||||||||\n|")
else:
print("|\n|")
print_tweet(parent_tweet)
# print("\n")
# print(tweet['user']['screen_name'])
print("|\n|")
print_tweet(tweet)
# print(pos_tweet.index('url'))
# print(pred)
# print(tweet['in_reply_to_status_id'],root_tweet['_id'])
# print(tweet['user']['screen_name'])
# print(tweet['created_at'])
collection.update_one(
{'_id':sID},
{'$set':{'predicted':pred}})
try:
label=int(input("\nStance to Root\n1=support 2=deny 3=query 4=comment\n>>\t"))-1
except:
print("EXCEPTION")
label = None
if isinstance(label,int):
if tweet['in_reply_to_screen_name'] == 'realDonaldTrump':
collection.update_many(
{'text':tweet['text']},
{'$set':{'label':feature.inverse_label(label)}},
)
collection.update_many(
{'text':tweet['text']},
{'$set':{'label_parent':feature.inverse_label(label)}},
)
else:
collection.update_one(
{'_id':sID},
{'$set':{'label':feature.inverse_label(label)}})
output = feature.inverse_label(label)+" "+str(Done_prec)+"%"
print(output)
time.sleep(0.5)
if parent_not_root:
try:
label_parent=int(input("\nStance to Parent\n1=support 2=deny 3=query 4=comment\n>>\t"))-1
except:
print("EXCEPTION")
label_parent = None
elif parent_id == root_tweet['_id']:
label_parent = label
else:
label_parent = None
if isinstance(label_parent,int):
collection.update_one(
{'_id':sID},
{'$set':{'label_parent':feature.inverse_label(label_parent)}})
time.sleep(0.5)
# print("\n________________________________________")
def label_thread(thread_id,DB):
preds = model.predict(event_feature_dic[thread_id])
preds = map(feature.inverse_label,preds[0])
root = list(DB.trump_tweets.find({'_id':thread_id}))[0]
total = float(len(preds))
done = 0.0
for predicted,sID in zip(preds,sorted(event_ID_dic[thread_id][0])):
twt = list(DB.replies_to_trump.find({'_id':sID}))
if not twt:
twt = list(DB.trump_tweets.find({'_id':sID}))
twt =twt[0]
# if not twt.get('label',None) and twt.get('in_reply_to_status_id',None)!= root['_id']:
done +=1
if not twt.get('label',None) or not twt.get('label_parent',None):
done_perc = (done/total)*100
print(twt['_id'])
label_tweet(twt,root,predicted,DB,done_perc)
print("THREAD LABELED!!!!")
### working list of threads for labeling
train = [
860477328882905089,#Win in house for 16244
860580764944969728,#weekly address 6497
860577873060651008# JOBS, JOBS, JOBS! https://t.co/UR0eetSEnO 9379
]
#label_thread(train[0],DB_trump)
def dump_thread_labels(thread_id,DB):
preds = model.predict(event_feature_dic[thread_id])
preds = map(feature.inverse_label,preds[0])
root = list(DB.trump_tweets.find({'_id':thread_id}))[0]
label_dic = {}
parent_label_dic ={}
for predicted,sID in zip(preds,sorted(event_ID_dic[thread_id][0])):
twt = list(DB.replies_to_trump.find({'_id':sID}))
if not twt:
twt = list(DB.trump_tweets.find({'_id':sID}))
twt =twt[0]
# if not twt.get('label',None) and twt.get('in_reply_to_status_id',None)!= root['_id']:
if twt.get('label',None):# or twt.get('label_parent',None):
label_dic[twt['id']] = twt['label']
parent_label_dic[twt['id']] = twt['label_parent']
with open("train_labels_thread_"+str(thread_id)+".json","w") as labelfile:
json.dump(label_dic,labelfile)
with open("parent_labels_thread_"+str(thread_id)+".json","w") as parentfile:
json.dump(parent_label_dic,parentfile)
for k,v in zip(sorted(label_dic.items()),sorted(parent_label_dic.items())):
print k[0],k[1],v[1]
print len(parent_label_dic)
print len(label_dic)
#dump_thread_labels(train[0],DB_trump)
def update_thread_labels(thread_id,DB):
label_dic = {}
parent_label_dic ={}
parent_updated = 0
label_updated = 0
# print(DB)
# print( list(DB.replies_to_trump.find({'_id':860583926263238656})) )
with open("train_labels_thread_"+str(thread_id)+".json","r") as labelfile:
label_dic = json.load(labelfile)
with open("parent_labels_thread_"+str(thread_id)+".json","r") as parentfile:
parent_label_dic = json.load(parentfile)
for sID,label in label_dic.items():
# print(sID)
# print(type(sID))
twt = list(DB.replies_to_trump.find({'_id':int(sID)}))
# print(twt)
collection = DB.replies_to_trump
# print(twt)
if not twt:
# print("NOT TWT")
twt = list(DB.trump_tweets.find({'_id':int(sID)}))
collection = DB.trump_tweets
twt =twt[0]
if not twt.get('label',None):
try:
collection.update_many(
{'id':int(sID)},
{'$set':{'label':feature.inverse_label(label)}},
)
label_updated += 1
except Exception as err:
print(err)
print(sID)
else:
if twt['label'] != label:
print("Tweet with ID "+str(sID)+" has two Labels")
print("current label "+twt['label'])
print("new label " + label)
if not twt.get('label_parent',None):
try:
collection.update_many(
{'id':int(sID)},
{'$set':{'label_parent':feature.inverse_label( parent_label_dic[sID])}},
)
parent_updated += 1
except Exception as err:
print(err)
print(sID)
else:
if twt['label_parent'] != parent_label_dic[sID]:
print("Tweet with ID "+str(sID)+" has two Parent labels")
print("current label "+twt['label_parent'])
print("new label " + parent_label_dic[sID])
    print("Labels Updated: " + str(label_updated))
    print("Parent Labels Updated: " + str(parent_updated))
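# Hypothetical round trip (mirrors the commented calls above; DB_trump and
# the thread ids in `train` are the ones already used in this script):
#dump_thread_labels(train[0], DB_trump)    # write the two label JSON files
#update_thread_labels(train[0], DB_trump)  # push them back into MongoDB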
def get_full_text(s_id,collection):
twet = list(collection.find({'id':s_id}))[0]
if twet["truncated"] == True:
try:
text_url = twet['entities']['urls'][0]['expanded_url']
response = requests.get(text_url, allow_redirects=False)
# print response
# print response.status_code
if response.status_code == 200:
# print "YES"
soup = BeautifulSoup(response.text)
tweet = soup.findAll('meta', {'property':"og:description"})
reg = re.findall('(?<=<meta content=").+\s*.*(?=" )',str(tweet[0]))
return reg[0]
else:
# print "NO"
return twet['text']
except:
return twet['text']
        # print(text_url)   # unreachable: every branch above returns
        # print(tweet[0])
    return twet['text']  # fall through for tweets that are not truncated
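# Illustrative behaviour of the og:description scrape above (the HTML is an
# assumed shape, not a captured Twitter response):
#   re.findall('(?<=<meta content=").+\s*.*(?=" )',
#              '<meta content="full untruncated text" property="og:description"/>')
#   -> ['full untruncated text']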
truc_tweets = [860592723413348352,860592674838953984,860592669222981633,
860592484287729664,860592429707255810,860592373398511616,
860592245250154496,860592221623648257,860592117722349572,
860592110411681793,860592087569485825,860592042493321216]
#get_full_text(862739199014969348)
#get_full_text(860592723413348352)
#for twt_id in truc_tweets:
# full_text = get_full_text(twt_id)
# DB_trump.replies_to_trump.update_one(
# {'id':int(twt_id)},
# {'$set':{'full_text':full_text}},
# )
#for tweet in list(DB_trump.replies_to_trump.find())[:10]
# get_full_text(twt_id)
collection = DB_trump.replies_to_trump
#for tweet_id in list(collection.distinct('id',{"full_text":{"$exists":False}})):
# full_text = get_full_text(tweet_id,collection)
# DB_trump.replies_to_trump.update_one(
# {'id':int(tweet_id)},
# {'$set':{'full_text':full_text}},
# )
collection = DB_trump.trump_tweets
for tweet_id in list(collection.distinct('id',{"full_text":{"$exists":False}})):
full_text = get_full_text(tweet_id,collection)
DB_trump.trump_tweets.update_one(
{'id':int(tweet_id)},
{'$set':{'full_text':full_text}},
) | mit |
berkeley-stat159/project-alpha | final/image_scripts/convolution_appendix_plots.py | 1 | 2416 | """
Plot producing scripts for convolution appendix
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
import sys
project_location= "../../"
functions=project_location +"code/utils/functions/"
location_of_created_images=project_location+"images/"
sys.path.append(functions)
from event_related_fMRI_functions import hrf_single,convolution_specialized
one_zeros = np.zeros(40)
one_zeros[4] = 1
one_zeros[16:20]=1
plt.scatter(np.arange(40),one_zeros)
plt.xlim(-1,40)
plt.title("Stimulus pattern")
plt.savefig(location_of_created_images+"on_off_pattern.png")
plt.close()
plt.plot(np.linspace(0,30,200),np.array([hrf_single(x) for x in np.linspace(0,30,200)]))
plt.title("Single HRF, started at t=0")
plt.savefig(location_of_created_images+"hrf_pattern.png")
plt.close()
convolved=convolution_specialized(np.arange(40),one_zeros,hrf_single,np.linspace(0,60,300))
plt.plot(np.linspace(0,60,300),convolved)
plt.title("Convolution")
plt.savefig(location_of_created_images+"initial_convolved.png")
plt.close()
colors=["#CCCCFF","#C4C3D0","#92A1CF","#2A52BE","#003399","#120A8F","#000080","#002366"]
xx=np.linspace(0,30,3001)
i=3
one_zeros_2 = np.zeros(3001)
one_zeros_2[(2*i*100-15):(2*i*100+15)]=.6
plt.plot(xx,.6-one_zeros_2,color="black")
plt.title(" 'g' Moving Function")
plt.ylim(-.1,1)
plt.savefig(location_of_created_images+"play.png")
plt.close()
xx=np.linspace(0,30,3001)
y1 = np.array([hrf_single(x) for x in np.linspace(0,30,3001)])
plt.plot(xx,y1)
for i in range(len(colors)):
one_zeros_2 = np.zeros(3001)
one_zeros_2[(2*i*100-15):(2*i*100+15)]=.6
y2 = .6-one_zeros_2
# plt.plot(xx,y1)
plt.plot(xx,one_zeros_2,color="black")
plt.plot(xx,y2,color="white")
plt.fill_between(xx,y2,y1 , facecolor=colors[i],where= y2<.6)
plt.plot([15,19.75],[.4,.4],color="red")
plt.plot([19,20],[.41,.4],color="red")
plt.plot([19,20],[.39,.4],color="red")
plt.plot([19,19.75],[.41,.4],color="red")
plt.plot([19,19.75],[.39,.4],color="red")
plt.title("Math Convolution")
plt.savefig(location_of_created_images+"math_convolved.png")
plt.close()
"""
xx=np.linspace(0,30,301)
one_zeros_2 = np.zeros(301)
one_zeros_2[58:62]=.6
y2 = .6-one_zeros_2
y1 = np.array([hrf_single(x) for x in np.linspace(0,30,301)])
plt.plot(xx,y1)
plt.plot(xx,y2,color="white")
plt.fill_between(xx,y2,y1 , facecolor="blue",where= y2<.6)
"""
| bsd-3-clause |
galtay/cosmolabe | tests/test_eh98.py | 1 | 1268 | import numpy as np
import matplotlib.pyplot as plt
import cosmolabe as cl
from cosmolabe.transfer_functions import EH98
plt.ion()
eh98_cosmo_params = {
'Omega_m': 0.2, 'Omega_b': 0.1, 'Omega_c': 0.1, 'h': 0.5,
'T_cmb': 2.728 * cl.u.K
}
def main():
dat = np.loadtxt('trans.dat')
eh98 = EH98(eh98_cosmo_params)
k_arr = np.logspace(-3.0, 0.0, 1000) * eh98.cu.h / eh98.cu.Mpc
Tk_no_wiggles = eh98.T_no_wiggles(k_arr)
Tk_zero_baryon = eh98.T_zero_baryon(k_arr)
Tk = eh98.T(k_arr)
plt.loglog(k_arr, Tk_zero_baryon, color='green', lw=2.0, ls='-',
label='zero baryon')
plt.loglog(k_arr, Tk_no_wiggles, color='lime', lw=2.0, ls='-',
label='no wiggles')
plt.loglog(k_arr, np.abs(Tk), color='red', lw=2.0, ls='-',
label='full fit')
plt.loglog(dat[:,0], dat[:,1], color='blue', lw=1.0, ls='--',
label='original')
plt.grid(which='major', ls='-', lw=1.0, color='grey', alpha=0.5)
plt.grid(which='minor', ls='-', lw=1.0, color='grey', alpha=0.5)
plt.xlabel(r'$k \; [h \, {\rm Mpc}^{-1}]$', fontsize=20)
plt.ylabel(r'$|T(k)|$', fontsize=20)
plt.legend(loc='best')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
main()
| mit |
proto-n/Alpenglow | python/test_alpenglow/utils/test_ThreadedParameterSearch.py | 2 | 1048 | import alpenglow as prs
import alpenglow.experiments
import alpenglow.evaluation
import pandas as pd
import math
import unittest
from alpenglow.utils import ParameterSearch, ThreadedParameterSearch
class TestThreadedParameterSearch(unittest.TestCase):
def test_runMultiple(self):
data = pd.read_csv(
"python/test_alpenglow/test_data_4",
sep=' ',
header=None,
names=['time', 'user', 'item', 'id', 'score', 'eval']
)
model = alpenglow.experiments.PopularityExperiment(
top_k=100,
seed=254938879
)
c = ParameterSearch(model, alpenglow.evaluation.DcgScore)
c.set_parameter_values('top_k', [100, 50])
c.set_parameter_values('seed', [254938879, 123456])
d = ThreadedParameterSearch(model, alpenglow.evaluation.DcgScore)
d.set_parameter_values('top_k', [100, 50])
d.set_parameter_values('seed', [254938879, 123456])
r1 = c.run(data)
r2 = d.run(data)
assert r1.equals(r2)
| apache-2.0 |
MohammedWasim/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 244 | 2496 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
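# (illustrative numbers) a raw row of [10, 2, 0] becomes [0.83, 0.17, 0.],
# so every row of the normalized matrix sums to 1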
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
davebx/tools-iuc | tools/table_compute/scripts/safety.py | 17 | 9977 | import re
class Safety():
"""
Class to safely evaluate mathematical expression on single
or table data
"""
__allowed_tokens = (
'(', ')', 'if', 'else', 'or', 'and', 'not', 'in',
'+', '-', '*', '/', '%', ',', '!=', '==', '>', '>=', '<', '<=',
'min', 'max', 'sum',
)
__allowed_ref_types = {
'pd.DataFrame': {
'abs', 'add', 'agg', 'aggregate', 'align', 'all', 'any', 'append',
'apply', 'applymap', 'as_matrix', 'asfreq', 'at', 'axes', 'bool',
'clip', 'clip_lower', 'clip_upper', 'columns', 'combine',
'compound', 'corr', 'count', 'cov', 'cummax', 'cummin', 'cumprod',
'cumsum', 'describe', 'div', 'divide', 'dot', 'drop',
'drop_duplicates', 'droplevel', 'dropna', 'duplicated', 'empty',
'eq', 'equals', 'expanding', 'ffill', 'fillna', 'filter', 'first',
'first_valid_index', 'floordiv', 'ge', 'groupby', 'gt', 'head',
'iat', 'iloc', 'index', 'insert', 'interpolate', 'isin', 'isna',
'isnull', 'items', 'iteritems', 'iterrows', 'itertuples', 'ix',
'join', 'keys', 'kurt', 'kurtosis', 'last', 'last_valid_index',
'le', 'loc', 'lookup', 'lt', 'mad', 'mask', 'max', 'mean',
'median', 'melt', 'merge', 'min', 'mod', 'mode', 'mul', 'multiply',
'ndim', 'ne', 'nlargest', 'notna', 'notnull', 'nsmallest',
'nunique', 'pct_change', 'pivot', 'pivot_table', 'pop', 'pow',
'prod', 'product', 'quantile', 'radd', 'rank', 'rdiv', 'replace',
'resample', 'rfloordiv', 'rmod', 'rmul', 'rolling', 'round',
'rpow', 'rsub', 'rtruediv', 'sample', 'select',
'sem', 'shape', 'shift', 'size', 'skew', 'slice_shift',
'squeeze', 'stack', 'std', 'sub', 'subtract', 'sum', 'swapaxes',
'swaplevel', 'T', 'tail', 'take', 'transform', 'transpose',
'truediv', 'truncate', 'tshift', 'unstack', 'var', 'where',
},
'pd.Series': {
'abs', 'add', 'agg', 'aggregate', 'align', 'all', 'any', 'append',
'apply', 'argsort', 'as_matrix', 'asfreq', 'asof', 'astype', 'at',
'at_time', 'autocorr', 'axes', 'between', 'between_time', 'bfill',
'bool', 'cat', 'clip', 'clip_lower', 'clip_upper', 'combine',
'combine_first', 'compound', 'corr', 'count', 'cov', 'cummax',
'cummin', 'cumprod', 'cumsum', 'describe', 'diff', 'div', 'divide',
'divmod', 'dot', 'drop', 'drop_duplicates', 'droplevel', 'dropna',
'dt', 'dtype', 'dtypes', 'duplicated', 'empty', 'eq', 'equals',
'ewm', 'expanding', 'factorize', 'ffill', 'fillna', 'filter',
'first', 'first_valid_index', 'flags', 'floordiv', 'ge', 'groupby',
'gt', 'hasnans', 'head', 'iat', 'idxmax', 'idxmin', 'iloc', 'imag',
'index', 'interpolate', 'is_monotonic', 'is_monotonic_decreasing',
'is_monotonic_increasing', 'is_unique', 'isin', 'isna', 'isnull',
'item', 'items', 'iteritems', 'ix', 'keys', 'kurt', 'kurtosis',
'last', 'last_valid_index', 'le', 'loc', 'lt', 'mad', 'map',
'mask', 'max', 'mean', 'median', 'min', 'mod', 'mode', 'mul',
'multiply', 'name', 'ndim', 'ne', 'nlargest', 'nonzero', 'notna',
'notnull', 'nsmallest', 'nunique', 'pct_change', 'pop', 'pow',
'prod', 'product', 'ptp', 'quantile', 'radd', 'rank', 'rdiv',
'rdivmod', 'real', 'repeat', 'replace', 'resample', 'rfloordiv',
'rmod', 'rmul', 'rolling', 'round', 'rpow', 'rsub', 'rtruediv',
'sample', 'searchsorted', 'select', 'sem', 'shape', 'shift',
'size', 'skew', 'slice_shift', 'sort_index', 'sort_values',
'squeeze', 'std', 'sub', 'subtract', 'sum', 'swapaxes',
'swaplevel', 'T', 'tail', 'take', 'transform', 'transpose',
'truediv', 'truncate', 'tshift', 'unique', 'unstack',
'value_counts', 'var', 'where', 'xs',
},
}
__allowed_qualified = {
# allowed numpy functionality
'np': {
'abs', 'add', 'all', 'any', 'append', 'array', 'bool', 'ceil',
'complex', 'cos', 'cosh', 'cov', 'cumprod', 'cumsum', 'degrees',
'divide', 'divmod', 'dot', 'e', 'empty', 'exp', 'float', 'floor',
'hypot', 'inf', 'int', 'isfinite', 'isin', 'isinf', 'isnan', 'log',
'log10', 'log2', 'max', 'mean', 'median', 'min', 'mod', 'multiply',
'nan', 'ndim', 'pi', 'product', 'quantile', 'radians', 'rank',
'remainder', 'round', 'sin', 'sinh', 'size', 'sqrt', 'squeeze',
'stack', 'std', 'str', 'subtract', 'sum', 'swapaxes', 'take',
'tan', 'tanh', 'transpose', 'unique', 'var', 'where',
},
# allowed math functionality
'math': {
'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'ceil',
'copysign', 'cos', 'cosh', 'degrees', 'e', 'erf', 'erfc', 'exp',
'expm1', 'fabs', 'factorial', 'floor', 'fmod', 'frexp', 'fsum',
'gamma', 'gcd', 'hypot', 'inf', 'isclose', 'isfinite', 'isinf',
'isnan', 'ldexp', 'lgamma', 'log', 'log10', 'log1p', 'log2',
'modf', 'nan', 'pi', 'pow', 'radians', 'remainder', 'sin', 'sinh',
'sqrt', 'tan', 'tanh', 'tau', 'trunc',
},
# allowed pd functionality
'pd': {
'DataFrame', 'array', 'concat', 'cut', 'date_range', 'factorize',
'interval_range', 'isna', 'isnull', 'melt', 'merge', 'notna',
'notnull', 'period_range', 'pivot', 'pivot_table', 'unique',
'value_counts', 'wide_to_long',
},
}
def __init__(self, expression,
ref_whitelist=None, ref_type=None,
custom_qualified=None):
self.allowed_qualified = self.__allowed_qualified.copy()
if ref_whitelist is None:
self.these = []
else:
self.these = ref_whitelist
if ref_type is None or ref_type not in self.__allowed_ref_types:
self.allowed_qualified['_this'] = set()
else:
self.allowed_qualified[
'_this'
] = self.__allowed_ref_types[ref_type]
if custom_qualified is not None:
self.allowed_qualified.update(custom_qualified)
self.expr = expression
self.__assertSafe()
def generateFunction(self):
"Generates a function to be evaluated outside the class"
cust_fun = "def fun(%s):\n\treturn(%s)" % (self.these[0], self.expr)
return cust_fun
def __assertSafe(self):
indeed, problematic_token = self.__isSafeStatement()
if not indeed:
self.detailedExcuse(problematic_token)
raise ValueError("Custom Expression is not safe.")
@staticmethod
def detailedExcuse(word):
"Gives a verbose statement for why users should not use some specific operators."
mess = None
if word == "for":
mess = "for loops and comprehensions are not allowed. Use numpy or pandas table operations instead."
elif word == ":":
mess = "Colons are not allowed. Use inline Python if/else statements."
elif word == "=":
mess = "Variable assignment is not allowed. Use object methods to substitute values."
elif word in ("[", "]"):
mess = "Direct indexing of arrays is not allowed. Use numpy or pandas functions/methods to address specific parts of tables."
else:
mess = "Not an allowed token in this operation"
print("( '%s' ) %s" % (word, mess))
def __isSafeStatement(self):
"""
Determines if a user-expression is safe to evaluate.
To be considered safe an expression may contain only:
- standard Python operators and numbers
- inline conditional expressions
- select functions and objects
by default, these come from the math, numpy and pandas
libraries, and must be qualified with the modules' conventional
names math, np, pd; can be overridden at the instance level
- references to a whitelist of objects (pd.DataFrames by default)
and their methods
"""
safe = True
# examples of user-expressions
# '-math.log(1 - elem/4096) * 4096 if elem != 1 else elem - 0.5'
# 'vec.median() + vec.sum()'
# 1. Break expressions into tokens
# e.g.,
# [
# '-', 'math.log', '(', '1', '-', 'elem', '/', '4096', ')', '*',
# '4096', 'if', 'elem', '!=', '1', 'else', 'elem', '-', '0.5'
# ]
# or
# ['vec.median', '(', ')', '+', 'vec.sum', '(', ')']
tokens = [
e for e in re.split(
r'([a-zA-Z0-9_.]+|[^a-zA-Z0-9_.() ]+|[()])', self.expr
) if e.strip()
]
# 2. Subtract allowed standard tokens
rem = [e for e in tokens if e not in self.__allowed_tokens]
# 3. Subtract allowed qualified objects from allowed modules
# and whitelisted references and their attributes
rem2 = []
for e in rem:
parts = e.split('.')
if len(parts) == 1:
if parts[0] in self.these:
continue
if len(parts) == 2:
if parts[0] in self.these:
parts[0] = '_this'
if parts[0] in self.allowed_qualified:
if parts[1] in self.allowed_qualified[parts[0]]:
continue
rem2.append(e)
# 4. Assert that rest are real numbers or strings
e = ''
for e in rem2:
try:
_ = float(e)
except ValueError:
safe = False
break
return safe, e
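# ---------------------------------------------------------------------------
# Illustrative usage sketch (added here for clarity; not part of the original
# Galaxy tool). It validates a user expression against a whitelisted pandas
# Series reference, then builds and execs the generated function. The name
# `vec` and the sample data are demonstration assumptions only.
if __name__ == '__main__':
    import pandas as pd
    checker = Safety('vec.sum() + vec.mean()',
                     ref_whitelist=['vec'], ref_type='pd.Series')
    exec(checker.generateFunction())  # defines fun(vec) in this namespace
    print(fun(pd.Series([1.0, 2.0, 3.0])))  # 6.0 + 2.0 -> 8.0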
| mit |
CforED/Machine-Learning | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
rayNymous/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/delaunay/triangulate.py | 70 | 7732 | import warnings
try:
set
except NameError:
from sets import Set as set
import numpy as np
from matplotlib._delaunay import delaunay
from interpolate import LinearInterpolator, NNInterpolator
__all__ = ['Triangulation', 'DuplicatePointWarning']
class DuplicatePointWarning(RuntimeWarning):
"""Duplicate points were passed in to the triangulation routine.
"""
class Triangulation(object):
"""A Delaunay triangulation of points in a plane.
Triangulation(x, y)
x, y -- the coordinates of the points as 1-D arrays of floats
Let us make the following definitions:
npoints = number of points input
nedges = number of edges in the triangulation
ntriangles = number of triangles in the triangulation
point_id = an integer identifying a particular point (specifically, an
index into x and y), range(0, npoints)
edge_id = an integer identifying a particular edge, range(0, nedges)
triangle_id = an integer identifying a particular triangle
range(0, ntriangles)
Attributes: (all should be treated as read-only to maintain consistency)
x, y -- the coordinates of the points as 1-D arrays of floats.
circumcenters -- (ntriangles, 2) array of floats giving the (x,y)
coordinates of the circumcenters of each triangle (indexed by a
triangle_id).
edge_db -- (nedges, 2) array of point_id's giving the points forming
each edge in no particular order; indexed by an edge_id.
triangle_nodes -- (ntriangles, 3) array of point_id's giving the points
forming each triangle in counter-clockwise order; indexed by a
triangle_id.
triangle_neighbors -- (ntriangles, 3) array of triangle_id's giving the
neighboring triangle; indexed by a triangle_id.
The value can also be -1 meaning that that edge is on the convex hull of
the points and there is no neighbor on that edge. The values are ordered
such that triangle_neighbors[tri, i] corresponds with the edge
*opposite* triangle_nodes[tri, i]. As such, these neighbors are also in
counter-clockwise order.
hull -- list of point_id's giving the nodes which form the convex hull
of the point set. This list is sorted in counter-clockwise order.
"""
def __init__(self, x, y):
self.x = np.asarray(x, dtype=np.float64)
self.y = np.asarray(y, dtype=np.float64)
if self.x.shape != self.y.shape or len(self.x.shape) != 1:
raise ValueError("x,y must be equal-length 1-D arrays")
self.old_shape = self.x.shape
j_unique = self._collapse_duplicate_points()
if j_unique.shape != self.x.shape:
warnings.warn(
"Input data contains duplicate x,y points; some values are ignored.",
DuplicatePointWarning,
)
self.j_unique = j_unique
self.x = self.x[self.j_unique]
self.y = self.y[self.j_unique]
else:
self.j_unique = None
self.circumcenters, self.edge_db, self.triangle_nodes, \
self.triangle_neighbors = delaunay(self.x, self.y)
self.hull = self._compute_convex_hull()
def _collapse_duplicate_points(self):
"""Generate index array that picks out unique x,y points.
This appears to be required by the underlying delaunay triangulation
code.
"""
# Find the indices of the unique entries
j_sorted = np.lexsort(keys=(self.x, self.y))
mask_unique = np.hstack([
True,
(np.diff(self.x[j_sorted]) != 0) | (np.diff(self.y[j_sorted]) != 0),
])
return j_sorted[mask_unique]
def _compute_convex_hull(self):
"""Extract the convex hull from the triangulation information.
The output will be a list of point_id's in counter-clockwise order
forming the convex hull of the data set.
"""
border = (self.triangle_neighbors == -1)
edges = {}
edges.update(dict(zip(self.triangle_nodes[border[:,0]][:,1],
self.triangle_nodes[border[:,0]][:,2])))
edges.update(dict(zip(self.triangle_nodes[border[:,1]][:,2],
self.triangle_nodes[border[:,1]][:,0])))
edges.update(dict(zip(self.triangle_nodes[border[:,2]][:,0],
self.triangle_nodes[border[:,2]][:,1])))
# Take an arbitrary starting point and its subsequent node
hull = list(edges.popitem())
while edges:
hull.append(edges.pop(hull[-1]))
# hull[-1] == hull[0], so remove hull[-1]
hull.pop()
return hull
def linear_interpolator(self, z, default_value=np.nan):
"""Get an object which can interpolate within the convex hull by
assigning a plane to each triangle.
z -- an array of floats giving the known function values at each point
in the triangulation.
"""
z = np.asarray(z, dtype=np.float64)
if z.shape != self.old_shape:
raise ValueError("z must be the same shape as x and y")
if self.j_unique is not None:
z = z[self.j_unique]
return LinearInterpolator(self, z, default_value)
def nn_interpolator(self, z, default_value=np.nan):
"""Get an object which can interpolate within the convex hull by
the natural neighbors method.
z -- an array of floats giving the known function values at each point
in the triangulation.
"""
z = np.asarray(z, dtype=np.float64)
if z.shape != self.old_shape:
raise ValueError("z must be the same shape as x and y")
if self.j_unique is not None:
z = z[self.j_unique]
return NNInterpolator(self, z, default_value)
def prep_extrapolator(self, z, bbox=None):
if bbox is None:
bbox = (self.x[0], self.x[0], self.y[0], self.y[0])
minx, maxx, miny, maxy = np.asarray(bbox, np.float64)
minx = min(minx, np.minimum.reduce(self.x))
miny = min(miny, np.minimum.reduce(self.y))
maxx = max(maxx, np.maximum.reduce(self.x))
maxy = max(maxy, np.maximum.reduce(self.y))
M = max((maxx-minx)/2, (maxy-miny)/2)
midx = (minx + maxx)/2.0
midy = (miny + maxy)/2.0
xp, yp= np.array([[midx+3*M, midx, midx-3*M],
[midy, midy+3*M, midy-3*M]])
x1 = np.hstack((self.x, xp))
y1 = np.hstack((self.y, yp))
newtri = self.__class__(x1, y1)
# do a least-squares fit to a plane to make pseudo-data
xy1 = np.ones((len(self.x), 3), np.float64)
xy1[:,0] = self.x
xy1[:,1] = self.y
from numpy.dual import lstsq
c, res, rank, s = lstsq(xy1, z)
zp = np.hstack((z, xp*c[0] + yp*c[1] + c[2]))
return newtri, zp
def nn_extrapolator(self, z, bbox=None, default_value=np.nan):
newtri, zp = self.prep_extrapolator(z, bbox)
return newtri.nn_interpolator(zp, default_value)
def linear_extrapolator(self, z, bbox=None, default_value=np.nan):
newtri, zp = self.prep_extrapolator(z, bbox)
return newtri.linear_interpolator(zp, default_value)
def node_graph(self):
"""Return a graph of node_id's pointing to node_id's.
The arcs of the graph correspond to the edges in the triangulation.
{node_id: set([node_id, ...]), ...}
"""
g = {}
for i, j in self.edge_db:
s = g.setdefault(i, set())
s.add(j)
s = g.setdefault(j, set())
s.add(i)
return g
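# Minimal usage sketch (illustrative only; assumes the compiled
# matplotlib._delaunay extension that this module wraps is importable):
if __name__ == '__main__':
    x = np.random.random(25)
    y = np.random.random(25)
    tri = Triangulation(x, y)
    print(tri.triangle_nodes.shape)      # (ntriangles, 3) array of point_ids
    interp = tri.nn_interpolator(x + y)  # natural-neighbor interpolant of z=x+y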
| agpl-3.0 |
bougui505/SOM | application/structureClustering.py | 1 | 6157 | #!/usr/bin/env python
"""
author: Guillaume Bouvier
email: [email protected]
creation date: 01 10 2013
license: GNU GPL
Please feel free to use and modify this, but keep the above information.
Thanks!
"""
import matplotlib.pyplot
import IO
import numpy
import itertools
import scipy.spatial
import scipy.stats
import scipy.ndimage.measurements
import SOM
import glob
#from newProtocolModule import *
from SOMTools import *
import cPickle
import os
relearn = False
if glob.glob('inputMatrix.dat') == []:
struct = IO.Structure('struct.pdb')
fd=open('resList')
reslist=[ line[:-1].split(' ') for line in fd ]
reslist=[ (int(x),y) for x,y in reslist ]
# dico={}
# mask=numpy.zeros((struct.atoms.shape[0]),dtype="bool")
# for x,y in reslist:
# if y not in dico:
# dico[y]=struct.getSelectionIndices([y],'segid')
# mask=numpy.logical_or(mask,numpy.logical_and(dico[y],struct.getSelectionIndices([x],'resid')))
mask = numpy.ones((struct.atoms.shape[0]),dtype="bool")
traj = IO.Trajectory('traj.dcd', struct, selectionmask=mask, nframe=11731)
restraints = readRestraints()
dists = []
dotProducts = []
# i = itertools.count()
shapeTraj = traj.array.reshape(traj.nframe,traj.natom,3)
for r1, r2 in restraints:
try:
atom1 =(mask.nonzero()[0]==numpy.logical_and(traj.struct.getSelectionIndices([r1[0]],"resid"),traj.struct.getSelectionIndices([r1[1]],"segid")).nonzero()[0][0]).nonzero()[0][0]
atom2 =(mask.nonzero()[0]==numpy.logical_and(traj.struct.getSelectionIndices([r2[0]],"resid"),traj.struct.getSelectionIndices([r2[1]],"segid")).nonzero()[0][0]).nonzero()[0][0]
trajA1 = shapeTraj[:,atom1]
trajA1m = shapeTraj[:,atom1-1] #for Calpha i-1
trajA1p = shapeTraj[:,atom1+1] #for Calpha i+1
trajA2 = shapeTraj[:,atom2]
trajA2m = shapeTraj[:,atom2-1] #for Calpha i-1
trajA2p = shapeTraj[:,atom2+1] #for Calpha i+1
v_A1_1 = trajA1p - trajA1
v_A1_2 = trajA1m - trajA1
crossA1 = numpy.cross(v_A1_1, v_A1_2)
v_A2_1 = trajA2p - trajA2
v_A2_2 = trajA2m - trajA2
crossA2 = numpy.cross(v_A2_1, v_A2_2)
dotA1A2 = numpy.dot(crossA1/numpy.linalg.norm(crossA1),crossA2.T/numpy.linalg.norm(crossA2)).diagonal()
distA1A2 = numpy.sqrt(((trajA1 - trajA2)**2).sum(axis=1))
dists.append(distA1A2)
dotProducts.append(dotA1A2)
except IndexError:
pass
inputMatrix = numpy.dstack((numpy.asarray(dists).T, numpy.asarray(dotProducts).T))
x, y, z = inputMatrix.shape
inputMatrix = inputMatrix.reshape(x,y*z)
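    # resulting layout: one row per trajectory frame, columns interleaved per
    # restraint as [dist_0, dot_0, dist_1, dot_1, ...]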
#remove systematic zeros
# mask = 1-(inputMatrix == 0).all(axis=0)
# inputMatrix = inputMatrix.compress(mask, axis=1)
inMfile = open('inputMatrix.dat', 'w')
cPickle.dump(inputMatrix, inMfile)
inMfile.close()
else:
inMfile = open('inputMatrix.dat')
inputMatrix = cPickle.load(inMfile)
inMfile.close()
#Learning #############################################################################################################
mapFileName = 'map1.dat'
if glob.glob(mapFileName) == []:
som = SOM.SOM(inputMatrix, range(inputMatrix.shape[0]), metric='euclidean', autoParam = False)
som.learn()
os.system('mv map_%sx%s.dat map1.dat'%(som.X,som.Y))
else:
som = SOM.SOM(inputMatrix, range(inputMatrix.shape[0]), mapFileName=mapFileName, metric='euclidean', autoParam = False)
if relearn:
som.learn()
os.system('mv map_%sx%s.dat map1.dat'%(som.X,som.Y))
#######################################################################################################################
#Plot Maps ###############################################################
allMaps = allKohonenMap2D(som.Map, inputMatrix, metric='euclidean')
allMasks = findMinRegionAll(allMaps)
allMins = findMinAll(allMaps)
bmuDensity = numpy.reshape(allMins.sum(axis=1), (som.X,som.Y))
plotMat(bmuDensity, 'density.pdf', contour=False, interpolation='nearest')
density = numpy.reshape(allMasks.sum(axis=1), (som.X,som.Y))
plotMat(density, 'density2.pdf', contour=False)
#plot potential
pMap = restraintsPotential(som.Map[:,:,0:-1:2], 10, 28, 36)
stds = pMap.std(axis=0).std(axis=0)
varCoef = numpy.nan_to_num(scipy.stats.variation(scipy.stats.variation(pMap, axis=0), axis=0))
averagepMap = numpy.average(pMap, axis=2, weights=varCoef)
sumpMap = pMap.sum(axis=2)
plotMat(averagepMap, 'averageRestraintPotentialMap.pdf', contour=True)
plotMat(sumpMap, 'restraintPotentialMap.pdf', contour=True)
logHmap = numpy.log((som.Map[:,:,0:-1:2]/15)**2).sum(axis=2) # target distance = 15 A
plotMat(logHmap, 'logHmap.pdf', contour=True)
#Number of violated restraints
violationMap = (som.Map[:,:,0:-1:2] > 36).sum(axis=2)
plotMat(violationMap, 'violationMap.pdf', interpolation='nearest')
#EM map correlation
correlations = numpy.atleast_2d(numpy.genfromtxt('correlationEM.dat')[:,1])
meanCorrelationMatrix = getEMmapCorrelationMatrix(correlations, allMins, som)
plotMat(meanCorrelationMatrix, 'meanCorrelationMatrix.pdf', interpolation='nearest')
meanCorrelationRegions = getEMmapCorrelationMatrix(correlations, allMasks, som)
plotMat(meanCorrelationRegions, 'meanCorrelationRegions.pdf', contour=True)
#outside map correlation
outside = numpy.atleast_2d(numpy.genfromtxt('outsideEM.dat')[:,1])
meanOutsideMatrix = getEMmapCorrelationMatrix(outside, allMins, som)
plotMat(meanOutsideMatrix, 'meanOutsideMatrix.pdf', interpolation='nearest')
meanOutsideRegions = getEMmapCorrelationMatrix(outside, allMasks, som)
plotMat(meanOutsideRegions, 'meanOutsideRegions.pdf', contour=True)
##########################################################################
#uMatrix #############################################################
uMatrix = getUmatrix(som)
plotMat(uMatrix, 'uMatrix.pdf', contour=False)
clusterMatrix, nClusters = scipy.ndimage.measurements.label(findMinRegion(uMatrix, scale = 0.75))
plotMat(clusterMatrix, 'clusterMatrix.pdf', interpolation='nearest')
for i in range(1,nClusters+1):
indices = (allMins * numpy.atleast_2d((clusterMatrix == i).flatten()).T).any(axis=0)
cluster = numpy.array(som.inputnames)[indices]
outfile = open('cluster_%s.out'%i, 'w')
[outfile.write('%s\n'%(e+1)) for e in cluster] # start from 1
outfile.write('\n')
outfile.close()
vmdMap(sliceMatrix(uMatrix), 'uMatrix.map')
| gpl-2.0 |
toolforger/sympy | sympy/physics/quantum/tests/test_circuitplot.py | 93 | 2065 | from sympy.physics.quantum.circuitplot import labeller, render_label, Mz, CreateOneQubitGate,\
CreateCGate
from sympy.physics.quantum.gate import CNOT, H, SWAP, CGate, S, T
from sympy.external import import_module
from sympy.utilities.pytest import skip
mpl = import_module('matplotlib')
def test_render_label():
assert render_label('q0') == r'$|q0\rangle$'
assert render_label('q0', {'q0': '0'}) == r'$|q0\rangle=|0\rangle$'
def test_Mz():
assert str(Mz(0)) == 'Mz(0)'
def test_create1():
Qgate = CreateOneQubitGate('Q')
assert str(Qgate(0)) == 'Q(0)'
def test_createc():
Qgate = CreateCGate('Q')
assert str(Qgate([1],0)) == 'C((1),Q(0))'
def test_labeller():
"""Test the labeller utility"""
assert labeller(2) == ['q_1', 'q_0']
assert labeller(3,'j') == ['j_2', 'j_1', 'j_0']
def test_cnot():
"""Test a simple cnot circuit. Right now this only makes sure the code doesn't
raise an exception, and some simple properties
"""
if not mpl:
skip("matplotlib not installed")
else:
from sympy.physics.quantum.circuitplot import CircuitPlot
c = CircuitPlot(CNOT(1,0),2,labels=labeller(2))
assert c.ngates == 2
assert c.nqubits == 2
assert c.labels == ['q_1', 'q_0']
c = CircuitPlot(CNOT(1,0),2)
assert c.ngates == 2
assert c.nqubits == 2
assert c.labels == []
def test_ex1():
if not mpl:
skip("matplotlib not installed")
else:
from sympy.physics.quantum.circuitplot import CircuitPlot
c = CircuitPlot(CNOT(1,0)*H(1),2,labels=labeller(2))
assert c.ngates == 2
assert c.nqubits == 2
assert c.labels == ['q_1', 'q_0']
def test_ex4():
if not mpl:
skip("matplotlib not installed")
else:
from sympy.physics.quantum.circuitplot import CircuitPlot
c = CircuitPlot(SWAP(0,2)*H(0)* CGate((0,),S(1)) *H(1)*CGate((0,),T(2))\
*CGate((1,),S(2))*H(2),3,labels=labeller(3,'j'))
assert c.ngates == 7
assert c.nqubits == 3
assert c.labels == ['j_2', 'j_1', 'j_0']
| bsd-3-clause |
diegocavalca/Studies | phd-thesis/nilmtk/nilmtk/dataset_converters/greend/convert_greend.py | 1 | 6732 | from __future__ import print_function, division
from os import listdir, getcwd
from os.path import join, isdir, isfile, dirname, abspath
import pandas as pd
import numpy as np
import datetime
import time
from nilmtk.datastore import Key
from nilmtk.measurement import LEVEL_NAMES
from nilm_metadata import convert_yaml_to_hdf5
import warnings
from io import StringIO
from multiprocessing import Pool
from nilmtk.utils import get_module_directory
def _get_blocks(filename):
'''
Return a list of dataframes from a GREEND CSV file
GREEND files can be interpreted as multiple CSV blocks concatenated into
a single file per date. Since the columns of the individual blocks can
vary in a single file, they need to be read separately.
There are some issues we need to handle in the converter:
- the headers from the multiple blocks
- corrupted data (lines with null chars, broken lines)
- more fields than specified in header
'''
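    # A daily file therefore looks roughly like this (values invented for
    # illustration; sensor ids follow the 000D6F... pattern seen below):
    #   timestamp,000D6F00029C2918,000D6F00029C2919
    #   1390903200.1,12.5,0.0
    #   ...
    #   timestamp,000D6F00029C2918        <- new block, column set differs
    #   1390906800.3,11.9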
block_data = None
dfs = []
previous_header = None
print(filename)
# Use float64 for timestamps and float32 for the rest of the columns
dtypes = {}
dtypes['timestamp'] = np.float64
def _process_block():
if block_data is None:
return
block_data.seek(0)
try:
# ignore extra fields for some files
error_bad_lines = not (
('building5' in filename and 'dataset_2014-02-04.csv' in filename)
)
df = pd.read_csv(block_data, index_col='timestamp', dtype=dtypes, error_bad_lines=error_bad_lines)
except: #(pd.errors.ParserError, ValueError, TypeError):
print("ERROR", filename)
raise
df.index = pd.to_datetime(df.index, unit='s')
df = df.tz_localize("UTC").tz_convert("CET").sort_index()
dfs.append(df)
block_data.close()
special_check = (
('dataset_2014-01-28.csv' in filename and 'building5' in filename) or
('dataset_2014-09-02.csv' in filename and 'building6' in filename)
)
with open(filename, 'r') as f:
for line in f:
# At least one file have a bunch of nulls present, let's clean the data
line = line.strip('\0')
if 'time' in line:
# Found a new block
if not line.startswith('time'):
# Some lines are corrupted, e.g. 1415605814.541311,0.0,NULL,NUtimestamp,000D6F00029C2918...
line = line[line.find('time'):]
if previous_header == line.strip():
# Same exact header, we can treat it as the same block
# print('Skipping split')
continue
# Using a defaultdict for the dtypes didn't work with read_csv,
# so we fill a normal dict when we find the columns
cols = line.strip().split(',')[1:]
for col in cols:
dtypes[col] = np.float32
# print('Found new block')
_process_block()
block_data = StringIO()
previous_header = line.strip()
if special_check:
if ('0.072.172091508705606' in line or
'1409660828.0753369,NULL,NUL' == line):
continue
block_data.write(line)
# Process the remaining block
_process_block()
return (filename, dfs)
def _get_houses(greend_path):
house_list = listdir(greend_path)
return [h for h in house_list if isdir(join(greend_path,h))]
def convert_greend(greend_path, hdf_filename, use_mp=True):
"""
Parameters
----------
greend_path : str
The root path of the greend dataset.
hdf_filename : str
The destination HDF5 filename (including path and suffix).
use_mp : bool
Defaults to True. Use multiprocessing to load the files for
each building.
"""
store = pd.HDFStore(hdf_filename, 'w', complevel=5, complib='zlib')
houses = sorted(_get_houses(greend_path))
print('Houses found:', houses)
if use_mp:
pool = Pool()
h = 1 # nilmtk counts buildings from 1 not from 0 as we do, so everything is shifted by 1
for house in houses:
print('Loading', house)
abs_house = join(greend_path, house)
dates = [d for d in listdir(abs_house) if d.startswith('dataset')]
target_filenames = [join(abs_house, date) for date in dates]
if use_mp:
house_data = pool.map(_get_blocks, target_filenames)
# Ensure the blocks are sorted by date and make a plain list
house_data_dfs = []
for date, data in sorted(house_data, key=lambda x: x[0]):
house_data_dfs.extend(data)
else:
house_data_dfs = []
for fn in target_filenames:
house_data_dfs.extend(_get_blocks(fn)[1])
overall_df = pd.concat(house_data_dfs, sort=False).sort_index()
dups_in_index = overall_df.index.duplicated(keep='first')
if dups_in_index.any():
print("Found duplicated values in index, dropping them.")
overall_df = overall_df[~dups_in_index]
m = 1
for column in overall_df.columns:
print("meter {}: {}".format(m, column))
key = Key(building=h, meter=m)
print("Putting into store...")
df = overall_df[column].to_frame() #.dropna(axis=0)
# if drop_duplicates:
# print("Dropping duplicated values in data...")
# df = df.drop_duplicates()
df.columns = pd.MultiIndex.from_tuples([('power', 'active')])
df.columns.set_names(LEVEL_NAMES, inplace=True)
store.put(str(key), df, format = 'table')
m += 1
# print('Flushing store...')
# store.flush()
h += 1
store.close()
# retrieve the dataset metadata in the metadata subfolder
metadata_dir = join(get_module_directory(), 'dataset_converters', 'greend', 'metadata')
convert_yaml_to_hdf5(metadata_dir, hdf_filename)
#is only called when this file is the main file... only test purpose
if __name__ == '__main__':
t1 = time.time()
convert_greend('GREEND_0-2_300615',
'GREEND_0-2_300615.h5')
dt = time.time() - t1
print()
print()
print('Time passed: {}:{}'.format(int(dt/60), int(dt%60)))
| cc0-1.0 |
sonofeft/M_Pool | m_pool/matrix_obj.py | 1 | 37218 | #!/usr/bin/env python
# -*- coding: utf8 -*-
import sys
import itertools
import copy
import numpy as np
from scipy.interpolate import interp1d
from scipy.interpolate import RegularGridInterpolator
try:
from scipy.optimize import minimize
except:
print("...WARNING... scipy.optimize.minimize did NOT import...")
print(" ... min/max functions are UNAVAILABLE ...")
from m_pool.axis_obj import Axis
from m_pool.axis_pool import AxisPool, axis_obj_dammit
from m_pool.InterpProp_scipy import InterpProp
try:
import pylab
got_pylab = True
except:
got_pylab = False
class Matrix(object):
'''An Matrix object holds data for N dimensional data
There are N Axis objects for the data.
The data is a single number indexed by the axes index values.
*** Structured to easily pickle via a dictionary of named values for properties. ***
'''
def __init__(self, D={'name':'matrixName', 'matValArr':None, 'units':'',
'axisNameL':None, 'axisPoolObj':None} ):
'''Initialize with a dictionary so that pickle files can easily save and read objects
axisNameL holds the names of axes that are in the axisPoolObj.
The Matrix is dimensioned by the size of the axes in the order specified.
An Axis obj can be shared by many Matrix objects.
'''
self.name = D.get('name','UnkMatrix')
self.matValArr = D.get('matValArr', None)
self.units = D.get('units','')
# Let it crash if axisNameL and axisPoolObj are not specified
try:
self.axisNameL = D.get('axisNameL')
self.axisPoolObj = D.get('axisPoolObj')
except:
print('ERROR... both axisNameL and axisPoolObj MUST be specified in Matrix')
sys.exit()
self.axisL = [self.axisPoolObj.axisD[name] for name in self.axisNameL]
self.NumAxes = len( self.axisL )
shape = [len(A) for A in self.axisL]
# Init to Zeros if axes specified, but data not specified
if self.matValArr is None and shape:
self.matValArr = np.zeros( shape )
self.axisPoolObj.connectMatrixToAxes(self, self.axisNameL)
# temporary list of numpy matrices used for interpolation
self.terp_mL = [self.matValArr] # list of matrices used for interpolation
self.terp_reg_grid = None # will be initialized if used
self.terp_reg_grid_shape = None
def solve_interp_min(self, order=3, method='TNC', tol=1.0E-8): # method can be: SLSQP, TNC
return self.solve_interp_minmax( find_min=True, order=order, method=method, tol=tol)
def solve_interp_max(self, order=3, method='TNC', tol=1.0E-8): # method can be: SLSQP, TNC
return self.solve_interp_minmax( find_min=False, order=order, method=method, tol=tol)
def solve_interp_minmax(self, find_min=False,
order=3, method='TNC', tol=1.0E-8): # method can be: SLSQP, TNC
boundsL = []
startValL = []
axisNameL = []
mn,mx = self.get_min_max()
        val_range = mx - mn  # named to avoid shadowing the builtin range
interpD = {} # dictionary of axis values
if find_min:
iminmax = self.get_minima_indeces()
else:
iminmax = self.get_peak_indeces()
for i,im in enumerate( iminmax ):
#print 'minmax value at %s=%g'%(self.axisL[i].name, self.axisL[i][im])
#EPS=1.0E-10*abs(self.axisL[i][-1] - self.axisL[i][0])
boundsL.append( (self.axisL[i][0],self.axisL[i][-1]) )
startValL.append( self.axisL[i][im] )
axisNameL.append( self.axisL[i].name )
interpD[self.axisL[i].name] = self.axisL[i][im]
#print 'minmax value =',self.matValArr[ iminmax ],' Min =',mn,' Max =',mx
#print 'boundsL =',boundsL
#print 'startValL =',startValL
#print 'axisNameL =',axisNameL
#print 'interpD =',interpD
def fun( row ): # row is in axis-order from self.axisL
for i,val in enumerate(row):
interpD[ axisNameL[i] ] = val
mval = self.interp(order=order, **interpD )
            norm_val = float( (mval - mn) / val_range ) # normalize to help convergence
if find_min:
return norm_val
else:
return -norm_val
res = minimize(fun, tuple(startValL), method=method,
bounds=tuple(boundsL), tol=tol, options={'disp':False})
print(res)
fun( res.x )# make sure interpD is set
minmax_val = float( self.interp( **interpD ) )
return interpD, minmax_val
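    # Illustrative call (axis names are assumptions): for a Matrix M with
    # axes 'pc', 'eps' and 'mr',
    #   interpD, max_val = M.solve_interp_max(order=3)
    # returns e.g. {'pc': ..., 'eps': ..., 'mr': ...} plus the interpolated peak.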
def interp(self, order=1, **kwds): # kwds contains axis names... returns interpolated val
if order>1:
return self.interp_higher_order( order=order, **kwds )
else:
return self.interp_linear( order=order, **kwds )
def interp_linear(self, **kwds ):
if (self.terp_reg_grid is None) or (self.terp_reg_grid_shape != self.matValArr.shape ):
self.terp_reg_grid_shape = self.matValArr.shape
axis_valL = [ A.get_trans_valueL() for A in self.axisL ]
self.terp_reg_grid = RegularGridInterpolator(axis_valL, self.matValArr)
ptArr = np.array( [A.transObj( kwds[ A.name ] ) for A in self.axisL] )
ans = self.terp_reg_grid( ptArr )
#print( 'ans=',ans )
return ans[0]
def interp_higher_order(self, order=3, **kwds): # kwds contains axis names... returns interpolated val
'''
Call as: M.interp(order=3, pc=100, eps=20, mr=2.0)
Uses scipy.interpolate.interp1d
'''
# Only generate list of temporary matrices if 1st time, or if shape change
if (len(self.terp_mL)==1) or (self.terp_mL[0].shape != self.matValArr.shape):
#print 'orig shape =',self.matValArr.shape
self.terp_mL[0] = self.matValArr # list of matrices used for interpolation
#remove first dimension from each subsequent member of self.terp_mL
next_shape = list( self.matValArr.shape )[1:]
#print 'next_shape =',next_shape
while len(next_shape)>0:
self.terp_mL.append( np.zeros( next_shape ) )
next_shape = next_shape[1:]
#print 'next_shape =',next_shape
else:
self.terp_mL[0] = self.matValArr # verify 1st matrix is current
# interp from previous matrix for next matrix
for ia,m in enumerate(self.terp_mL[1:]): # ia is index into self.axisL for current axis
A = self.axisL[ia]
xval = A.transObj( kwds[ A.name ] )
kind = min(len(A)-1, order)
#print '... interpolating into',A.name,' xval=',xval,A
for mindeces in itertools.product(*(list(range(s)) for s in m.shape)):
# mindeces is a tuple index into m
# indeces is index into last m
yL = []
#print 'mindeces =',mindeces
mindecesL = list( mindeces )
for iv,vax in enumerate( A ):
indeces = tuple( [iv] + mindecesL )
val = self.terp_mL[ia][indeces]
#print indeces, val
yL.append( val )
#print 'xL=',A.transArr
#print 'yL=',yL
try:
                    # fill_value="extrapolate" lets newer scipy extrapolate
                    # out-of-range points; the except below is a fallback
                    # (e.g. older scipy) that clamps to the end values.
m[mindeces] = interp1d( A.transArr , yL, kind=kind, fill_value="extrapolate")(xval)
except:
#print('Extrapolating',A.name,'axis =',A.transArr,' xval=',xval)
print('Extrapolating',A.name,'axis %s='%A.name, kwds[ A.name ])
#print(' yL =',yL)
if xval>=A.transArr[-2]:
m[mindeces] = yL[-1] # assume out of bounds at high end
else:
m[mindeces] = yL[0] # assume out of bounds at low end
#print 'Last matrix(array) =',self.terp_mL[-1]
A = self.axisL[-1]
kind = min(len(A)-1, order)
xval = A.transObj( kwds[ A.name ] )
m = self.terp_mL[-1]
#print 'm =',m
#print 'axis =',A,' xval=',xval
try:
result = interp1d( A.transArr, m, kind=kind, fill_value="extrapolate")( xval )
except:
print('Extrapolating','axis =',A,' xval=',xval)
print(' m =',m)
if xval>=A.transArr[-2]:
result = m[-1] # assume out of bounds at high end
else:
result = m[0] # assume out of bounds at low end
#print 'type(result)=',type(result), result.shape
#return result
return float( result )
def numNonZero(self):
return np.count_nonzero( self.matValArr )
def iPercentFull(self): # return an integer percent full
ntotal = 1
for i in self.matValArr.shape:
ntotal *= i
nfull = np.count_nonzero( self.matValArr )
        return (100 * nfull) // ntotal  # floor division keeps it an integer
def get_pickleable_dict(self):
'''Note that matrix_pool supplies axisPoolObj for pickled Matrix'''
return {'name':self.name, 'matValArr':self.matValArr, 'units':self.units,
'axisNameL':self.axisNameL}
def insert_dimension(self, iaxis,i ):
newMat = np.insert( self.matValArr, i, 0.0, axis=iaxis )
self.matValArr = newMat
def long_summ(self):
sL = [self.short_summ()]
sL.append( 'get_range = %s'%( self.get_range(), ))
sL.append( 'get_ave = %s'%( self.get_ave(), ))
sL.append( 'get_mean = %s'%( self.get_mean(), ))
sL.append( 'get_std = %s'%( self.get_std(), ))
sL.append( 'get_median = %s'%( self.get_median(), ))
sL.append( 'get_min_max = %s'%( self.get_min_max(), ))
return '\n'.join( sL )
def short_summ(self):
if self.matValArr is None:
sL = ['Matrix %s (shape=%s) %s (units=%s)'%(self.name, str(self.matValArr),self.name, self.units)]
else:
sL = ['Matrix %s (shape=%s) %s (units=%s)'%(self.name, str(self.matValArr.shape),self.name, self.units)]
for A in self.axisL:
s = str(A)
ssL = s.split('\n')
for s in ssL:
sL.append( ' ' + s )
#sL.append( str(A) )
return '\n'.join( sL )
def __str__(self):
s = self.short_summ()
sL = s.split('\n')
#if self.matValArr is None:
# sL = ['Matrix %s (shape=%s) %s (units=%s)'%(self.name, str(self.matValArr),self.name, self.units)]
#else:
# sL = ['Matrix %s (shape=%s) %s (units=%s)'%(self.name, str(self.matValArr.shape),self.name, self.units)]
#for A in self.axisL:
# sL.append( str(A) )
sL.append( str(self.matValArr) )
return '\n'.join( sL )
def __getitem__(self, iL):
return self.matValArr[ tuple(iL) ]
def __setitem__(self, iL, val): # use as M[(i,j,k)] = val
if val is None:
print('ERROR... illegal value for "val" in Matrix.set. val =',val)
else:
self.matValArr[tuple(iL)] = float(val)
def setByName(self, **kwds): # kwds contains axis names and "val"
'''Call as: M.setByName(pc=100, eps=20, mr=2.0, val=29.23)'''
iL = [] # list of indeces into matrix array
for A in self.axisL:
iL.append( A.getExactIndex( kwds[A.name] ) )
self.matValArr[tuple(iL)] = float( kwds['val'] )
def getByName(self, **kwds): # kwds contains axis names... returns val
'''Call as: M.getByName(pc=100, eps=20, mr=2.0)'''
iL = [] # list of indeces into matrix array
for A in self.axisL:
iL.append( A.getExactIndex( kwds[A.name] ) )
return self.matValArr[tuple(iL)]
def get_list_of_peak_indeces(self):
"""Returns a list of all occurances of max value"""
max_val = self.get_max()
return np.argwhere( self.matValArr == max_val )
    def get_peak_indeces(self):
        """Returns the first occurrence of the max value"""
imax = np.unravel_index(self.matValArr.argmax(), self.matValArr.shape)
return imax
    def get_peak_dict(self):
        """Returns the first occurrence of the max value"""
imax = np.unravel_index(self.matValArr.argmax(), self.matValArr.shape)
D = {}
for i,im in enumerate( imax ):
D[self.axisL[i].name] = self.axisL[i][im]
return D
    def get_minima_indeces(self):
        """Returns the first occurrence of the min value"""
imin = np.unravel_index(self.matValArr.argmin(), self.matValArr.shape)
return imin
    def get_minima_dict(self):
        """Returns the first occurrence of the min value"""
imin = np.unravel_index(self.matValArr.argmin(), self.matValArr.shape)
D = {}
for i,im in enumerate( imin ):
D[self.axisL[i].name] = self.axisL[i][im]
return D
def get_min_max(self):
return np.nanmin(self.matValArr), np.nanmax(self.matValArr)
def get_min(self):
return np.nanmin(self.matValArr)
def get_max(self):
return np.nanmax(self.matValArr)
def get_sum(self):
return np.nansum(self.matValArr)
def get_ave(self):
return np.average(self.matValArr)
def get_mean(self):
return np.mean(self.matValArr)
def get_std(self):
return np.std(self.matValArr)
def get_median(self):
return np.median(self.matValArr)
def get_range(self): # returns max - min
return np.ptp(self.matValArr) # peak to peak
def __len__(self):
return len( self.matValArr )
def shape(self):
return self.matValArr.shape
def size(self):
return np.prod( self.matValArr.shape )
def iter_indeces(self): # an iterator over the indeces of the matrix
for indeces in itertools.product(*(list(range(s)) for s in self.matValArr.shape)):
yield indeces
def iter_items(self): # iterator returns indeces and value at location
for indeces in itertools.product(*(list(range(s)) for s in self.matValArr.shape)):
val = self.matValArr[indeces]
yield indeces,val
def full_iter_items(self): # iterator returns indeces, value and axes value dictionary
for indeces in itertools.product(*(list(range(s)) for s in self.matValArr.shape)):
val = self.matValArr[indeces]
D={}
for i,axname in enumerate( self.axisNameL ):
D[axname] = self.axisL[ i ][indeces[i]]
yield indeces,D,val
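    # e.g.  for indeces, axD, val in M.full_iter_items():
    #           print(indeces, axD, val)   # axD maps axis name -> axis value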
def clone(self):
Mclone = copy.deepcopy( self )
Mclone.name = self.name + '(clone)'
return Mclone
def __neg__(self):
Mclone = self.clone()
Mclone.matValArr = np.negative( Mclone.matValArr )
return Mclone
#return self * (-1.0)
def __abs__(self):
Mclone = self.clone()
Mclone.matValArr = abs(self.matValArr)
return Mclone
def __add__(self, other):
Mclone = self.clone()
if isinstance(other, Matrix):
Mclone.name = self.name + ' + %s'%other.name
Mclone.matValArr = self.matValArr + other.matValArr
else:
Mclone.name = self.name + ' + %s'%other
Mclone.matValArr = self.matValArr + other
return Mclone
def __radd__(self, other):
return self.__add__(other)
def __iadd__(self, other):
if isinstance(other, Matrix):
self.name = self.name + ' + %s'%other.name
self.matValArr = self.matValArr + other.matValArr
else:
self.name = self.name + ' + %s'%other
self.matValArr = self.matValArr + other
return self
def __sub__(self, other):
Mclone = self.clone()
if isinstance(other, Matrix):
Mclone.name = self.name + ' - %s'%other.name
Mclone.matValArr = self.matValArr - other.matValArr
else:
Mclone.name = self.name + ' - %s'%other
Mclone.matValArr = self.matValArr - other
return Mclone
def __rsub__(self, other):
Mclone = self.clone()
Mclone.matValArr = np.negative( Mclone.matValArr )
return Mclone + other
def __isub__(self, other):
if isinstance(other, Matrix):
self.name = self.name + ' - %s'%other.name
self.matValArr = self.matValArr - other.matValArr
else:
self.name = self.name + ' - %s'%other
self.matValArr = self.matValArr - other
return self
def __mul__(self, other):
Mclone = self.clone()
if isinstance(other, Matrix):
Mclone.name = self.name + ' * %s'%other.name
Mclone.matValArr = self.matValArr * other.matValArr
else:
Mclone.name = self.name + ' * %s'%other
Mclone.matValArr = self.matValArr * other
return Mclone
def __rmul__(self, other):
return self * other
def __imul__(self, other):
if isinstance(other, Matrix):
self.name = self.name + ' * %s'%other.name
self.matValArr = self.matValArr * other.matValArr
else:
self.name = self.name + ' * %s'%other
self.matValArr = self.matValArr * other
return self
def __div__(self, other):
Mclone = self.clone()
if isinstance(other, Matrix):
Mclone.name = self.name + ' / %s'%other.name
Mclone.matValArr = self.matValArr / other.matValArr
else:
Mclone.name = self.name + ' / %s'%other
Mclone.matValArr = self.matValArr / other
return Mclone
def __rdiv__(self, other):
Mclone = self.clone()
Mclone.matValArr = np.reciprocal( Mclone.matValArr )
return Mclone * other
def __idiv__(self, other):
#print ' plain div'
if isinstance(other, Matrix):
self.name = self.name + ' / %s'%other.name
self.matValArr = self.matValArr / other.matValArr
else:
self.name = self.name + ' / %s'%other
self.matValArr = self.matValArr / other
return self
def __truediv__(self, other): # assumes from __future__ import division
return self.__div__(other)
def __rtruediv__(self, other): # assumes from __future__ import division
return self.__rdiv__(other)
def __itruediv__(self, other): # assumes from __future__ import division
#print 'truediv'
return self.__idiv__(other)
def __pow__(self, other):
Mclone = self.clone()
if isinstance(other, Matrix):
Mclone.name = self.name + ' ** %s'%other.name
Mclone.matValArr = self.matValArr ** other.matValArr
else:
Mclone.name = self.name + ' ** %s'%other
Mclone.matValArr = self.matValArr ** other
return Mclone
def __rpow__(self, other):
Mclone = self.clone()
Mclone.matValArr = (Mclone.matValArr*0.0) + other
return Mclone**self
def __ipow__(self, other):
if isinstance(other, Matrix):
self.name = self.name + ' ** %s'%other.name
self.matValArr = self.matValArr ** other.matValArr
else:
self.name = self.name + ' ** %s'%other
self.matValArr = self.matValArr ** other
return self
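    # The arithmetic operators above all return (or update in place) a Matrix
    # whose name records the expression, which is handy for plot titles.
    # A minimal usage sketch (M and M2 are assumed to be Matrix objects with
    # identical axes; the numbers are illustrative):
    #     M3 = 2.0 * M + M2 / 10.0 - 1.0
    #     M3 **= 0.5
    #     print(M3.name)   # shows the accumulated expression history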
def get_sub_matrix(self, **kwds): # kwds contains axis names... returns val
'''Call as: M.get_sub_matrix(pc=100, eps=20, mr=2.0)
Return a smaller Matrix at specified values in kwds'''
is_in_cutL=[0 for axname in self.axisNameL] # set to 1 if axname is a cut plane
orig_indexL = is_in_cutL[:] # hold index into axis for input axis value
newAxisNameL = [] # smaller list of axis names in new, smaller Matrix
for ia,axname in enumerate(self.axisNameL):
if axname in kwds:
is_in_cutL[ia]=1
# Also hold const index in cut axis
orig_indexL[ia] = self.axisL[ia].getExactIndex( kwds[axname] )
else:
newAxisNameL.append( axname )
#print 'is a slice plane =',is_in_cutL
#print 'Index of slice plane =',orig_indexL
new_name = self.name +'_'+ '_'.join( ['%s=%s'%(n,v) for n,v in list(kwds.items())] )
M = Matrix( {'name':new_name, 'units':self.units,
'axisNameL':newAxisNameL, 'axisPoolObj':self.axisPoolObj} )
# TODO: change to faster numpy slicing method.
for new_indeces in M.iter_indeces():
inew = 0
for i,ia in enumerate(is_in_cutL):
if ia==0: # if axis in new Matrix, iterate indeces
orig_indexL[i] = new_indeces[inew]
inew += 1
M[ tuple(new_indeces) ] = self.matValArr[ tuple(orig_indexL) ]
return M
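    # A hedged sketch of the faster numpy slicing mentioned in the TODO above
    # (untested here; relies only on numpy basic indexing, where a tuple mixing
    # ints and slice(None) drops the cut axes and keeps the rest in order):
    #     slicer = tuple( orig_indexL[i] if is_in_cutL[i] else slice(None)
    #                     for i in range(len(is_in_cutL)) )
    #     M.matValArr = self.matValArr[ slicer ].copy()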
def values_in_range(self, **kwds):
for k,v in kwds.items():
A = self.get_axis_by_name( k )
if not A.value_in_range( v ):
return False
return True
def get_axis_by_name(self, aname):
for a in self.axisL:
if a.name == aname:
return a
return None
def matrix_axis_name_list(self):
return [a.name for a in self.axisL]
def is_axis_name(self, axis_name):
return axis_name in self.matrix_axis_name_list()
    def get_indeces_where(self, if_gt=0.0, if_lt=None):
        """Return indices of values in range. Ignore a bound that is set to None"""
        if if_lt is None:
            return np.argwhere( self.matValArr > if_gt )
        elif if_gt is None:
            return np.argwhere( self.matValArr < if_lt )
        else:
            # NOTE: elementwise & (not "and") is required on numpy arrays
            return np.argwhere( (self.matValArr < if_lt) & (self.matValArr > if_gt) )
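    # Usage sketch (thresholds are illustrative): find indices of negative
    # entries, then look up their axis values with get_dict_of_indeces below.
    #     for idx in M.get_indeces_where( if_gt=None, if_lt=0.0 ):
    #         print( M.get_dict_of_indeces(idx), M[tuple(idx)] )
    # np.argwhere returns an (n, ndim) array, so each row is one index set.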
def get_dict_of_indeces(self, indeces):
D={}
for i,axname in enumerate( self.axisNameL ):
D[axname] = self.axisL[ i ][indeces[i]]
return D
def fill_missing_from_good_neighbors(self, bad_if_lt=0.0, bad_if_gt=None,
good_if_gt=0.0, good_if_lt=None):
badL = self.get_indeces_where( if_gt=bad_if_gt, if_lt=bad_if_lt)
for iL in badL:
good_ivL = self.get_nearest_good_neighbors(iL, good_if_gt=good_if_gt, good_if_lt=good_if_lt)
sum_wts = 0.0 # sum of data pt weights
sum_val_x_wts = 0.0 # sum of value * wt
for good_iv in good_ivL:
good_indeces = good_iv[0]
dist = sum( [ abs(i1-i2) for (i1,i2) in zip(iL,good_indeces) ] )
                #print(dist, iL, good_indeces)
if dist > 0:
wt = 1.0/float(dist)
sum_wts += wt
sum_val_x_wts += wt * self[ good_indeces ]
if sum_wts > 0.0:
new_val = sum_val_x_wts / sum_wts
#iD = self.get_dict_of_indeces(iL)
#iD['val'] = new_val
#self.setByName( **iD )
self[ iL ] = new_val
#print(iL,'new_val',new_val, self.get_dict_of_indeces(iL), self[iL])
#for good_iv in good_ivL:
# print(' ',good_iv, self.get_dict_of_indeces(good_iv[0]), self[good_iv[0]])
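    # Worked example of the inverse-Manhattan-distance weighting above
    # (numbers are illustrative): with good neighbors at index distances
    # 1 and 2 holding values 10.0 and 16.0, the fill value is
    #     ( (1/1)*10.0 + (1/2)*16.0 ) / ( 1/1 + 1/2 ) = 18.0 / 1.5 = 12.0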
    def get_nearest_good_neighbors(self, iL, good_if_gt=0.0, good_if_lt=None ):
        """Return the indices of the nearest good neighbor along each axis direction"""
        def is_good_val( val ):
            if good_if_gt is None:
                return val < good_if_lt
            elif good_if_lt is None:
                return val > good_if_gt
            else:
                return val > good_if_gt and val < good_if_lt
        iL = list( iL ) # makes a list copy
        good_ivL = [] # list of tuples (indices, val)
        for ia, i in enumerate( iL ):
            a = self.axisL[ia]
            # search upward along axis ia; stop at the first good value
            itestL = iL[:]
            for j in range( i+1, len(a) ):
                itestL[ia] = j
                if is_good_val( self[ itestL ] ):
                    good_ivL.append( (itestL[:], self[itestL]) )
                    break
            # search downward along axis ia; stop at the first good value
            itestL = iL[:]
            for j in range( i-1, -1, -1 ):
                itestL[ia] = j
                if is_good_val( self[ itestL ] ):
                    good_ivL.append( (itestL[:], self[itestL]) )
                    break
        return good_ivL
def interp_missing_from_good_neighbors(self, bad_if_lt=0.0, bad_if_gt=None,
good_if_gt=0.0, good_if_lt=None):
badL = self.get_indeces_where( if_gt=bad_if_gt, if_lt=bad_if_lt)
print( "Replacing %i bad values from Nearest Neighbors in"%len(badL), self.name )
for iL in badL:
good_ivL = self.get_nearest_good_neighbors(iL, good_if_gt=good_if_gt, good_if_lt=good_if_lt)
sum_wts = 0.0 # sum of data pt weights
sum_val_x_wts = 0.0 # sum of value * wt
for good_iv in good_ivL:
good_indeces = good_iv[0]
dist = sum( [ abs(i1-i2) for (i1,i2) in zip(iL,good_indeces) ] )
                #print(dist, iL, good_indeces)
if dist > 0:
wt = 1.0/float(dist)
sum_wts += wt
sum_val_x_wts += wt * self[ good_indeces ]
if sum_wts > 0.0:
new_val = sum_val_x_wts / sum_wts
#iD = self.get_dict_of_indeces(iL)
#iD['val'] = new_val
#self.setByName( **iD )
self[ iL ] = new_val
#print(iL,'new_val',new_val, self.get_dict_of_indeces(iL), self[iL])
#for good_iv in good_ivL:
# print(' ',good_iv, self.get_dict_of_indeces(good_iv[0]), self[good_iv[0]])
    def get_1d_interp_fill_value(self, i_centerL, good_if_gt=0.0, good_if_lt=None):
        """
        Given the indices, i_centerL, of a point in the matrix, interpolate
        along each axis using only GOOD values (as defined by good_if_gt and
        good_if_lt) and return the average of those 1D interpolations.
        If no axis has any good values, return the current value unchanged.
        """
valueL = [] # list of interpolated values (will take average at end)
for ia,a in enumerate(self.axisL):
# start with a fresh center list
cL = list( i_centerL )
aL = [] # good axis value list
vL = [] # good value list
for i in range( len(a) ):
cL[ia] = i
val = self[ cL ]
if good_if_gt is None:
if val < good_if_lt:
aL.append( a.transObj( a.valueL[i] ) )
vL.append( val )
elif good_if_lt is None:
if val > good_if_gt:
aL.append( a.transObj( a.valueL[i] ) )
vL.append( val )
else:
if val > good_if_gt and val < good_if_lt:
aL.append( a.transObj( a.valueL[i] ) )
vL.append( val )
if aL:
terp = InterpProp(aL, vL, extrapOK=1, linear=1)
valueL.append( terp( a.transObj( a.valueL[ i_centerL[ia] ] ) ) )
#print(' val:',val,' terpVal:',valueL[-1], 'aL:',aL,' vL:',vL)
if valueL:
val = sum(valueL) / len(valueL)
else:
val = self[ i_centerL ]
return val
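    # Illustrative example: if the 1D fit through the bad point gives 4.0
    # along one axis and 6.0 along another (and the remaining axis has no
    # good values), get_1d_interp_fill_value returns (4.0 + 6.0)/2 = 5.0.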
def fill_missing_from_1d_interp(self, bad_if_lt=0.0, bad_if_gt=None,
good_if_gt=0.0, good_if_lt=None):
badL = self.get_indeces_where( if_gt=bad_if_gt, if_lt=bad_if_lt)
print( "1D Interpolating %i bad values in"%len(badL), self.name )
new_valD = {} # index:bad_indeces, value:val
for iL in badL:
val = self.get_1d_interp_fill_value( iL, good_if_gt=good_if_gt, good_if_lt=good_if_lt)
new_valD[ tuple(iL) ] = val
for k,v in new_valD.items():
self[ k ] = v
# Just in case interpolation fails, use good neighbors
self.interp_missing_from_good_neighbors( bad_if_lt=bad_if_lt, bad_if_gt=bad_if_gt,
good_if_gt=good_if_gt, good_if_lt=good_if_lt)
def plot_x_param(self, xVar='', param='', fixedD=None,
interp_pts=0, interp_order=2,
is_semilog=False, marker='o', markersize=0,
rev_param_order=False, show_grid=True,
min_val=float('-inf'), max_val=float('inf')):
"""
Make a plot of xVar vs matrix value, parameterized by param.
If left blank, use names of 1st two axes.
Set any other axis values based on fixedD.
If not in fixedD, then use median value of axis.
If interp_pts>0, insert interpolated points between axis pts
"""
#self.axisL self.matValArr
if len( self.axisL ) < 2:
print('ERROR... can not make plot_x_param with less than 2 axes.')
return
if not got_pylab:
print('ERROR... pylab FAILED to import.')
return
# if xVar not input, set it to one of 1st 2 axes
if not self.is_axis_name(xVar):
if param != self.axisL[0].name:
xVar = self.axisL[0].name
else:
xVar = self.axisL[1].name
# if param not input, set it to one of 1st 2 axes
if not self.is_axis_name(param):
if xVar != self.axisL[0].name:
param = self.axisL[0].name
else:
param = self.axisL[1].name
#print('xVar=%s, param=%s'%(xVar, param))
xAxis = self.get_axis_by_name( xVar )
pAxis = self.get_axis_by_name( param )
#print( 'xAxis =',xAxis )
#print( 'pAxis =',pAxis )
changing_paramL = [xVar, param]
# prepare fixedD of constant values
fixed_paramL = []
if fixedD is None:
D = {}
else:
D = fixedD.copy()
sL = [] # making title string of fixed values
fixedD = {}
for a in self.axisL:
if a.name not in D:
D[a.name] = a.get_middle_value()
if a.name not in changing_paramL:
fixed_paramL.append( a.name )
sL.append( '%s=%g %s'%(a.name, D[a.name], a.units) )
fixedD[a.name] = D[a.name]
fixed_s = ', '.join(sL)
#print( "D=", D, ' fixedD',fixedD )
#print( 'fixed_paramL',fixed_paramL, ' fixed_s',fixed_s )
#print( 'changing_paramL',changing_paramL )
# .......... get sub-matrix to speed things up ..................
SP = self.get_sub_matrix( **fixedD )
# ================= start making plot ========================
if rev_param_order:
paramL = reversed( pAxis.valueL )
else:
paramL = pAxis.valueL
pylab.figure()
markerD = {} # matplotlib options
if marker:
            markerD['marker'] = marker
markerD['markevery'] = 1 + interp_pts
if markersize:
markerD['markersize'] = markersize
# .... begin iterating over param and xVar
for p in paramL:
fL = []
xL = []
for x in xAxis.valueL:
D[ xVar ] = x
D[ param ] = p
                # x always comes from xAxis.valueL in this loop, so a direct
                # lookup suffices; interpolated points are added further below.
                f = SP.getByName( **D )
if f is not None and ( min_val <= f <= max_val):
fL.append( f )
xL.append( x )
if xL:
                if interp_pts:
                    # make a transformed list of x's for interpolation
                    xtL = [ xAxis.transObj(x) for x in xL]
                    # make full xvarL for interpolation
                    xvarL = xAxis.valueL[:] # make a copy... it will be modified
                    f = 1.0/(1.0 + interp_pts)
                    for i in range( len(xL) - 1 ):
                        for j in range( interp_pts ):
                            xvarL.append( xL[i] + f*(j+1) * (xL[i+1] - xL[i]) )
                    xL = sorted( xvarL )
                    # build the interpolator once, then evaluate it at every x
                    terp = interp1d( xtL, fL, kind=interp_order, fill_value="extrapolate")
                    fL = [ terp( xAxis.transObj(x) ) for x in xL ]
# Assume there are some interpolated points... plot twice.
if is_semilog:
lastplot = pylab.semilogx(xL, fL, label='%s=%g'%(param, p), **markerD)
c = lastplot[0].get_color()
pylab.semilogx(xL, fL, linestyle='None', marker='|', color=c)
else:
lastplot = pylab.plot(xL, fL, label='%s=%g'%(param, p), **markerD)
c = lastplot[0].get_color()
pylab.plot(xL, fL, linestyle='None', marker='|', color=c)
pylab.title( '%s: %s'%(self.name, fixed_s) )
pylab.legend(loc='best', framealpha=0.3)
def axis_label( a ):
if a.units:
return '%s (%s)'%(a.name, a.units)
else:
return a.name
if show_grid:
pylab.grid()
pylab.xlabel( axis_label( xAxis ) )
pylab.ylabel( self.name )
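# A minimal usage sketch for plot_x_param (assumes pylab imported OK; the
# axis names match the demo below, and the fixed value is illustrative):
#     M.plot_x_param( xVar='eps', param='mr', fixedD={'pc':200.0},
#                     interp_pts=2, is_semilog=True )
#     pylab.show()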
if __name__=="__main__":
epsAxis = Axis({'name':'eps', 'valueL':[10., 20., 30., 40.],
'units':'', 'transform':'log10'})
# Just a dict, not an Axis obj
pcAxis = {'name':'pc', 'valueL':[100.,200.,300], 'units':'psia', 'transform':'log10'}
mrAxis = Axis({'name':'mr', 'valueL':[1,2,3,4,5],
'units':'', 'transform':''})
axesDefL = [epsAxis, pcAxis, mrAxis]
AP = AxisPool( {'axesDefL':axesDefL} )
axisNameL = ['eps','pc','mr']
shape = [len(AP.axisD[name]) for name in axisNameL]
print('shape =',shape)
matValArr = np.zeros( shape )
n0,n1,n2 = axisNameL
for i0,v0 in enumerate(AP.axisD[n0]):
for i1,v1 in enumerate(AP.axisD[n1]):
for i2,v2 in enumerate(AP.axisD[n2]):
matValArr[i0,i1,i2] = v0+v1+v2
M = Matrix( {'name':'isp_ode', 'matValArr':matValArr, 'units':'',
'axisNameL':axisNameL, 'axisPoolObj':AP} )
#print M.axisL
print(M)
#print type( M.axisL[0] ) == Axis
#print type( {1:1} ) == dict
print(M[(0,0,0)],M[3,2,4],'__getitem__ examples')
print('_'*55)
print(mrAxis.matrixConnectionL)
#epsAxis.add_value( 16.0 )
j = AP.add_value_to_Axis('pc', 250.0)
print(M)
    print(' ...Added new axis value. Matrix expands to accommodate')
print('_'*55)
for i in range( len(epsAxis) ):
for k in range( len(mrAxis) ):
M[(i,j,k)] = 7777.0
print(M)
    print(' ...Set inserted values to 7777, using the index returned by the axis value insert.')
print('_'*55)
print()
pc = 250.0
for eps in epsAxis:
for mr in mrAxis:
M.setByName( pc=pc, eps=eps, mr=mr, val=9999.0 )
print(M)
    print(' ...change 7777 to 9999 using named indexing (pc=pc, eps=eps, mr=mr).')
print('_'*55)
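    # A hedged extension of the demo: plant one "bad" value and repair it with
    # the inverse-distance nearest-neighbor fill (thresholds are illustrative).
    M.setByName( pc=250.0, eps=10.0, mr=1, val=-1.0 )
    M.interp_missing_from_good_neighbors( bad_if_lt=0.0, good_if_gt=0.0 )
    print(M.getByName( pc=250.0, eps=10.0, mr=1 ), '...bad value replaced from good neighbors')
    print('_'*55)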
| gpl-3.0 |
lbishal/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 69 | 3894 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import MultiTaskElasticNet
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
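# The fixture classes above are deliberately broken estimators; each is fed
# to check_estimator below to prove that the corresponding check fires. A
# minimal sketch of the usage pattern being exercised (MyEstimator is a
# hypothetical user-defined class, not part of this module):
#     from sklearn.utils.estimator_checks import check_estimator
#     check_estimator(MyEstimator)  # raises AssertionError on API violations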
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
name = NoSparseClassifier.__name__
msg = "Estimator " + name + " doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(AdaBoostClassifier)
check_estimator(MultiTaskElasticNet)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
bzero/bitex | libs/coinkit/coinkit/words.py | 11 | 726962 | # -*- coding: utf-8 -*-
"""
Coinkit
~~~~~
:copyright: (c) 2014 by Halfmoon Labs
:license: MIT, see LICENSE for more details.
"""
TOP_ENGLISH_WORDS = ["the", "of", "and", "to", "a", "in", "for", "is", "on", "that", "by", "this", "with", "i", "you", "it", "not", "or", "be", "are", "from", "at", "as", "your", "all", "have", "new", "more", "an", "was", "we", "will", "home", "can", "us", "about", "if", "page", "my", "has", "search", "free", "but", "our", "one", "other", "do", "no", "information", "time", "they", "site", "he", "up", "may", "what", "which", "their", "news", "out", "use", "any", "there", "see", "only", "so", "his", "when", "contact", "here", "business", "who", "web", "also", "now", "help", "get", "view", "online", "c", "e", "first", "am", "been", "would", "how", "were", "me", "s", "services", "some", "these", "click", "its", "like", "service", "x", "than", "find", "price", "date", "back", "top", "people", "had", "list", "name", "just", "over", "state", "year", "day", "into", "email", "two", "health", "n", "world", "re", "next", "used", "go", "b", "work", "last", "most", "products", "music", "buy", "data", "make", "them", "should", "product", "system", "post", "her", "city", "t", "add", "policy", "number", "such", "please", "available", "copyright", "support", "message", "after", "best", "software", "then", "jan", "good", "well", "d", "where", "rights", "public", "books", "high", "school", "through", "m", "each", "links", "she", "review", "years", "order", "very", "privacy", "book", "items", "company", "r", "read", "group", "sex", "need", "many", "user", "said", "de", "does", "set", "under", "general", "research", "university", "january", "mail", "full", "map", "reviews", "program", "life", "know", "games", "way", "days", "management", "p", "part", "could", "great", "united", "hotel", "real", "f", "item", "international", "center", "must", "store", "travel", "comments", "made", "development", "report", "off", "member", "details", "line", "terms", "before", "hotels", "did", "send", "right", "type", "because", "local", "those", "using", "results", "office", "education", "national", "car", "design", "take", "posted", "internet", "address", "community", "within", "states", "area", "want", "phone", "shipping", "reserved", "subject", "between", "forum", "family", "l", "long", "based", "w", "code", "show", "o", "even", "black", "check", "special", "prices", "index", "being", "women", "much", "sign", "file", "link", "open", "today", "technology", "south", "case", "project", "same", "pages", "uk", "version", "section", "own", "found", "sports", "house", "related", "security", "both", "g", "county", "american", "photo", "game", "members", "power", "while", "care", "network", "down", "computer", "systems", "three", "total", "place", "end", "following", "download", "h", "him", "without", "per", "access", "think", "north", "resources", "current", "posts", "big", "media", "law", "control", "water", "history", "pictures", "size", "art", "personal", "since", "including", "guide", "shop", "directory", "board", "location", "change", "white", "text", "small", "rating", "rate", "government", "children", "during", "usa", "return", "students", "v", "shopping", "account", "times", "sites", "level", "digital", "profile", "previous", "form", "events", "love", "old", "john", "main", "call", "hours", "image", "department", "title", "description", "non", "k", "y", "insurance", "another", "why", "shall", "property", "class", "cd", "still", "money", "quality", "every", "listing", "content", "country", "private", "little", "visit", "save", "tools", "low", "reply", "customer", "december", "compare", "movies", "include", "college", "value", 
"article", "york", "man", "card", "jobs", "provide", "j", "food", "source", "author", "different", "press", "u", "learn", "sale", "around", "print", "course", "job", "canada", "process", "teen", "room", "stock", "training", "too", "credit", "point", "join", "science", "men", "categories", "advanced", "west", "sales", "look", "english", "left", "team", "estate", "box", "conditions", "select", "windows", "gay", "thread", "week", "category", "note", "live", "large", "gallery", "table", "register", "however", "june", "october", "november", "market", "library", "really", "action", "start", "series", "model", "features", "air", "industry", "plan", "human", "provided", "tv", "yes", "required", "second", "hot", "accessories", "cost", "movie", "march", "la", "september", "better", "say", "questions", "july", "going", "medical", "test", "friend", "come", "dec", "study", "application", "cart", "staff", "articles", "san", "again", "play", "looking", "issues", "april", "never", "users", "complete", "street", "topic", "comment", "financial", "things", "working", "against", "standard", "tax", "person", "below", "mobile", "less", "got", "party", "payment", "equipment", "login", "student", "let", "programs", "offers", "legal", "above", "recent", "park", "stores", "side", "act", "problem", "red", "give", "memory", "performance", "social", "q", "august", "quote", "language", "story", "sell", "experience", "rates", "create", "key", "body", "young", "america", "important", "field", "few", "east", "paper", "single", "ii", "age", "activities", "club", "example", "girls", "additional", "password", "z", "latest", "something", "road", "gift", "question", "changes", "night", "ca", "hard", "texas", "oct", "pay", "four", "poker", "status", "browse", "issue", "range", "building", "seller", "court", "february", "always", "result", "light", "write", "war", "nov", "offer", "blue", "groups", "al", "easy", "given", "files", "event", "release", "analysis", "request", "china", "making", "picture", "needs", "possible", "might", "professional", "yet", "month", "major", "star", "areas", "future", "space", "committee", "hand", "sun", "cards", "problems", "london", "washington", "meeting", "become", "interest", "id", "child", "keep", "enter", "california", "share", "similar", "garden", "schools", "million", "added", "reference", "companies", "listed", "baby", "learning", "energy", "run", "delivery", "net", "popular", "term", "film", "stories", "put", "computers", "journal", "reports", "co", "try", "welcome", "central", "images", "president", "notice", "god", "original", "head", "radio", "until", "cell", "color", "self", "council", "away", "includes", "track", "australia", "discussion", "archive", "once", "others", "entertainment", "agreement", "format", "least", "society", "months", "log", "safety", "friends", "sure", "trade", "edition", "cars", "messages", "marketing", "tell", "further", "updated", "association", "able", "having", "provides", "david", "fun", "already", "green", "studies", "close", "common", "drive", "specific", "several", "gold", "feb", "living", "collection", "called", "short", "arts", "lot", "ask", "display", "limited", "solutions", "means", "director", "daily", "beach", "past", "natural", "whether", "due", "et", "five", "upon", "period", "planning", "says", "official", "weather", "mar", "land", "average", "done", "technical", "window", "france", "pro", "region", "island", "record", "direct", "conference", "environment", "records", "st", "district", "calendar", "costs", "style", "front", "statement", "parts", 
"aug", "ever", "early", "miles", "sound", "resource", "present", "applications", "either", "ago", "document", "word", "works", "material", "bill", "written", "talk", "federal", "rules", "final", "adult", "tickets", "thing", "centre", "requirements", "via", "cheap", "nude", "kids", "finance", "true", "minutes", "else", "mark", "third", "rock", "gifts", "europe", "reading", "topics", "bad", "individual", "tips", "plus", "auto", "cover", "usually", "edit", "together", "percent", "fast", "function", "fact", "unit", "getting", "global", "meet", "far", "economic", "en", "player", "projects", "lyrics", "often", "subscribe", "submit", "germany", "amount", "watch", "included", "feel", "though", "bank", "risk", "thanks", "everything", "deals", "various", "words", "jul", "production", "commercial", "james", "weight", "town", "heart", "advertising", "received", "choose", "treatment", "newsletter", "archives", "points", "knowledge", "magazine", "error", "camera", "girl", "currently", "construction", "toys", "registered", "clear", "golf", "receive", "domain", "methods", "chapter", "makes", "protection", "policies", "loan", "wide", "beauty", "manager", "india", "position", "taken", "sort", "models", "michael", "known", "half", "cases", "step", "engineering", "florida", "simple", "quick", "none", "wireless", "license", "paul", "friday", "lake", "whole", "annual", "published", "later", "basic", "shows", "corporate", "church", "method", "purchase", "customers", "active", "response", "practice", "hardware", "figure", "materials", "fire", "holiday", "chat", "enough", "designed", "along", "among", "death", "writing", "speed", "html", "countries", "loss", "face", "brand", "discount", "higher", "effects", "created", "remember", "standards", "oil", "bit", "yellow", "political", "increase", "advertise", "kingdom", "base", "near", "thought", "stuff", "french", "storage", "oh", "japan", "doing", "loans", "shoes", "entry", "stay", "nature", "orders", "availability", "africa", "summary", "turn", "mean", "growth", "notes", "agency", "king", "monday", "european", "activity", "copy", "although", "drug", "western", "income", "force", "cash", "employment", "overall", "bay", "river", "commission", "ad", "package", "contents", "seen", "players", "engine", "port", "album", "regional", "stop", "supplies", "started", "administration", "bar", "institute", "views", "plans", "double", "dog", "build", "screen", "exchange", "types", "soon", "lines", "electronic", "continue", "across", "benefits", "needed", "season", "apply", "someone", "held", "ny", "anything", "printer", "condition", "effective", "believe", "organization", "effect", "asked", "mind", "sunday", "selection", "casino", "lost", "tour", "menu", "volume", "cross", "anyone", "mortgage", "hope", "silver", "corporation", "wish", "inside", "solution", "mature", "role", "rather", "weeks", "addition", "came", "supply", "nothing", "certain", "executive", "running", "lower", "necessary", "union", "jewelry", "according", "dc", "clothing", "mon", "com", "particular", "fine", "names", "robert", "hour", "gas", "skills", "six", "bush", "islands", "advice", "career", "military", "rental", "decision", "leave", "british", "teens", "pre", "huge", "sat", "woman", "facilities", "zip", "bid", "kind", "sellers", "middle", "move", "cable", "opportunities", "taking", "values", "division", "coming", "tuesday", "object", "appropriate", "machine", "length", "actually", "nice", "score", "statistics", "client", "ok", "returns", "capital", "follow", "sample", "investment", "sent", "shown", 
"saturday", "christmas", "england", "culture", "band", "flash", "ms", "lead", "george", "choice", "went", "starting", "registration", "fri", "thursday", "courses", "consumer", "hi", "foreign", "artist", "outside", "furniture", "levels", "channel", "letter", "mode", "ideas", "wednesday", "structure", "fund", "summer", "allow", "degree", "contract", "button", "releases", "wed", "homes", "super", "male", "matter", "custom", "virginia", "almost", "took", "located", "multiple", "asian", "distribution", "editor", "inn", "industrial", "cause", "potential", "song", "ltd", "los", "focus", "late", "fall", "featured", "idea", "rooms", "female", "responsible", "inc", "communications", "win", "associated", "thomas", "primary", "cancer", "numbers", "reason", "tool", "browser", "spring", "foundation", "answer", "voice", "friendly", "schedule", "documents", "communication", "purpose", "feature", "bed", "comes", "police", "everyone", "independent", "approach", "brown", "physical", "operating", "hill", "maps", "medicine", "deal", "hold", "chicago", "forms", "glass", "happy", "tue", "smith", "wanted", "developed", "thank", "safe", "unique", "survey", "prior", "telephone", "sport", "ready", "feed", "animal", "sources", "mexico", "population", "pa", "regular", "secure", "navigation", "operations", "therefore", "ass", "simply", "evidence", "station", "christian", "round", "favorite", "understand", "option", "master", "valley", "recently", "probably", "sea", "built", "publications", "blood", "cut", "improve", "connection", "publisher", "hall", "larger", "networks", "earth", "parents", "impact", "transfer", "introduction", "kitchen", "strong", "tel", "carolina", "wedding", "properties", "hospital", "ground", "overview", "ship", "accommodation", "owners", "disease", "excellent", "paid", "italy", "perfect", "hair", "opportunity", "kit", "classic", "basis", "command", "cities", "william", "express", "award", "distance", "tree", "peter", "assessment", "ensure", "thus", "wall", "ie", "involved", "el", "extra", "especially", "pussy", "partners", "budget", "rated", "guides", "success", "maximum", "ma", "operation", "existing", "quite", "selected", "boy", "amazon", "patients", "restaurants", "beautiful", "warning", "wine", "locations", "horse", "vote", "forward", "flowers", "stars", "significant", "lists", "owner", "retail", "animals", "useful", "directly", "manufacturer", "ways", "est", "son", "providing", "rule", "mac", "housing", "takes", "iii", "bring", "catalog", "searches", "max", "trying", "mother", "authority", "considered", "told", "traffic", "programme", "joined", "strategy", "feet", "agent", "valid", "bin", "modern", "senior", "ireland", "teaching", "door", "grand", "testing", "trial", "charge", "units", "instead", "canadian", "cool", "normal", "wrote", "enterprise", "ships", "entire", "educational", "md", "leading", "metal", "positive", "fl", "fitness", "chinese", "opinion", "asia", "football", "abstract", "uses", "output", "funds", "mr", "greater", "likely", "develop", "employees", "artists", "alternative", "processing", "responsibility", "resolution", "java", "guest", "seems", "publication", "pass", "relations", "trust", "van", "contains", "session", "photography", "republic", "fees", "components", "vacation", "century", "academic", "assistance", "completed", "skin", "indian", "mary", "il", "expected", "ring", "grade", "dating", "pacific", "mountain", "organizations", "pop", "filter", "mailing", "vehicle", "longer", "consider", "int", "northern", "behind", "panel", "floor", "german", "buying", "match", 
"proposed", "default", "require", "iraq", "boys", "outdoor", "deep", "morning", "otherwise", "allows", "rest", "protein", "plant", "reported", "hit", "transportation", "mm", "pool", "politics", "partner", "disclaimer", "authors", "boards", "faculty", "parties", "fish", "membership", "mission", "eye", "string", "sense", "modified", "pack", "released", "stage", "internal", "goods", "recommended", "born", "unless", "richard", "detailed", "japanese", "race", "approved", "background", "target", "except", "character", "maintenance", "ability", "maybe", "functions", "ed", "moving", "brands", "places", "pretty", "spain", "southern", "yourself", "etc", "winter", "rape", "battery", "youth", "pressure", "submitted", "boston", "incest", "debt", "medium", "television", "interested", "core", "break", "purposes", "throughout", "sets", "dance", "wood", "itself", "defined", "papers", "playing", "awards", "fee", "studio", "reader", "virtual", "device", "established", "answers", "rent", "las", "remote", "dark", "external", "apple", "le", "regarding", "instructions", "min", "offered", "theory", "enjoy", "remove", "aid", "surface", "minimum", "visual", "host", "variety", "teachers", "martin", "manual", "block", "subjects", "agents", "increased", "repair", "fair", "civil", "steel", "understanding", "songs", "fixed", "wrong", "beginning", "hands", "associates", "finally", "classes", "paris", "ohio", "gets", "sector", "capacity", "requires", "jersey", "un", "fat", "fully", "father", "electric", "saw", "instruments", "quotes", "officer", "driver", "businesses", "dead", "respect", "unknown", "specified", "restaurant", "mike", "trip", "worth", "mi", "procedures", "poor", "teacher", "xxx", "eyes", "relationship", "workers", "farm", "georgia", "peace", "traditional", "campus", "tom", "showing", "creative", "coast", "benefit", "progress", "funding", "devices", "lord", "grant", "sub", "agree", "fiction", "hear", "sometimes", "watches", "careers", "beyond", "goes", "families", "led", "museum", "themselves", "fan", "transport", "interesting", "wife", "accepted", "former", "ten", "hits", "zone", "complex", "th", "cat", "galleries", "references", "die", "presented", "jack", "flat", "flow", "agencies", "literature", "respective", "parent", "spanish", "michigan", "columbia", "setting", "dr", "scale", "stand", "economy", "highest", "helpful", "monthly", "critical", "frame", "musical", "definition", "secretary", "path", "employee", "chief", "gives", "bottom", "magazines", "packages", "detail", "francisco", "laws", "changed", "pet", "heard", "begin", "individuals", "colorado", "royal", "clean", "switch", "russian", "largest", "african", "guy", "titles", "relevant", "guidelines", "justice", "bible", "cup", "basket", "applied", "weekly", "vol", "installation", "described", "demand", "pp", "suite", "na", "square", "chris", "attention", "advance", "skip", "diet", "army", "auction", "gear", "lee", "os", "difference", "allowed", "correct", "charles", "nation", "selling", "lots", "piece", "sheet", "firm", "seven", "older", "illinois", "regulations", "elements", "species", "jump", "cells", "resort", "facility", "random", "certificate", "minister", "motion", "looks", "fashion", "directions", "visitors", "monitor", "trading", "forest", "calls", "whose", "couple", "giving", "chance", "vision", "ball", "ending", "clients", "actions", "listen", "discuss", "accept", "naked", "goal", "successful", "sold", "wind", "communities", "clinical", "situation", "sciences", "markets", "lowest", "highly", "publishing", "appear", "emergency", "lives", 
"currency", "leather", "determine", "temperature", "palm", "announcements", "patient", "actual", "historical", "stone", "bob", "commerce", "perhaps", "persons", "difficult", "scientific", "satellite", "fit", "tests", "village", "accounts", "amateur", "ex", "met", "pain", "particularly", "factors", "coffee", "cum", "buyer", "cultural", "steve", "easily", "oral", "ford", "poster", "edge", "functional", "root", "au", "fi", "closed", "holidays", "ice", "pink", "zealand", "balance", "graduate", "replies", "shot", "architecture", "initial", "label", "thinking", "scott", "sec", "recommend", "canon", "league", "waste", "minute", "bus", "optional", "dictionary", "cold", "accounting", "manufacturing", "sections", "chair", "fishing", "effort", "phase", "fields", "bag", "fantasy", "po", "letters", "motor", "va", "professor", "context", "install", "shirt", "apparel", "generally", "continued", "foot", "mass", "crime", "count", "breast", "ibm", "johnson", "sc", "quickly", "dollars", "religion", "claim", "driving", "permission", "surgery", "patch", "heat", "wild", "measures", "generation", "kansas", "miss", "chemical", "doctor", "task", "reduce", "brought", "himself", "nor", "component", "enable", "exercise", "bug", "santa", "mid", "guarantee", "leader", "diamond", "israel", "se", "processes", "soft", "alone", "meetings", "seconds", "jones", "arizona", "interests", "flight", "congress", "fuel", "walk", "produced", "italian", "wait", "supported", "pocket", "saint", "rose", "freedom", "argument", "competition", "creating", "jim", "drugs", "joint", "premium", "fresh", "characters", "attorney", "di", "factor", "growing", "thousands", "km", "stream", "apartments", "pick", "hearing", "eastern", "entries", "dates", "generated", "signed", "upper", "administrative", "serious", "prime", "limit", "began", "louis", "steps", "errors", "shops", "bondage", "del", "efforts", "informed", "ga", "ac", "thoughts", "creek", "ft", "worked", "quantity", "urban", "practices", "sorted", "reporting", "essential", "myself", "tours", "platform", "load", "labor", "immediately", "nursing", "defense", "machines", "tags", "heavy", "covered", "recovery", "joe", "guys", "configuration", "cock", "merchant", "comprehensive", "expert", "universal", "protect", "drop", "solid", "presentation", "languages", "became", "orange", "compliance", "vehicles", "prevent", "theme", "rich", "im", "campaign", "marine", "improvement", "vs", "guitar", "finding", "pennsylvania", "examples", "saying", "spirit", "ar", "claims", "challenge", "acceptance", "mo", "seem", "affairs", "touch", "intended", "towards", "sa", "goals", "hire", "election", "suggest", "branch", "charges", "serve", "reasons", "magic", "mount", "smart", "talking", "gave", "ones", "latin", "avoid", "certified", "manage", "corner", "rank", "computing", "oregon", "element", "birth", "virus", "abuse", "requests", "separate", "quarter", "procedure", "leadership", "tables", "define", "racing", "religious", "facts", "breakfast", "kong", "column", "plants", "faith", "chain", "identify", "avenue", "missing", "died", "approximately", "domestic", "recommendations", "moved", "houston", "reach", "comparison", "mental", "viewed", "moment", "extended", "sequence", "inch", "attack", "sorry", "centers", "opening", "damage", "reserve", "recipes", "plastic", "produce", "snow", "placed", "truth", "counter", "failure", "follows", "eu", "dollar", "camp", "ontario", "automatically", "des", "minnesota", "films", "bridge", "native", "fill", "williams", "movement", "printing", "baseball", "owned", "approval", "draft", 
"chart", "played", "contacts", "cc", "jesus", "readers", "clubs", "wa", "jackson", "equal", "adventure", "matching", "offering", "shirts", "profit", "leaders", "posters", "institutions", "assistant", "variable", "ave", "advertisement", "expect", "headlines", "yesterday", "compared", "determined", "wholesale", "workshop", "russia", "gone", "codes", "kinds", "extension", "seattle", "statements", "golden", "completely", "teams", "fort", "cm", "wi", "lighting", "senate", "forces", "funny", "brother", "gene", "turned", "portable", "tried", "electrical", "applicable", "disc", "returned", "pattern", "boat", "named", "theatre", "earlier", "manufacturers", "sponsor", "classical", "warranty", "dedicated", "indiana", "direction", "harry", "objects", "ends", "delete", "evening", "assembly", "nuclear", "taxes", "mouse", "signal", "criminal", "issued", "brain", "sexual", "wisconsin", "powerful", "dream", "obtained", "false", "da", "cast", "flower", "felt", "personnel", "passed", "supplied", "identified", "falls", "pic", "soul", "aids", "opinions", "promote", "stated", "professionals", "appears", "carry", "flag", "decided", "covers", "hr", "em", "advantage", "hello", "designs", "maintain", "tourism", "priority", "newsletters", "adults", "savings", "iv", "graphic", "atom", "payments", "estimated", "binding", "brief", "ended", "winning", "eight", "anonymous", "iron", "straight", "script", "served", "wants", "miscellaneous", "prepared", "void", "dining", "alert", "integration", "atlanta", "dakota", "tag", "interview", "mix", "framework", "disk", "installed", "queen", "credits", "clearly", "fix", "handle", "sweet", "desk", "dave", "massachusetts", "diego", "hong", "vice", "associate", "ne", "truck", "behavior", "enlarge", "ray", "frequently", "revenue", "measure", "changing", "votes", "du", "duty", "looked", "discussions", "bear", "gain", "festival", "laboratory", "ocean", "flights", "experts", "signs", "lack", "depth", "iowa", "whatever", "vintage", "train", "exactly", "dry", "explore", "maryland", "spa", "concept", "nearly", "eligible", "reality", "forgot", "handling", "origin", "knew", "gaming", "feeds", "billion", "destination", "scotland", "faster", "intelligence", "dallas", "bought", "con", "ups", "nations", "route", "followed", "specifications", "broken", "frank", "alaska", "blow", "battle", "residential", "speak", "decisions", "industries", "protocol", "query", "clip", "partnership", "editorial", "nt", "expression", "es", "equity", "provisions", "speech", "wire", "principles", "suggestions", "rural", "shared", "sounds", "replacement", "tape", "strategic", "judge", "economics", "acid", "cent", "forced", "compatible", "fight", "apartment", "height", "null", "zero", "speaker", "filed", "netherlands", "obtain", "recreation", "offices", "designer", "remain", "managed", "pr", "failed", "marriage", "roll", "korea", "banks", "fr", "participants", "secret", "bath", "kelly", "leads", "negative", "austin", "favorites", "toronto", "theater", "springs", "missouri", "andrew", "var", "perform", "healthy", "translation", "estimates", "font", "assets", "injury", "mt", "joseph", "ministry", "drivers", "lawyer", "figures", "married", "protected", "proposal", "sharing", "philadelphia", "portal", "waiting", "birthday", "beta", "fail", "gratis", "banking", "officials", "brian", "toward", "won", "slightly", "assist", "conduct", "contained", "legislation", "calling", "serving", "bags", "miami", "comics", "matters", "houses", "doc", "postal", "relationships", "tennessee", "wear", "controls", "breaking", "combined", 
"ultimate", "wales", "representative", "frequency", "introduced", "minor", "finish", "departments", "residents", "noted", "displayed", "reduced", "physics", "rare", "spent", "performed", "extreme", "samples", "davis", "daniel", "bars", "reviewed", "row", "oz", "forecast", "removed", "helps", "administrator", "cycle", "contain", "accuracy", "dual", "rise", "sleep", "bird", "brazil", "creation", "static", "scene", "hunter", "addresses", "lady", "crystal", "famous", "writer", "chairman", "violence", "fans", "oklahoma", "speakers", "drink", "academy", "dynamic", "gender", "eat", "permanent", "agriculture", "dell", "cleaning", "portfolio", "practical", "delivered", "exclusive", "seat", "concerns", "colour", "vendor", "originally", "utilities", "philosophy", "regulation", "officers", "reduction", "aim", "bids", "referred", "supports", "nutrition", "recording", "regions", "junior", "toll", "les", "cape", "ann", "rings", "meaning", "tip", "secondary", "wonderful", "mine", "ladies", "henry", "ticket", "announced", "guess", "agreed", "prevention", "whom", "ski", "import", "posting", "presence", "instant", "mentioned", "automatic", "viewing", "maintained", "ch", "increasing", "majority", "connected", "christ", "dan", "dogs", "sd", "directors", "aspects", "austria", "ahead", "moon", "participation", "scheme", "utility", "fly", "manner", "matrix", "containing", "combination", "amendment", "despite", "strength", "guaranteed", "turkey", "libraries", "proper", "distributed", "degrees", "singapore", "enterprises", "delta", "fear", "seeking", "inches", "phoenix", "convention", "shares", "principal", "daughter", "standing", "comfort", "colors", "wars", "ordering", "kept", "alpha", "appeal", "cruise", "bonus", "previously", "hey", "buildings", "beat", "disney", "household", "batteries", "adobe", "smoking", "becomes", "drives", "arms", "alabama", "tea", "improved", "trees", "achieve", "positions", "dress", "subscription", "dealer", "contemporary", "sky", "utah", "nearby", "rom", "carried", "happen", "exposure", "hide", "signature", "gambling", "refer", "miller", "provision", "outdoors", "clothes", "caused", "luxury", "babes", "frames", "certainly", "indeed", "newspaper", "toy", "circuit", "layer", "printed", "slow", "removal", "easier", "liability", "trademark", "hip", "printers", "nine", "adding", "kentucky", "mostly", "eric", "spot", "taylor", "prints", "spend", "factory", "interior", "grow", "americans", "optical", "promotion", "relative", "amazing", "clock", "dot", "hiv", "identity", "suites", "conversion", "feeling", "hidden", "reasonable", "victoria", "serial", "relief", "revision", "influence", "ratio", "importance", "rain", "onto", "planet", "copies", "recipe", "zum", "permit", "seeing", "proof", "tennis", "bass", "prescription", "bedroom", "empty", "instance", "hole", "pets", "ride", "licensed", "orlando", "specifically", "tim", "bureau", "maine", "represent", "conservation", "pair", "ideal", "recorded", "don", "pieces", "finished", "parks", "dinner", "lawyers", "sydney", "stress", "cream", "runs", "trends", "discover", "ap", "patterns", "boxes", "louisiana", "hills", "fourth", "nm", "advisor", "mn", "marketplace", "nd", "evil", "aware", "wilson", "shape", "evolution", "irish", "certificates", "objectives", "stations", "suggested", "op", "remains", "greatest", "firms", "concerned", "operator", "structures", "generic", "usage", "cap", "ink", "charts", "continuing", "mixed", "census", "peak", "competitive", "exist", "wheel", "transit", "dick", "salt", "compact", "poetry", "lights", "tracking", "angel", 
"bell", "keeping", "preparation", "attempt", "receiving", "matches", "accordance", "width", "noise", "engines", "forget", "array", "discussed", "accurate", "stephen", "elizabeth", "climate", "reservations", "pin", "alcohol", "greek", "instruction", "managing", "sister", "raw", "differences", "walking", "explain", "smaller", "newest", "establish", "happened", "expressed", "jeff", "extent", "sharp", "ben", "lane", "paragraph", "kill", "mathematics", "compensation", "ce", "export", "managers", "aircraft", "sweden", "conflict", "conducted", "versions", "employer", "occur", "percentage", "knows", "mississippi", "describe", "concern", "requested", "citizens", "connecticut", "heritage", "immediate", "holding", "trouble", "spread", "coach", "agricultural", "expand", "supporting", "audience", "assigned", "jordan", "collections", "ages", "participate", "plug", "specialist", "cook", "affect", "virgin", "experienced", "investigation", "raised", "hat", "institution", "directed", "dealers", "searching", "sporting", "helping", "affected", "lib", "totally", "plate", "expenses", "indicate", "blonde", "ab", "proceedings", "favourite", "transmission", "anderson", "characteristics", "der", "lose", "organic", "seek", "experiences", "cheats", "extremely", "contracts", "guests", "diseases", "concerning", "equivalent", "chemistry", "tony", "neighborhood", "nevada", "thailand", "anyway", "continues", "tracks", "advisory", "cam", "curriculum", "logic", "prince", "circle", "soil", "grants", "anywhere", "psychology", "responses", "atlantic", "wet", "circumstances", "edward", "identification", "ram", "leaving", "appliances", "matt", "cooking", "speaking", "fox", "respond", "sizes", "plain", "exit", "entered", "iran", "arm", "keys", "launch", "wave", "checking", "costa", "belgium", "holy", "acts", "guidance", "mesh", "trail", "enforcement", "symbol", "crafts", "highway", "buddy", "observed", "dean", "poll", "glossary", "fiscal", "celebrity", "styles", "denver", "unix", "filled", "bond", "channels", "appendix", "notify", "blues", "chocolate", "pub", "portion", "scope", "hampshire", "cables", "cotton", "controlled", "requirement", "authorities", "biology", "dental", "killed", "border", "ancient", "debate", "representatives", "starts", "pregnancy", "causes", "arkansas", "biography", "leisure", "attractions", "learned", "transactions", "notebook", "explorer", "historic", "attached", "opened", "husband", "disabled", "authorized", "crazy", "britain", "concert", "retirement", "financing", "efficiency", "sp", "comedy", "adopted", "efficient", "linear", "commitment", "specialty", "bears", "jean", "hop", "carrier", "edited", "constant", "visa", "mouth", "jewish", "meter", "linked", "portland", "interviews", "concepts", "gun", "reflect", "pure", "deliver", "wonder", "hell", "lessons", "fruit", "begins", "qualified", "reform", "lens", "treated", "discovery", "draw", "classified", "relating", "assume", "confidence", "alliance", "fm", "confirm", "warm", "neither", "lewis", "howard", "leaves", "engineer", "consistent", "replace", "clearance", "connections", "inventory", "suck", "organisation", "babe", "checks", "reached", "becoming", "objective", "indicated", "sugar", "crew", "legs", "sam", "stick", "securities", "allen", "relation", "enabled", "genre", "slide", "montana", "volunteer", "tested", "rear", "democratic", "enhance", "switzerland", "exact", "bound", "formal", "dimensions", "contribute", "lock", "storm", "colleges", "mile", "showed", "challenges", "editors", "mens", "threads", "bowl", "supreme", "brothers", "recognition", 
"presents", "ref", "tank", "submission", "dolls", "estimate", "encourage", "navy", "kid", "inspection", "consumers", "cancel", "limits", "territory", "transaction", "manchester", "weapons", "paint", "delay", "pilot", "outlet", "contributions", "continuous", "czech", "resulting", "cambridge", "initiative", "novel", "pan", "execution", "disability", "increases", "ultra", "winner", "idaho", "contractor", "episode", "examination", "potter", "dish", "plays", "bulletin", "ia", "pt", "indicates", "modify", "oxford", "adam", "truly", "painting", "committed", "extensive", "universe", "candidate", "patent", "slot", "outstanding", "ha", "eating", "perspective", "planned", "watching", "lodge", "messenger", "mirror", "tournament", "consideration", "sterling", "sessions", "kernel", "stocks", "buyers", "journals", "gray", "catalogue", "ea", "antonio", "charged", "broad", "taiwan", "und", "chosen", "greece", "swiss", "sarah", "clark", "labour", "hate", "terminal", "publishers", "nights", "behalf", "caribbean", "liquid", "rice", "nebraska", "loop", "salary", "reservation", "foods", "guard", "properly", "orleans", "saving", "remaining", "empire", "resume", "twenty", "newly", "raise", "prepare", "gary", "depending", "illegal", "expansion", "vary", "hundreds", "rome", "arab", "lincoln", "helped", "premier", "tomorrow", "purchased", "milk", "decide", "consent", "drama", "visiting", "performing", "downtown", "keyboard", "contest", "collected", "nw", "bands", "boot", "suitable", "ff", "absolutely", "millions", "lunch", "audit", "push", "chamber", "guinea", "findings", "muscle", "iso", "implement", "clicking", "scheduled", "polls", "typical", "tower", "yours", "sum", "significantly", "chicken", "temporary", "attend", "shower", "alan", "sending", "jason", "tonight", "dear", "sufficient", "shell", "province", "catholic", "oak", "vat", "vancouver", "governor", "beer", "seemed", "contribution", "measurement", "swimming", "formula", "constitution", "solar", "jose", "catch", "jane", "pakistan", "ps", "reliable", "consultation", "northwest", "sir", "doubt", "earn", "finder", "unable", "periods", "classroom", "tasks", "democracy", "attacks", "kim", "merchandise", "const", "resistance", "doors", "symptoms", "resorts", "memorial", "visitor", "twin", "forth", "insert", "baltimore", "gateway", "ky", "dont", "drawing", "candidates", "charlotte", "ordered", "biological", "fighting", "transition", "happens", "preferences", "spy", "romance", "instrument", "bruce", "split", "themes", "powers", "heaven", "br", "bits", "pregnant", "twice", "classification", "focused", "egypt", "physician", "bargain", "cellular", "norway", "vermont", "asking", "blocks", "normally", "lo", "spiritual", "hunting", "suit", "shift", "chip", "res", "sit", "bodies", "photographs", "cutting", "simon", "writers", "marks", "flexible", "loved", "favourites", "numerous", "relatively", "birds", "satisfaction", "represents", "char", "pittsburgh", "superior", "preferred", "saved", "paying", "cartoon", "shots", "intellectual", "moore", "granted", "choices", "carbon", "spending", "comfortable", "magnetic", "interaction", "listening", "effectively", "registry", "crisis", "outlook", "massive", "denmark", "employed", "bright", "treat", "header", "cs", "poverty", "formed", "piano", "echo", "que", "sheets", "patrick", "experimental", "puerto", "revolution", "consolidation", "displays", "allowing", "earnings", "mystery", "landscape", "dependent", "mechanical", "journey", "delaware", "bidding", "risks", "banner", "applicant", "charter", "fig", "barbara", "cooperation", 
"counties", "acquisition", "ports", "directories", "recognized", "dreams", "notification", "licensing", "stands", "teach", "occurred", "rapid", "pull", "hairy", "diversity", "cleveland", "ut", "reverse", "deposit", "investments", "wheels", "specify", "dutch", "sensitive", "formats", "depends", "boots", "holds", "si", "editing", "poland", "completion", "pulse", "universities", "technique", "contractors", "voting", "courts", "notices", "subscriptions", "calculate", "detroit", "alexander", "broadcast", "converted", "anniversary", "improvements", "strip", "specification", "pearl", "accident", "nick", "accessible", "accessory", "resident", "plot", "possibly", "typically", "representation", "regard", "pump", "exists", "arrangements", "smooth", "conferences", "strike", "consumption", "birmingham", "flashing", "narrow", "afternoon", "threat", "surveys", "sitting", "putting", "controller", "ownership", "committees", "penis", "legislative", "vietnam", "trailer", "anne", "castle", "gardens", "missed", "malaysia", "antique", "labels", "willing", "molecular", "acting", "heads", "stored", "residence", "attorneys", "antiques", "density", "hundred", "ryan", "operators", "strange", "philippines", "statistical", "beds", "breasts", "mention", "innovation", "employers", "grey", "parallel", "amended", "operate", "bills", "bold", "bathroom", "stable", "opera", "definitions", "von", "doctors", "lesson", "asset", "scan", "elections", "drinking", "reaction", "blank", "enhanced", "entitled", "severe", "generate", "stainless", "newspapers", "hospitals", "vi", "humor", "aged", "exception", "lived", "duration", "bulk", "successfully", "indonesia", "pursuant", "fabric", "visits", "primarily", "tight", "domains", "capabilities", "contrast", "recommendation", "flying", "sin", "berlin", "cute", "organized", "ba", "para", "adoption", "improving", "cr", "expensive", "meant", "capture", "pounds", "buffalo", "organisations", "plane", "pg", "explained", "seed", "programmes", "desire", "mechanism", "camping", "ee", "jewellery", "meets", "welfare", "peer", "caught", "eventually", "marked", "driven", "measured", "bottle", "agreements", "considering", "marshall", "massage", "rubber", "conclusion", "closing", "thousand", "meat", "legend", "grace", "susan", "ing", "adams", "monster", "alex", "bang", "villa", "bone", "columns", "disorders", "bugs", "collaboration", "hamilton", "detection", "ftp", "cookies", "inner", "formation", "med", "engineers", "entity", "gate", "holder", "proposals", "sw", "settlement", "portugal", "lawrence", "roman", "duties", "valuable", "erotic", "tone", "ethics", "forever", "dragon", "busy", "captain", "fantastic", "imagine", "brings", "heating", "leg", "neck", "hd", "wing", "governments", "purchasing", "appointed", "taste", "dealing", "commit", "tiny", "rail", "liberal", "jay", "trips", "gap", "sides", "tube", "turns", "corresponding", "descriptions", "cache", "belt", "jacket", "determination", "animation", "oracle", "er", "matthew", "lease", "productions", "aviation", "proud", "excess", "disaster", "console", "commands", "jr", "instructor", "giant", "achieved", "injuries", "shipped", "seats", "approaches", "alarm", "anthony", "usual", "loading", "stamps", "appeared", "franklin", "angle", "rob", "mining", "melbourne", "worst", "betting", "scientists", "liberty", "wyoming", "argentina", "era", "convert", "possibility", "commissioner", "dangerous", "garage", "exciting", "thongs", "unfortunately", "respectively", "volunteers", "attachment", "finland", "morgan", "derived", "pleasure", "honor", "asp", "eagle", 
"pants", "columbus", "nurse", "prayer", "appointment", "workshops", "hurricane", "quiet", "luck", "postage", "producer", "represented", "mortgages", "dial", "responsibilities", "cheese", "comic", "carefully", "jet", "productivity", "investors", "crown", "par", "underground", "diagnosis", "maker", "crack", "principle", "picks", "vacations", "gang", "calculated", "fetish", "appearance", "smoke", "apache", "incorporated", "craft", "cake", "apart", "fellow", "blind", "lounge", "mad", "coins", "andy", "gross", "strongly", "cafe", "valentine", "hilton", "ken", "horror", "su", "familiar", "capable", "douglas", "till", "involving", "pen", "investing", "christopher", "admission", "shoe", "elected", "carrying", "victory", "sand", "madison", "joy", "editions", "mainly", "ethnic", "ran", "parliament", "actor", "finds", "seal", "situations", "fifth", "citizen", "vertical", "corrections", "structural", "municipal", "describes", "prize", "sr", "occurs", "jon", "absolute", "disabilities", "consists", "substance", "prohibited", "addressed", "lies", "pipe", "soldiers", "guardian", "lecture", "simulation", "ill", "concentration", "classics", "lbs", "lay", "interpretation", "horses", "dirty", "deck", "wayne", "donate", "taught", "bankruptcy", "worker", "alive", "temple", "substances", "prove", "discovered", "wings", "breaks", "restrictions", "participating", "waters", "promise", "thin", "exhibition", "prefer", "ridge", "cabinet", "harris", "bringing", "sick", "dose", "tiffany", "tropical", "collect", "bet", "composition", "streets", "definitely", "shaved", "turning", "buffer", "purple", "existence", "commentary", "larry", "developments", "def", "immigration", "lets", "mutual", "necessarily", "syntax", "li", "attribute", "prison", "skill", "chairs", "nl", "everyday", "apparently", "surrounding", "mountains", "moves", "popularity", "inquiry", "checked", "exhibit", "throw", "trend", "sierra", "visible", "cats", "desert", "ya", "oldest", "rhode", "obviously", "mercury", "steven", "handbook", "greg", "navigate", "worse", "summit", "victims", "spaces", "fundamental", "burning", "escape", "somewhat", "receiver", "substantial", "tr", "progressive", "boats", "glance", "scottish", "championship", "arcade", "richmond", "sacramento", "impossible", "russell", "tells", "obvious", "fiber", "depression", "graph", "covering", "platinum", "judgment", "bedrooms", "talks", "filing", "foster", "passing", "awarded", "testimonials", "trials", "tissue", "nz", "clinton", "masters", "bonds", "cartridge", "explanation", "folk", "commons", "cincinnati", "subsection", "fraud", "electricity", "permitted", "spectrum", "arrival", "pottery", "emphasis", "roger", "aspect", "awesome", "mexican", "confirmed", "counts", "priced", "hist", "crash", "lift", "desired", "inter", "closer", "assumes", "heights", "shadow", "riding", "infection", "lisa", "expense", "grove", "venture", "korean", "healing", "princess", "mall", "entering", "packet", "spray", "studios", "dad", "buttons", "observations", "thompson", "winners", "extend", "roads", "subsequent", "pat", "dublin", "rolling", "fell", "yard", "disclosure", "establishment", "memories", "nelson", "te", "arrived", "creates", "faces", "tourist", "cocks", "av", "mayor", "murder", "sean", "adequate", "senator", "yield", "grades", "cartoons", "pour", "digest", "reg", "lodging", "tion", "dust", "hence", "entirely", "replaced", "rescue", "undergraduate", "losses", "combat", "reducing", "stopped", "occupation", "lakes", "butt", "donations", "associations", "closely", "radiation", "diary", "seriously", 
"kings", "shooting", "kent", "adds", "ear", "flags", "baker", "launched", "elsewhere", "pollution", "conservative", "shock", "effectiveness", "walls", "abroad", "ebony", "tie", "ward", "drawn", "arthur", "ian", "visited", "roof", "walker", "demonstrate", "atmosphere", "suggests", "kiss", "beast", "ra", "operated", "experiment", "targets", "overseas", "purchases", "dodge", "counsel", "federation", "invited", "yards", "assignment", "chemicals", "gordon", "mod", "farmers", "queries", "rush", "ukraine", "absence", "nearest", "cluster", "vendors", "whereas", "yoga", "serves", "woods", "surprise", "lamp", "rico", "partial", "phil", "everybody", "couples", "nashville", "ranking", "jokes", "http", "simpson", "sublime", "palace", "acceptable", "satisfied", "glad", "wins", "measurements", "verify", "globe", "trusted", "copper", "milwaukee", "rack", "warehouse", "ec", "rep", "kerry", "receipt", "supposed", "ordinary", "nobody", "ghost", "violation", "stability", "mit", "applying", "southwest", "boss", "pride", "expectations", "independence", "knowing", "reporter", "keith", "champion", "cloudy", "linda", "ross", "personally", "chile", "anna", "plenty", "solo", "sentence", "throat", "ignore", "maria", "uniform", "excellence", "wealth", "tall", "somewhere", "vacuum", "dancing", "attributes", "recognize", "brass", "writes", "plaza", "survival", "quest", "publish", "screening", "toe", "trans", "jonathan", "whenever", "nova", "lifetime", "pioneer", "booty", "forgotten", "plates", "acres", "venue", "athletic", "essays", "behaviour", "vital", "telling", "fairly", "coastal", "cf", "charity", "intelligent", "edinburgh", "vt", "excel", "modes", "obligation", "campbell", "wake", "stupid", "harbor", "hungary", "traveler", "segment", "realize", "lan", "enemy", "puzzle", "rising", "aluminum", "wells", "opens", "insight", "restricted", "republican", "secrets", "lucky", "latter", "merchants", "thick", "repeat", "philips", "attendance", "penalty", "drum", "glasses", "enables", "nec", "builder", "vista", "jessica", "chips", "terry", "flood", "ease", "arguments", "amsterdam", "orgy", "arena", "adventures", "pupils", "stewart", "announcement", "outcome", "xx", "appreciate", "expanded", "casual", "grown", "polish", "lovely", "extras", "centres", "jerry", "clause", "smile", "lands", "ri", "troops", "indoor", "bulgaria", "armed", "broker", "charger", "regularly", "believed", "pine", "cooling", "tend", "gulf", "rick", "trucks", "cp", "mechanisms", "divorce", "laura", "tokyo", "partly", "tradition", "candy", "pills", "tiger", "donald", "folks", "exposed", "hunt", "angels", "deputy", "sealed", "physicians", "loaded", "fred", "complaint", "scenes", "experiments", "balls", "afghanistan", "scholarship", "governance", "mill", "founded", "chronic", "moral", "den", "finger", "keeps", "pound", "locate", "pl", "trained", "burn", "roses", "ourselves", "bread", "tobacco", "wooden", "motors", "tough", "roberts", "incident", "gonna", "lie", "conversation", "decrease", "chest", "pension", "billy", "revenues", "emerging", "worship", "capability", "ak", "fe", "craig", "herself", "producing", "churches", "precision", "damages", "reserves", "contributed", "solve", "reproduction", "minority", "diverse", "ingredients", "sb", "ah", "johnny", "sole", "franchise", "recorder", "complaints", "facing", "nancy", "promotions", "tones", "passion", "rehabilitation", "maintaining", "sight", "laid", "clay", "defence", "patches", "weak", "refund", "towns", "divided", "reception", "wise", "cyprus", "odds", "correctly", "consequences", "makers", "hearts", 
"geography", "appearing", "integrity", "worry", "discrimination", "eve", "carter", "legacy", "marc", "pleased", "danger", "widely", "phrase", "genuine", "raising", "implications", "paradise", "hybrid", "reads", "roles", "emotional", "sons", "leaf", "pad", "glory", "platforms", "ja", "bigger", "versus", "combine", "overnight", "geographic", "exceed", "rod", "saudi", "fault", "cuba", "hrs", "preliminary", "districts", "introduce", "silk", "kate", "babies", "bi", "karen", "compiled", "romantic", "revealed", "specialists", "generator", "albert", "examine", "jimmy", "graham", "suspension", "bristol", "margaret", "sad", "correction", "wolf", "slowly", "communicate", "rugby", "supplement", "cal", "portions", "infant", "promoting", "samuel", "fluid", "grounds", "fits", "kick", "regards", "meal", "ta", "hurt", "machinery", "unlike", "equation", "baskets", "probability", "pot", "dimension", "wright", "barry", "proven", "admissions", "warren", "slip", "studied", "reviewer", "involves", "quarterly", "profits", "devil", "grass", "comply", "marie", "illustrated", "cherry", "continental", "alternate", "deutsch", "achievement", "limitations", "kenya", "cuts", "funeral", "earrings", "enjoyed", "chapters", "charlie", "quebec", "passenger", "convenient", "dennis", "mars", "francis", "sized", "noticed", "socket", "silent", "literary", "egg", "signals", "caps", "pill", "theft", "childhood", "swing", "symbols", "lat", "meta", "humans", "facial", "choosing", "talent", "dated", "flexibility", "seeker", "wisdom", "shoot", "boundary", "mint", "offset", "philip", "elite", "gi", "spin", "holders", "believes", "swedish", "poems", "jurisdiction", "displaying", "witness", "collins", "equipped", "stages", "encouraged", "sur", "winds", "powder", "broadway", "acquired", "wash", "cartridges", "stones", "entrance", "roots", "declaration", "losing", "attempts", "noble", "glasgow", "rev", "gospel", "advantages", "shore", "loves", "induced", "ll", "knight", "preparing", "loose", "aims", "recipient", "linking", "extensions", "appeals", "earned", "illness", "islamic", "athletics", "southeast", "ho", "alternatives", "pending", "parker", "determining", "lebanon", "kennedy", "sh", "soap", "ae", "triple", "cooper", "vincent", "jam", "secured", "unusual", "answered", "destruction", "increasingly", "migration", "disorder", "routine", "rocks", "conventional", "titans", "applicants", "wearing", "axis", "sought", "mounted", "habitat", "median", "guns", "herein", "animated", "horny", "judicial", "rio", "adjustment", "hero", "bachelor", "attitude", "engaged", "falling", "montreal", "carpet", "lenses", "binary", "attended", "difficulty", "collective", "coalition", "pi", "dropped", "duke", "walter", "ai", "pace", "besides", "wage", "producers", "ot", "collector", "arc", "hosts", "moments", "atlas", "strings", "dawn", "representing", "observation", "feels", "torture", "carl", "coat", "mitchell", "mrs", "restoration", "convenience", "returning", "ralph", "opposition", "container", "yr", "defendant", "warner", "confirmation", "app", "embedded", "supervisor", "wizard", "corps", "actors", "liver", "liable", "morris", "petition", "recall", "picked", "assumed", "departure", "minneapolis", "belief", "killing", "memphis", "shoulder", "texts", "brokers", "roy", "ion", "diameter", "ottawa", "doll", "ic", "tit", "seasons", "peru", "refine", "bidder", "singer", "evans", "herald", "literacy", "fails", "aging", "intervention", "fed", "attraction", "diving", "invite", "modification", "alice", "suppose", "reed", "involve", "moderate", "terror", "younger", 
"thirty", "mice", "opposite", "understood", "rapidly", "ban", "mercedes", "assurance", "clerk", "happening", "vast", "mills", "outline", "amendments", "holland", "receives", "metropolitan", "compilation", "verification", "ent", "odd", "wrap", "refers", "mood", "favor", "veterans", "gr", "attractive", "occasion", "jefferson", "victim", "demands", "sleeping", "careful", "beam", "gardening", "obligations", "arrive", "orchestra", "sunset", "tracked", "moreover", "lottery", "tops", "framed", "aside", "licence", "essay", "discipline", "amy", "dialogue", "identifying", "alphabetical", "camps", "declared", "dispatched", "aaron", "trace", "disposal", "shut", "packs", "ge", "switches", "romania", "voluntary", "thou", "consult", "greatly", "mask", "midnight", "ng", "commonly", "pe", "photographer", "inform", "turkish", "coal", "cry", "quantum", "murray", "intent", "tt", "zoo", "largely", "pleasant", "announce", "constructed", "additions", "requiring", "spoke", "arrow", "engagement", "rough", "weird", "tee", "lion", "inspired", "holes", "weddings", "blade", "suddenly", "oxygen", "meals", "canyon", "meters", "merely", "arrangement", "conclusions", "passes", "bibliography", "pointer", "stretch", "durham", "furthermore", "permits", "cooperative", "muslim", "xl", "neil", "sleeve", "cleaner", "cricket", "beef", "feeding", "stroke", "township", "cad", "hats", "robin", "robinson", "jacksonville", "strap", "headquarters", "sharon", "crowd", "transfers", "surf", "olympic", "transformation", "remained", "attachments", "dir", "entities", "customs", "administrators", "personality", "rainbow", "hook", "roulette", "decline", "gloves", "cord", "cloud", "facilitate", "subscriber", "valve", "val", "explains", "proceed", "feelings", "knife", "jamaica", "shelf", "liked", "adopt", "denied", "incredible", "donation", "outer", "crop", "deaths", "rivers", "commonwealth", "manhattan", "tales", "katrina", "islam", "tu", "fy", "thumbs", "seeds", "cited", "lite", "hub", "realized", "twelve", "founder", "decade", "dispute", "portuguese", "tired", "adverse", "everywhere", "eng", "steam", "discharge", "ef", "drinks", "ace", "voices", "acute", "climbing", "stood", "sing", "tons", "perfume", "carol", "honest", "albany", "hazardous", "restore", "stack", "somebody", "sue", "ep", "reputation", "democrats", "hang", "curve", "creator", "amber", "qualifications", "museums", "variation", "passage", "transferred", "trunk", "lb", "damn", "pierre", "photograph", "oakland", "colombia", "waves", "camel", "lamps", "underlying", "hood", "wrestling", "suicide", "chi", "arabia", "gathering", "projection", "juice", "chase", "mathematical", "logical", "sauce", "fame", "extract", "specialized", "panama", "indianapolis", "af", "payable", "corporations", "courtesy", "criticism", "automobile", "confidential", "statutory", "accommodations", "athens", "northeast", "judges", "retired", "remarks", "detected", "decades", "paintings", "walked", "arising", "bracelet", "ins", "eggs", "juvenile", "injection", "yorkshire", "populations", "protective", "afraid", "railway", "indicator", "pointed", "causing", "mistake", "norton", "locked", "eliminate", "fusion", "mineral", "ruby", "steering", "beads", "fortune", "preference", "canvas", "threshold", "parish", "claimed", "screens", "cemetery", "croatia", "flows", "venezuela", "exploration", "fewer", "nurses", "stem", "proxy", "astronomy", "lanka", "edwards", "drew", "contests", "translate", "announces", "costume", "berkeley", "voted", "killer", "gates", "adjusted", "rap", "tune", "bishop", "pulled", "corn", "shaped", 
"compression", "seasonal", "establishing", "farmer", "counters", "puts", "constitutional", "grew", "perfectly", "tin", "slave", "instantly", "cultures", "norfolk", "coaching", "examined", "trek", "encoding", "litigation", "heroes", "painted", "ir", "horizontal", "resulted", "portrait", "ethical", "carriers", "mobility", "floral", "builders", "ties", "struggle", "schemes", "suffering", "neutral", "fisher", "rat", "spears", "prospective", "bedding", "ultimately", "joining", "heading", "equally", "artificial", "bearing", "spectacular", "seniors", "worlds", "guilty", "affiliated", "naturally", "haven", "tablet", "jury", "dos", "tail", "subscribers", "charm", "lawn", "violent", "underwear", "basin", "soup", "potentially", "ranch", "crossing", "inclusive", "cottage", "drunk", "considerable", "crimes", "resolved", "byte", "nose", "branches", "delhi", "holdings", "alien", "selecting", "processors", "broke", "nepal", "zimbabwe", "difficulties", "juan", "complexity", "constantly", "browsing", "resolve", "barcelona", "presidential", "documentary", "cod", "territories", "melissa", "moscow", "thesis", "thru", "jews", "discs", "rocky", "bargains", "frequent", "nigeria", "ceiling", "ensuring", "legislature", "hospitality", "gen", "anybody", "diamonds", "fleet", "bunch", "singing", "theoretical", "afford", "exercises", "surveillance", "quit", "distinct", "lung", "substitute", "inclusion", "hopefully", "brilliant", "turner", "sucking", "cents", "ti", "todd", "spoken", "stayed", "civic", "manuals", "sees", "termination", "watched", "thereof", "households", "redeem", "rogers", "grain", "authentic", "regime", "wishes", "bull", "montgomery", "architectural", "louisville", "depend", "differ", "movements", "ranging", "monica", "repairs", "breath", "amenities", "virtually", "cole", "mart", "candle", "hanging", "colored", "authorization", "tale", "verified", "lynn", "formerly", "bp", "situated", "comparative", "seeks", "loving", "strictly", "routing", "docs", "stanley", "psychological", "surprised", "elegant", "gains", "renewal", "genealogy", "opposed", "deemed", "scoring", "expenditure", "brooklyn", "liverpool", "sisters", "critics", "spots", "oo", "hacker", "madrid", "similarly", "margin", "coin", "solely", "fake", "salon", "norman", "excluding", "headed", "voters", "cure", "madonna", "commander", "arch", "ni", "murphy", "thinks", "suggestion", "soldier", "phillips", "aimed", "justin", "bomb", "harm", "interval", "mirrors", "tricks", "brush", "investigate", "thy", "panels", "repeated", "assault", "spare", "deer", "tongue", "bowling", "tri", "pal", "monkey", "proportion", "filename", "skirt", "florence", "invest", "honey", "um", "analyses", "drawings", "significance", "ye", "lovers", "atomic", "arabic", "gauge", "essentials", "junction", "protecting", "faced", "mat", "rachel", "solving", "transmitted", "produces", "oven", "ted", "intensive", "chains", "kingston", "sixth", "engage", "noon", "switching", "quoted", "correspondence", "farms", "imports", "supervision", "cheat", "bronze", "expenditures", "sandy", "separation", "testimony", "suspect", "celebrities", "sender", "boundaries", "crucial", "celebration", "adjacent", "filtering", "tuition", "spouse", "exotic", "threats", "luxembourg", "puzzles", "reaching", "vb", "damaged", "laugh", "joel", "surgical", "destroy", "citation", "pitch", "yo", "premises", "perry", "proved", "offensive", "imperial", "dozen", "benjamin", "teeth", "cloth", "studying", "colleagues", "stamp", "lotus", "salmon", "olympus", "separated", "cargo", "tan", "salem", "mate", "likes", "butter", 
"pepper", "weapon", "luggage", "burden", "chef", "zones", "races", "isle", "stylish", "slim", "maple", "luke", "grocery", "offshore", "depot", "kenneth", "comp", "alt", "pie", "blend", "harrison", "julie", "occasionally", "attending", "emission", "pete", "finest", "janet", "bow", "penn", "recruiting", "apparent", "autumn", "traveling", "probe", "midi", "toilet", "ranked", "jackets", "routes", "packed", "excited", "helen", "mounting", "recover", "tied", "balanced", "prescribed", "catherine", "timely", "talked", "delayed", "chuck", "reproduced", "hon", "dale", "explicit", "calculation", "villas", "ebook", "consolidated", "occasions", "brooks", "newton", "oils", "sept", "exceptional", "anxiety", "whilst", "unto", "prompt", "precious", "minds", "annually", "considerations", "pays", "cox", "fingers", "sunny", "ebooks", "delivers", "je", "queensland", "necklace", "musicians", "leeds", "composite", "cedar", "arranged", "lang", "theaters", "advocacy", "raleigh", "stud", "fold", "essentially", "designing", "threaded", "uv", "qualify", "fingering", "blair", "hopes", "mason", "diagram", "burns", "pumps", "slut", "ejaculation", "sg", "vic", "peoples", "victor", "mario", "pos", "attach", "licenses", "removing", "advised", "brunswick", "spider", "ranges", "pairs", "trails", "preservation", "hudson", "isolated", "interim", "assisted", "divine", "streaming", "approve", "chose", "compound", "intensity", "technological", "syndicate", "abortion", "venues", "blast", "calcium", "newport", "addressing", "pole", "discounted", "indians", "shield", "harvest", "membrane", "prague", "bangladesh", "constitute", "locally", "concluded", "desperate", "mothers", "iceland", "demonstration", "governmental", "manufactured", "candles", "graduation", "bend", "sailing", "variations", "sacred", "morocco", "tommy", "springfield", "refused", "brake", "exterior", "greeting", "oliver", "congo", "glen", "delays", "synthesis", "olive", "undefined", "unemployment", "scored", "newcastle", "velocity", "relay", "composed", "tears", "performances", "oasis", "cab", "angry", "fa", "societies", "brazilian", "identical", "petroleum", "compete", "ist", "norwegian", "lover", "belong", "honolulu", "lips", "escort", "retention", "exchanges", "pond", "rolls", "thomson", "barnes", "wondering", "malta", "daddy", "ferry", "rabbit", "profession", "seating", "dam", "separately", "physiology", "collecting", "das", "exports", "omaha", "tire", "dominican", "chad", "loads", "friendship", "heather", "passport", "unions", "treasury", "warrant", "frozen", "occupied", "josh", "royalty", "scales", "rally", "observer", "sunshine", "strain", "drag", "ceremony", "somehow", "arrested", "expanding", "provincial", "investigations", "ripe", "rely", "hebrew", "gained", "rochester", "dying", "laundry", "stuck", "solomon", "placing", "stops", "adjust", "assessed", "enabling", "filling", "sophisticated", "imposed", "silence", "soviet", "possession", "cu", "laboratories", "treaty", "vocal", "trainer", "organ", "stronger", "volumes", "advances", "vegetables", "lemon", "darkness", "nuts", "nail", "vienna", "implied", "span", "stanford", "stockings", "joke", "respondent", "packing", "statute", "rejected", "satisfy", "destroyed", "shelter", "chapel", "manufacture", "layers", "guided", "accredited", "appliance", "compressed", "bahamas", "powell", "mixture", "bench", "tub", "rider", "radius", "perspectives", "mortality", "logging", "hampton", "christians", "borders", "pads", "butts", "inns", "bobby", "impressive", "sheep", "accordingly", "architect", "railroad", "lectures", 
"challenging", "wines", "nursery", "harder", "cups", "ash", "microwave", "cheapest", "accidents", "stuart", "contributors", "salvador", "ali", "salad", "monroe", "tender", "violations", "foam", "temperatures", "paste", "clouds", "discretion", "tanzania", "preserve", "poem", "unsigned", "staying", "easter", "theories", "repository", "praise", "jeremy", "venice", "jo", "christianity", "veteran", "streams", "landing", "signing", "executed", "katie", "negotiations", "realistic", "integral", "asks", "relax", "namibia", "generating", "christina", "congressional", "synopsis", "hardly", "prairie", "reunion", "composer", "bean", "sword", "absent", "photographic", "sells", "ecuador", "hoping", "accessed", "spirits", "modifications", "coral", "float", "colin", "bias", "imported", "paths", "bubble", "por", "acquire", "contrary", "millennium", "tribune", "vessel", "acids", "cheaper", "admitted", "dairy", "admit", "mem", "fancy", "equality", "samoa", "achieving", "tap", "fisheries", "exceptions", "reactions", "beliefs", "ci", "companion", "squad", "analyze", "ashley", "scroll", "relate", "divisions", "swim", "wages", "suffer", "forests", "fellowship", "invalid", "concerts", "martial", "males", "victorian", "retain", "colours", "execute", "tunnel", "genres", "cambodia", "patents", "yn", "chaos", "lithuania", "wheat", "chronicles", "obtaining", "beaver", "distribute", "readings", "decorative", "confused", "compiler", "enlargement", "eagles", "bases", "vii", "accused", "bee", "campaigns", "unity", "loud", "bride", "rats", "defines", "airports", "instances", "indigenous", "begun", "brunette", "packets", "anchor", "socks", "parade", "corruption", "stat", "trigger", "incentives", "gathered", "essex", "notified", "differential", "beaches", "dramatic", "surfaces", "terrible", "cruz", "pendant", "dresses", "baptist", "scientist", "hiring", "clocks", "females", "wallace", "nevertheless", "reflects", "taxation", "fever", "cuisine", "surely", "practitioners", "transcript", "inflation", "thee", "ruth", "pray", "compounds", "pope", "drums", "contracting", "arnold", "reasonably", "chicks", "bare", "hung", "cattle", "radical", "graduates", "rover", "recommends", "controlling", "treasure", "flame", "tanks", "assuming", "monetary", "elderly", "pit", "arlington", "floating", "extraordinary", "tile", "indicating", "bolivia", "spell", "hottest", "stevens", "kuwait", "exclusively", "emily", "alleged", "limitation", "compile", "webster", "struck", "illustration", "plymouth", "warnings", "construct", "inquiries", "bridal", "annex", "mag", "inspiration", "tribal", "curious", "affecting", "freight", "eclipse", "sudan", "downloading", "shuttle", "aggregate", "stunning", "cycles", "affects", "detect", "actively", "knee", "prep", "pb", "complicated", "fastest", "butler", "injured", "decorating", "expressions", "ton", "courier", "shakespeare", "hints", "collapse", "unlikely", "oe", "gif", "pros", "conflicts", "beverage", "tribute", "wired", "immune", "travelers", "forestry", "barriers", "cant", "rarely", "infected", "offerings", "martha", "genesis", "barrier", "argue", "incorrect", "trains", "metals", "bicycle", "furnishings", "letting", "arise", "guatemala", "celtic", "thereby", "jamie", "particle", "perception", "minerals", "advise", "humidity", "bottles", "boxing", "wy", "renaissance", "pathology", "sara", "bra", "ordinance", "hughes", "bitch", "jeffrey", "chess", "operates", "survive", "oscar", "festivals", "menus", "joan", "possibilities", "duck", "reveal", "canal", "phi", "contributing", "herbs", "cow", "manitoba", 
"analytical", "missions", "watson", "lying", "costumes", "strict", "dive", "circulation", "drill", "offense", "bryan", "cet", "protest", "assumption", "jerusalem", "hobby", "tries", "invention", "nickname", "fiji", "enquiries", "washing", "exploring", "trick", "enquiry", "raid", "timber", "intense", "showers", "supporters", "ruling", "steady", "dirt", "statutes", "withdrawal", "myers", "drops", "predicted", "wider", "saskatchewan", "enrolled", "screw", "ministers", "publicly", "hourly", "blame", "geneva", "veterinary", "handed", "suffered", "informal", "incentive", "butterfly", "mechanics", "heavily", "fifty", "mistakes", "numerical", "ons", "uncle", "defining", "counting", "reflection", "sink", "accompanied", "assure", "invitation", "devoted", "princeton", "jacob", "sodium", "randy", "spirituality", "meanwhile", "proprietary", "timothy", "brick", "grip", "naval", "medieval", "porcelain", "bridges", "captured", "watt", "decent", "casting", "dayton", "translated", "shortly", "cameron", "pins", "carlos", "reno", "donna", "andreas", "warrior", "diploma", "cabin", "innocent", "scanning", "consensus", "polo", "copying", "delivering", "patricia", "horn", "eddie", "uganda", "fired", "journalism", "perth", "frog", "grammar", "intention", "syria", "disagree", "klein", "harvey", "tires", "logs", "undertaken", "hazard", "leo", "gregory", "episodes", "circular", "anger", "mainland", "illustrations", "suits", "chances", "snap", "happiness", "arg", "substantially", "bizarre", "glenn", "ur", "auckland", "fruits", "geo", "ribbon", "calculations", "doe", "conducting", "trinidad", "kissing", "wal", "handy", "swap", "exempt", "crops", "reduces", "accomplished", "geometry", "impression", "guild", "correlation", "gorgeous", "capitol", "sim", "dishes", "barbados", "nervous", "refuse", "extends", "fragrance", "mcdonald", "replica", "brussels", "tribe", "neighbors", "trades", "superb", "buzz", "transparent", "rid", "trinity", "charleston", "handled", "legends", "boom", "calm", "champions", "floors", "selections", "inappropriate", "exhaust", "comparing", "shanghai", "speaks", "burton", "vocational", "davidson", "copied", "scotia", "farming", "gibson", "fork", "troy", "roller", "batch", "organize", "appreciated", "alter", "ghana", "edges", "mixing", "handles", "skilled", "fitted", "albuquerque", "harmony", "distinguished", "projected", "assumptions", "shareholders", "twins", "rip", "triangle", "amend", "anticipated", "oriental", "reward", "windsor", "zambia", "completing", "hydrogen", "comparable", "chick", "advocate", "sims", "confusion", "copyrighted", "tray", "warranties", "escorts", "thong", "medal", "coaches", "vessels", "harbour", "walks", "sucks", "sol", "sage", "knives", "vulnerable", "arrange", "artistic", "bat", "honors", "booth", "reflected", "unified", "bones", "breed", "ignored", "polar", "fallen", "precise", "sussex", "respiratory", "invoice", "lip", "sap", "gather", "maternity", "backed", "alfred", "colonial", "carey", "forming", "embassy", "cave", "journalists", "danny", "rebecca", "slight", "proceeds", "indirect", "amongst", "wool", "foundations", "arrest", "horizon", "nu", "deeply", "marina", "liabilities", "prizes", "bosnia", "decreased", "patio", "tolerance", "lloyd", "describing", "optics", "pursue", "lightning", "overcome", "eyed", "ou", "quotations", "grab", "inspector", "attract", "brighton", "beans", "bookmarks", "ellis", "disable", "snake", "succeed", "leonard", "lending", "reminder", "xi", "searched", "riverside", "plains", "raymond", "abilities", "initiated", "sullivan", "za", "trap", 
"lonely", "fool", "ve", "lancaster", "suspended", "observe", "julia", "attitudes", "karl", "berry", "collar", "simultaneously", "racial", "bermuda", "amanda", "sociology", "exhibitions", "confident", "retrieved", "exhibits", "officially", "dies", "terrace", "bacteria", "replied", "novels", "recipients", "ought", "delicious", "traditions", "jail", "safely", "finite", "kidney", "periodically", "fixes", "sends", "durable", "allied", "throws", "moisture", "hungarian", "referring", "spencer", "uruguay", "transform", "tablets", "tuning", "gotten", "educators", "tyler", "futures", "vegetable", "verse", "humanities", "independently", "wanting", "custody", "scratch", "launches", "henderson", "bk", "britannica", "ellen", "competitors", "rocket", "bullet", "towers", "racks", "lace", "nasty", "latitude", "consciousness", "ste", "tumor", "ugly", "deposits", "beverly", "mistress", "encounter", "trustees", "watts", "duncan", "hart", "bernard", "resolutions", "ment", "forty", "tubes", "attempted", "col", "priest", "floyd", "ronald", "queue", "trance", "nicholas", "yu", "bundle", "hammer", "invasion", "witnesses", "runner", "rows", "administered", "notion", "sq", "skins", "mailed", "spelling", "arctic", "rewards", "beneath", "strengthen", "defend", "frederick", "seventh", "gods", "une", "welsh", "belly", "aggressive", "advertisements", "quarters", "stolen", "soonest", "haiti", "disturbed", "determines", "sculpture", "ears", "fist", "fitting", "fixtures", "mere", "agrees", "passengers", "quantities", "petersburg", "consistently", "cons", "elder", "cheers", "dig", "taxi", "punishment", "appreciation", "subsequently", "om", "nat", "gravity", "providence", "thumb", "restriction", "incorporate", "backgrounds", "treasurer", "essence", "flooring", "ethiopia", "mighty", "athletes", "humanity", "transcription", "holmes", "complications", "scholars", "remembered", "galaxy", "chester", "loc", "worn", "synthetic", "shaw", "vp", "segments", "testament", "twist", "stomach", "partially", "buried", "minimize", "darwin", "ranks", "wilderness", "debut", "generations", "tournaments", "bradley", "deny", "anatomy", "judy", "fraction", "trio", "proceeding", "cube", "defects", "uncertainty", "breakdown", "milton", "reconstruction", "subsidiary", "clarity", "rugs", "sandra", "adelaide", "encouraging", "furnished", "monaco", "settled", "folding", "comparisons", "beneficial", "belize", "fate", "promised", "penny", "robust", "threatened", "republicans", "discusses", "porter", "gras", "jungle", "ver", "responded", "rim", "zen", "ivory", "alpine", "dis", "prediction", "fabulous", "alias", "individually", "battlefield", "literally", "newer", "kay", "spice", "oval", "implies", "soma", "ser", "cooler", "consisting", "periodic", "submitting", "overhead", "ascii", "prospect", "shipment", "breeding", "citations", "geographical", "donor", "mozambique", "tension", "trash", "shapes", "tier", "earl", "manor", "envelope", "diane", "disclaimers", "excluded", "andrea", "breeds", "rapids", "sheffield", "bailey", "aus", "finishing", "emotions", "wellington", "incoming", "prospects", "bulgarian", "eternal", "cite", "aboriginal", "remarkable", "rotation", "nam", "productive", "boulevard", "eugene", "ix", "gdp", "pig", "metric", "minus", "penalties", "bennett", "imagination", "joshua", "armenia", "varied", "grande", "closest", "actress", "mess", "assign", "armstrong", "politicians", "lit", "accommodate", "tigers", "aurora", "una", "slides", "milan", "premiere", "lender", "villages", "shade", "chorus", "christine", "rhythm", "digit", "argued", 
"dietary", "symphony", "clarke", "sudden", "accepting", "precipitation", "lions", "ada", "pools", "tb", "lyric", "claire", "isolation", "speeds", "sustained", "matched", "approximate", "rope", "carroll", "rational", "fighters", "chambers", "dump", "greetings", "inherited", "warming", "incomplete", "chronicle", "fountain", "chubby", "grave", "legitimate", "biographies", "burner", "investigator", "plaintiff", "finnish", "gentle", "prisoners", "deeper", "muslims", "hose", "mediterranean", "worthy", "reveals", "architects", "saints", "carries", "sig", "duo", "excessive", "devon", "helena", "saves", "regarded", "valuation", "unexpected", "cigarette", "fog", "characteristic", "marion", "lobby", "egyptian", "tunisia", "outlined", "consequently", "treating", "punch", "appointments", "gotta", "cowboy", "narrative", "enormous", "karma", "consist", "betty", "queens", "quantitative", "lucas", "subdivision", "tribes", "defeat", "distinction", "honduras", "naughty", "hazards", "insured", "harper", "livestock", "exemption", "tenant", "cabinets", "tattoo", "shake", "algebra", "shadows", "holly", "silly", "yea", "mercy", "hartford", "freely", "marcus", "sunrise", "wrapping", "mild", "fur", "nicaragua", "tar", "belongs", "readily", "soc", "fence", "infinite", "diana", "relatives", "lindsay", "clan", "legally", "shame", "satisfactory", "revolutionary", "bracelets", "civilian", "mesa", "fatal", "remedy", "breathing", "briefly", "thickness", "adjustments", "genius", "discussing", "fighter", "flesh", "retreat", "adapted", "barely", "wherever", "estates", "rug", "democrat", "borough", "maintains", "failing", "ka", "retained", "pamela", "andrews", "marble", "extending", "jesse", "hull", "surrey", "dem", "blackberry", "highland", "meditation", "macedonia", "combining", "brandon", "instrumental", "giants", "organizing", "shed", "balloon", "winston", "ham", "solved", "tide", "hawaiian", "partition", "invisible", "consoles", "funk", "magnet", "translations", "jaguar", "reel", "sheer", "commodity", "posing", "wang", "kilometers", "bind", "thanksgiving", "rand", "hopkins", "urgent", "guarantees", "infants", "gothic", "cylinder", "witch", "buck", "indication", "eh", "congratulations", "cohen", "sie", "puppy", "acre", "cigarettes", "revenge", "expires", "enemies", "aqua", "chen", "emma", "finances", "accepts", "enjoying", "conventions", "eva", "patrol", "smell", "pest", "coordinates", "carnival", "roughly", "promises", "responding", "reef", "physically", "divide", "consecutive", "satin", "bon", "deserve", "attempting", "representations", "chan", "worried", "tunes", "garbage", "competing", "combines", "mas", "beth", "bradford", "len", "phrases", "kai", "peninsula", "chelsea", "boring", "reynolds", "dom", "jill", "accurately", "speeches", "reaches", "considers", "sofa", "ministries", "vacancies", "parliamentary", "prefix", "lucia", "savannah", "barrel", "typing", "nerve", "dans", "planets", "deficit", "boulder", "pointing", "renew", "coupled", "viii", "harold", "circuits", "texture", "jar", "somerset", "acknowledge", "thoroughly", "antigua", "nottingham", "thunder", "tent", "caution", "identifies", "qualification", "locks", "modelling", "namely", "miniature", "hack", "dare", "interstate", "pirates", "aerial", "hawk", "consequence", "rebel", "systematic", "perceived", "origins", "hired", "textile", "lamb", "madagascar", "nathan", "tobago", "presenting", "cos", "centuries", "magnitude", "richardson", "hindu", "vocabulary", "licking", "earthquake", "fundraising", "weights", "albania", "geological", "lasting", "wicked", 
"introduces", "kills", "pushed", "ro", "participated", "junk", "wax", "lucy", "answering", "hans", "impressed", "slope", "failures", "poet", "conspiracy", "surname", "theology", "nails", "evident", "epic", "saturn", "organizer", "nut", "sake", "twisted", "combinations", "preceding", "merit", "cumulative", "planes", "edmonton", "tackle", "disks", "arbitrary", "prominent", "retrieve", "lexington", "vernon", "sans", "irs", "fairy", "builds", "shaft", "lean", "bye", "occasional", "leslie", "deutsche", "ana", "innovations", "kitty", "drain", "monte", "fires", "algeria", "blessed", "luis", "reviewing", "cardiff", "cornwall", "favors", "potato", "panic", "explicitly", "sticks", "leone", "ez", "citizenship", "excuse", "reforms", "basement", "onion", "strand", "sandwich", "uw", "lawsuit", "alto", "cheque", "hierarchy", "influenced", "banners", "reject", "eau", "abandoned", "bd", "circles", "italic", "merry", "mil", "gore", "complement", "cult", "dash", "passive", "mauritius", "valued", "cage", "requesting", "courage", "verde", "extraction", "elevation", "coleman", "hugh", "lap", "utilization", "beverages", "jake", "efficiently", "textbook", "dried", "entertaining", "luther", "frontier", "settle", "stopping", "refugees", "knights", "hypothesis", "palmer", "medicines", "flux", "derby", "peaceful", "altered", "doctrine", "scenic", "intersection", "sewing", "consistency", "collectors", "conclude", "recognised", "munich", "oman", "propose", "azerbaijan", "lighter", "rage", "uh", "prix", "astrology", "pavilion", "tactics", "trusts", "occurring", "supplemental", "travelling", "talented", "annie", "pillow", "induction", "derek", "precisely", "shorter", "harley", "spreading", "provinces", "relying", "paraguay", "steal", "parcel", "refined", "bo", "fifteen", "widespread", "incidence", "fears", "predict", "boutique", "rolled", "avon", "incidents", "peterson", "rays", "shannon", "enhancing", "flavor", "alike", "walt", "homeless", "horrible", "hungry", "metallic", "blocked", "interference", "warriors", "palestine", "undo", "atmospheric", "wm", "dana", "halo", "curtis", "parental", "strikes", "lesser", "publicity", "marathon", "ant", "proposition", "pressing", "gasoline", "apt", "dressed", "scout", "belfast", "dealt", "niagara", "inf", "eos", "charms", "trader", "bucks", "allowance", "denial", "uri", "designation", "thrown", "raises", "gem", "duplicate", "criterion", "badge", "wrist", "civilization", "analyzed", "heath", "tremendous", "ballot", "varying", "remedies", "validity", "trustee", "weighted", "angola", "performs", "realm", "corrected", "jenny", "helmet", "salaries", "elephant", "yemen", "encountered", "scholar", "nickel", "surrounded", "geology", "creatures", "coating", "commented", "wallet", "cleared", "accomplish", "boating", "drainage", "corners", "broader", "vegetarian", "rouge", "yeast", "yale", "newfoundland", "sn", "pas", "clearing", "investigated", "ambassador", "coated", "intend", "stephanie", "contacting", "vegetation", "doom", "louise", "kenny", "specially", "owen", "hitting", "yukon", "beings", "bite", "aquatic", "reliance", "habits", "striking", "myth", "infectious", "singh", "gig", "gilbert", "continuity", "brook", "fu", "phenomenon", "ensemble", "assured", "biblical", "weed", "conscious", "accent", "eleven", "wives", "utilize", "mileage", "auburn", "unlock", "pledge", "vampire", "angela", "relates", "nitrogen", "dice", "dock", "differently", "framing", "organised", "musician", "blocking", "sorts", "limiting", "dispatch", "revisions", "papua", "restored", "hint", "armor", "riders", 
"chargers", "remark", "dozens", "varies", "reasoning", "rendered", "picking", "charitable", "guards", "annotated", "convinced", "openings", "buys", "replacing", "watershed", "councils", "occupations", "acknowledged", "nudity", "pockets", "granny", "pork", "zu", "equilibrium", "inquire", "pipes", "characterized", "laden", "cottages", "merge", "privilege", "edgar", "develops", "qualifying", "estimation", "barn", "pushing", "fleece", "fare", "pierce", "allan", "dressing", "sperm", "bald", "frost", "leon", "institutes", "mold", "dame", "fo", "sally", "yacht", "tracy", "prefers", "drilling", "herb", "ate", "breach", "whale", "traveller", "appropriations", "suspected", "tomatoes", "beginners", "instructors", "bedford", "stationery", "idle", "mustang", "unauthorized", "clusters", "competent", "momentum", "fin", "io", "pastor", "mud", "calvin", "uni", "shark", "contributor", "demonstrates", "phases", "grateful", "emerald", "gradually", "laughing", "grows", "cliff", "desirable", "tract", "ballet", "ol", "journalist", "abraham", "bumper", "afterwards", "religions", "garlic", "shine", "senegal", "explosion", "banned", "briefs", "signatures", "cove", "casa", "mu", "daughters", "conversations", "radios", "tariff", "opponent", "simplified", "muscles", "wrapped", "swift", "vagina", "eden", "distant", "champagne", "ala", "decimal", "deviation", "superintendent", "dip", "hostel", "housewives", "employ", "mongolia", "penguin", "magical", "influences", "irrigation", "miracle", "reprint", "reid", "hydraulic", "centered", "robertson", "yearly", "penetration", "wound", "belle", "rosa", "conviction", "hash", "omissions", "writings", "hamburg", "lazy", "qualities", "fathers", "charging", "cas", "marvel", "lined", "cio", "dow", "prototype", "petite", "apparatus", "terrain", "pens", "explaining", "yen", "strips", "gossip", "rangers", "nomination", "empirical", "rotary", "worm", "dependence", "beginner", "boxed", "lid", "cubic", "deaf", "commitments", "suggesting", "sapphire", "skirts", "mats", "remainder", "crawford", "labeled", "privileges", "marking", "commodities", "serbia", "sheriff", "griffin", "declined", "guyana", "spies", "neighbor", "elect", "highways", "concentrate", "intimate", "reproductive", "preston", "deadly", "molecules", "rounds", "refrigerator", "intervals", "sentences", "exclusion", "holocaust", "keen", "peas", "receivers", "disposition", "variance", "navigator", "investigators", "cameroon", "baking", "computed", "needle", "baths", "cathedral", "brakes", "og", "nirvana", "ko", "owns", "til", "sticky", "destiny", "generous", "madness", "climb", "blowing", "fascinating", "landscapes", "heated", "lafayette", "wto", "computation", "hay", "salvation", "dover", "adrian", "predictions", "accompanying", "vatican", "brutal", "selective", "arbitration", "token", "editorials", "zinc", "sacrifice", "seekers", "isa", "removable", "yields", "gibraltar", "levy", "suited", "anthropology", "skating", "aberdeen", "emperor", "grad", "bras", "belts", "blacks", "educated", "reporters", "burke", "proudly", "necessity", "rendering", "inserted", "pulling", "curves", "suburban", "touring", "clara", "tomato", "waterproof", "expired", "travels", "flush", "pale", "hayes", "humanitarian", "invitations", "functioning", "delight", "survivor", "garcia", "economies", "alexandria", "moses", "counted", "undertake", "declare", "continuously", "johns", "valves", "gaps", "impaired", "achievements", "donors", "tear", "jewel", "teddy", "convertible", "teaches", "ventures", "nil", "stranger", "tragedy", "julian", "nest", "painful", 
"velvet", "tribunal", "ruled", "pensions", "prayers", "nowhere", "cop", "paragraphs", "gale", "joins", "adolescent", "nominations", "wesley", "dim", "lately", "cancelled", "mattress", "likewise", "banana", "introductory", "cakes", "stan", "reservoir", "occurrence", "idol", "bloody", "remind", "worcester", "charming", "mai", "tooth", "disciplinary", "annoying", "respected", "stays", "disclose", "affair", "drove", "upset", "restrict", "beside", "mines", "portraits", "rebound", "logan", "mentor", "interpreted", "fought", "baghdad", "elimination", "metres", "hypothetical", "immigrants", "complimentary", "pencil", "freeze", "performer", "abu", "titled", "commissions", "sphere", "moss", "concord", "graduated", "endorsed", "ty", "surprising", "walnut", "lance", "ladder", "italia", "unnecessary", "dramatically", "liberia", "sherman", "cork", "hansen", "senators", "mali", "yugoslavia", "bleeding", "characterization", "colon", "likelihood", "lanes", "purse", "fundamentals", "contamination", "endangered", "compromise", "masturbation", "stating", "dome", "caroline", "expiration", "bless", "engaging", "negotiation", "crest", "opponents", "triumph", "nominated", "electoral", "welding", "deferred", "alternatively", "heel", "alloy", "plots", "polished", "yang", "gently", "locking", "casey", "controversial", "draws", "blanket", "bloom", "lou", "elliott", "recovered", "fraser", "justify", "blades", "loops", "surge", "aw", "tahoe", "advert", "possess", "demanding", "defensive", "sip", "forbidden", "vanilla", "deutschland", "picnic", "souls", "arrivals", "practitioner", "dumb", "smithsonian", "hollow", "vault", "securely", "examining", "groove", "revelation", "pursuit", "delegation", "wires", "dictionaries", "mails", "backing", "greenhouse", "sleeps", "blake", "transparency", "dee", "travis", "endless", "orbit", "niger", "bacon", "survivors", "colony", "cannon", "circus", "forbes", "mae", "mel", "descending", "spine", "trout", "enclosed", "feat", "cooked", "transmit", "fatty", "gerald", "pressed", "scanned", "reflections", "hunger", "sic", "municipality", "joyce", "detective", "surgeon", "cement", "experiencing", "fireplace", "endorsement", "disputes", "textiles", "missile", "closes", "seq", "persistent", "deborah", "marco", "assists", "summaries", "glow", "gabriel", "auditor", "violin", "prophet", "bracket", "isaac", "oxide", "oaks", "magnificent", "erik", "colleague", "naples", "promptly", "adaptation", "hu", "harmful", "sexually", "enclosure", "dividend", "newark", "kw", "paso", "phantom", "westminster", "turtle", "distances", "absorption", "treasures", "warned", "ware", "fossil", "mia", "badly", "apollo", "wan", "disappointed", "persian", "continually", "communist", "greene", "grenada", "creations", "jade", "scoop", "acquisitions", "foul", "earning", "excitement", "somalia", "verbal", "blink", "presently", "seas", "carlo", "mysterious", "novelty", "bryant", "tiles", "librarian", "switched", "stockholm", "pose", "grams", "richards", "promising", "relaxation", "goat", "render", "carmen", "ira", "sen", "thereafter", "hardwood", "temporal", "sail", "forge", "commissioners", "dense", "brave", "forwarding", "awful", "nightmare", "reductions", "southampton", "impose", "organisms", "telescope", "asbestos", "portsmouth", "meyer", "enters", "pod", "savage", "advancement", "wu", "willow", "resumes", "bolt", "gage", "throwing", "existed", "whore", "generators", "lu", "wagon", "dat", "favour", "knock", "urge", "generates", "potatoes", "thorough", "inexpensive", "kurt", "peers", "roland", "quilt", "huntington", 
"creature", "ours", "mounts", "syracuse", "lone", "refresh", "aluminium", "michel", "subtle", "notre", "shipments", "stripes", "antarctica", "cope", "shepherd", "cradle", "chancellor", "lime", "kirk", "flour", "controversy", "legendary", "sympathy", "choir", "avoiding", "beautifully", "blond", "expects", "fabrics", "hygiene", "wit", "poultry", "virtue", "burst", "examinations", "surgeons", "bouquet", "promotes", "mandate", "departmental", "ind", "corpus", "johnston", "terminology", "gentleman", "fibre", "reproduce", "shades", "jets", "qui", "threatening", "spokesman", "frankfurt", "prisoner", "daisy", "halifax", "encourages", "assembled", "earliest", "donated", "insects", "terminals", "crude", "morrison", "maiden", "sufficiently", "examines", "viking", "myrtle", "bored", "yarn", "knit", "conditional", "mug", "bother", "budapest", "knitting", "attacked", "mating", "compute", "arrives", "translator", "automobiles", "allah", "continent", "ob", "fares", "longitude", "resist", "challenged", "hoped", "pike", "insertion", "hugo", "wagner", "constraint", "touched", "strengthening", "cologne", "wishing", "ranger", "smallest", "insulation", "newman", "marsh", "scared", "infringement", "bent", "laos", "subjective", "monsters", "asylum", "robbie", "stake", "cocktail", "outlets", "varieties", "arbor", "poison", "dominated", "costly", "derivatives", "prevents", "stitch", "rifle", "severity", "notable", "warfare", "judiciary", "embroidery", "mama", "inland", "greenland", "interpret", "accord", "modest", "countryside", "sorting", "liaison", "unused", "bulbs", "consuming", "tourists", "sandals", "seconded", "waist", "attributed", "seychelles", "fatigue", "owl", "patriot", "sewer", "crystals", "kathleen", "bosch", "forthcoming", "num", "treats", "marino", "detention", "carson", "exceeds", "complementary", "gallon", "coil", "battles", "traders", "carlton", "bitter", "memorandum", "burned", "cardinal", "dragons", "converting", "romeo", "din", "incredibly", "delegates", "turks", "roma", "balancing", "att", "vet", "sided", "claiming", "courtyard", "presidents", "offenders", "depart", "cuban", "tenants", "expressly", "distinctive", "lily", "brackets", "unofficial", "oversight", "privately", "minded", "resin", "allies", "twilight", "preserved", "crossed", "kensington", "monterey", "linen", "rita", "ascending", "seals", "nominal", "alicia", "decay", "weaknesses", "quartz", "registers", "eighth", "usher", "herbert", "authorised", "improves", "advocates", "phenomena", "buffet", "deciding", "skate", "joey", "hackers", "tilt", "granite", "repeatedly", "lynch", "masses", "transformed", "athlete", "franc", "bead", "enforce", "similarity", "landlord", "leak", "timor", "assorted", "implements", "adviser", "flats", "compelling", "vouchers", "expecting", "heels", "voter", "urine", "capri", "towel", "ginger", "suburbs", "imagery", "sears", "als", "flies", "competence", "inadequate", "crying", "matthews", "amateurs", "crane", "defendants", "deployed", "governed", "considerably", "investigating", "rotten", "habit", "bulb", "scattered", "honour", "useless", "protects", "northwestern", "audiences", "iris", "coupe", "hal", "benin", "bach", "manages", "erosion", "abundance", "carpenter", "khan", "insufficient", "highlands", "peters", "fertility", "clever", "primer", "che", "lords", "bu", "tends", "enjoyable", "crescent", "freshman", "playground", "negotiate", "sixty", "exploit", "orgies", "permanently", "concentrated", "distinguish", "ei", "projections", "spark", "illustrate", "lin", "patience", "securing", "pathway", "shallow", 
"stir", "spike", "plated", "jacques", "drawer", "ingredient", "togo", "lifting", "judith", "curtain", "disclosed", "davies", "tactical", "pilots", "copenhagen", "expedition", "pile", "operative", "humour", "maturity", "caller", "distortion", "prosecution", "het", "tonga", "imprint", "natalie", "receipts", "assisting", "shirley", "sanctions", "goodbye", "emerged", "defect", "poorly", "goddess", "backs", "observers", "magnets", "formulas", "spacious", "shoulders", "nas", "argues", "wade", "soils", "chapman", "organs", "det", "loyalty", "beloved", "sometime", "ballard", "beating", "faithful", "libya", "offence", "invested", "whatsoever", "numbered", "terminated", "expands", "sedan", "pony", "comprises", "leap", "bolton", "founding", "swan", "covenant", "dropping", "archaeology", "sailor", "fittings", "lining", "banquet", "cares", "sanctuary", "flora", "statue", "hilary", "quotation", "equals", "hardy", "caravan", "diagrams", "harness", "manipulation", "bells", "vascular", "alongside", "impressions", "yankees", "forwarded", "gal", "transmitter", "dorothy", "freeman", "andre", "ems", "puppies", "relaxing", "delphi", "trophy", "emotion", "nets", "sights", "uniforms", "disasters", "asterisk", "versatile", "liquor", "kindergarten", "profitable", "wounded", "clayton", "derivative", "suffolk", "necklaces", "tot", "occupancy", "doses", "educate", "baked", "glove", "prejudice", "herzegovina", "probable", "baldwin", "incorporation", "rem", "evolutionary", "arriving", "decoration", "trojan", "assistants", "counselor", "spinal", "eliminated", "sooner", "struggling", "enacted", "tenure", "plush", "weber", "unstable", "elk", "nelly", "fulfill", "urged", "reflecting", "brent", "gaining", "definitive", "appropriately", "shifts", "inactive", "lansing", "traveled", "adapt", "extracted", "accession", "patterson", "carriage", "therein", "terminate", "rex", "fuels", "traditionally", "withdraw", "soy", "brett", "anchorage", "paula", "landmark", "greens", "neat", "naming", "stern", "bentley", "bud", "slaves", "dentist", "utilizing", "mis", "burkina", "tutor", "idiot", "comprised", "winnipeg", "charities", "mickey", "sebastian", "aliens", "domino", "raven", "defeated", "strains", "dwelling", "slice", "tanning", "gambia", "aspen", "lacking", "symbolic", "cest", "objectionable", "angles", "pressures", "webb", "mediation", "venus", "bump", "cowboys", "flames", "primitive", "auf", "stocking", "esp", "balloons", "malcolm", "georgetown", "norwich", "halls", "decorations", "pause", "simplicity", "postscript", "dividends", "relaxed", "periodicals", "pearson", "demon", "welcomed", "infinity", "gabon", "notation", "chandler", "aunt", "interviewed", "crow", "dia", "discontinued", "concurrent", "decides", "caption", "bargaining", "complain", "pulmonary", "adhesive", "toledo", "asses", "altitude", "compass", "closet", "couch", "evolved", "downs", "exceeding", "rogue", "unfair", "electronically", "augusta", "infantry", "renowned", "corridor", "philosophical", "scripture", "celebrating", "sahara", "justification", "rebuild", "vacant", "manuscript", "fixing", "gram", "hiding", "methodist", "dye", "sits", "alphabet", "shelves", "toes", "cleaned", "honored", "optic", "hannah", "telephones", "insect", "frances", "diaries", "chili", "grief", "leicester", "sweat", "dolphin", "pendants", "wonders", "ventilation", "masks", "bust", "lateral", "quake", "alley", "gardner", "sanders", "pathways", "telegraph", "pertaining", "memorable", "professors", "monument", "formally", "twain", "ile", "nevis", "dew", "lavender", "justified", "withdrawn", 
"breeze", "debates", "gems", "outgoing", "mann", "yankee", "outs", "deficiency", "gum", "progression", "adv", "saddle", "malaria", "loyal", "torrent", "odyssey", "spite", "nero", "capita", "imply", "inaccuracies", "tendency", "caledonia", "wholly", "chill", "utilized", "embrace", "ein", "liner", "manila", "auxiliary", "initiate", "ua", "elevated", "purely", "fry", "lifts", "vivid", "allegations", "stationary", "corresponds", "foil", "whitney", "celebrated", "alarms", "hunters", "roi", "allison", "stairs", "kt", "acted", "byron", "critique", "honestly", "skull", "continuation", "carnegie", "servant", "falcon", "jointly", "canadians", "avoided", "comprising", "tick", "terrier", "listened", "explanations", "renewed", "incorporating", "variant", "riley", "equatorial", "critic", "sediment", "translators", "squares", "deg", "bot", "lea", "vans", "od", "honeymoon", "percussion", "glue", "cone", "margins", "sands", "survived", "spinning", "adequately", "spectral", "prevalence", "dominica", "contaminated", "fragment", "finishes", "lecturer", "embroidered", "bucket", "steak", "commits", "cobra", "threw", "sutton", "djibouti", "authorize", "decorated", "credited", "cherokee", "apo", "ao", "recruit", "simmons", "gals", "hoc", "wherein", "appearances", "performers", "dessert", "dissertation", "walsh", "nos", "marry", "blankets", "enthusiasm", "confusing", "celebrations", "approaching", "bounce", "ivan", "spiral", "governors", "weakness", "wills", "katherine", "atoms", "jacobs", "mauritania", "tissues", "reminded", "drake", "cynthia", "roosevelt", "practicing", "schmidt", "nicely", "surprisingly", "expressing", "della", "laurel", "carolyn", "rails", "fried", "cairo", "ambulance", "practically", "traded", "signaling", "vivo", "domination", "shrimp", "chords", "molecule", "dedication", "desires", "woody", "dismissed", "cried", "psychic", "cracks", "analyzing", "sincerely", "beaten", "piercing", "antilles", "establishments", "marginal", "visions", "efficacy", "prestige", "cocaine", "accelerated", "pinnacle", "tucker", "recognizes", "plugs", "responsive", "supra", "omitted", "molly", "proximity", "ku", "belonging", "unbiased", "pear", "chiefs", "franz", "collision", "supplementary", "clue", "scandal", "lodges", "dangers", "lys", "travellers", "gia", "scream", "discrepancies", "pirate", "senses", "repeats", "willie", "rival", "slower", "simulated", "culinary", "fairfax", "beck", "huh", "accountant", "propaganda", "offender", "waterloo", "warwick", "rounded", "boarding", "vanity", "mitigation", "tome", "prof", "homer", "daylight", "macdonald", "gases", "dependency", "dioxide", "fireworks", "genus", "approached", "catching", "cutter", "connects", "ont", "liberals", "aperture", "roofing", "dixon", "elastic", "melody", "sins", "cousin", "hath", "recalls", "consultations", "debts", "phillip", "burial", "balcony", "prescriptions", "prop", "avril", "willis", "myths", "camden", "coupling", "knees", "neglect", "emerge", "winchester", "clutch", "shy", "poets", "auditorium", "pedro", "maid", "sid", "carrie", "towels", "canterbury", "trent", "barber", "intuitive", "rigid", "sta", "degradation", "ret", "orthodox", "erin", "ferguson", "fragments", "mariana", "qualitative", "claude", "minorities", "blown", "diffusion", "baton", "polynesia", "barton", "umbrella", "rods", "stimulation", "abbey", "pigs", "olivia", "refugee", "straps", "maya", "discourse", "lancashire", "headache", "stained", "marital", "socialist", "bruno", "attracted", "undertaking", "slavery", "notwithstanding", "feasible", "romans", "credibility", 
"shores", "fest", "thames", "flowing", "montenegro", "deed", "whirlpool", "perfumes", "sustain", "mechanic", "bauer", "eliminating", "rejection", "bowls", "dissemination", "cardinals", "cosmic", "dawson", "defective", "lengths", "beacon", "hoover", "politically", "elective", "forensic", "botanical", "quartet", "suspense", "drafting", "cruel", "observing", "advertised", "commencement", "southwestern", "conform", "helmets", "firing", "eager", "denise", "touching", "vacancy", "papa", "settlements", "strawberry", "chang", "gloria", "elevator", "pupil", "feast", "maggie", "redemption", "profound", "canton", "nina", "registering", "seth", "warn", "conservatives", "bonnie", "laying", "provisional", "compiling", "strive", "releasing", "martinique", "shells", "painter", "ankle", "peso", "leagues", "monkeys", "historically", "transitions", "prevented", "digits", "err", "banker", "sup", "easiest", "borrow", "bamboo", "lv", "denotes", "communicating", "ki", "decks", "vibration", "stepped", "vent", "blunt", "protector", "aux", "react", "understands", "rises", "issuing", "accents", "insane", "buddha", "voyage", "een", "colonel", "transitional", "mozart", "acceleration", "sketch", "hoffman", "balances", "firearms", "nightly", "pitt", "deduction", "dancer", "coats", "pol", "capsules", "hyde", "firmly", "doo", "dots", "pursuing", "aston", "mugs", "washed", "resonance", "mosaic", "rhodes", "fiesta", "vase", "forcing", "fairs", "flute", "durability", "meadows", "hindi", "harsh", "outfit", "substitution", "burma", "cease", "deserves", "aboard", "irving", "perfection", "joints", "overwhelming", "poles", "bounds", "lyon", "santiago", "vera", "advising", "altogether", "devils", "dignity", "europa", "wondered", "cheshire", "boyd", "sliding", "accumulation", "descriptive", "inst", "feasibility", "negotiating", "homo", "pier", "sioux", "cote", "premiums", "lutheran", "fellows", "valencia", "superman", "perkins", "ideally", "splash", "equip", "saga", "probation", "ast", "gran", "commissioned", "hedge", "ke", "fender", "violet", "dancers", "mutation", "envelopes", "alle", "compulsory", "favorable", "rue", "preparations", "maxwell", "illustrates", "inheritance", "curry", "oblique", "pearls", "worms", "satisfying", "succeeded", "apples", "elf", "dewey", "surviving", "pouch", "advent", "proposes", "hooks", "ces", "exploitation", "singers", "mayo", "tasmania", "mansion", "cha", "surrender", "schneider", "accumulated", "arsenal", "dub", "screws", "pyramid", "enjoys", "hacking", "stripe", "averages", "peaks", "tai", "como", "lisp", "limousine", "churchill", "affirmative", "keynote", "planted", "petitioner", "spoon", "bombs", "niche", "fortunately", "cigar", "vis", "calculating", "erie", "berkshire", "proportional", "credentials", "deprecated", "municipalities", "chin", "locker", "jenkins", "squash", "expectation", "severely", "spotted", "curse", "ajax", "coconut", "interrupt", "conductor", "wont", "liberation", "grandfather", "removes", "luxurious", "titan", "booked", "anita", "indirectly", "nile", "blessing", "lumber", "pillows", "portals", "illustrator", "asleep", "potassium", "prompted", "shout", "presidency", "abnormal", "delicate", "convince", "whoever", "straw", "lifted", "mankind", "uncertain", "paramount", "upright", "breakfasts", "inspectors", "emergencies", "ernest", "shocked", "alcoholic", "bakery", "lieutenant", "orchid", "histories", "loses", "atkins", "variability", "observatory", "soda", "waited", "preventive", "peach", "calculus", "stefan", "breathe", "dunn", "smiling", "ounces", "economically", "uncut", 
"intact", "noting", "shifting", "samurai", "moines", "ivy", "delegate", "lightly", "negotiated", "herman", "congestion", "runners", "stove", "accidental", "talents", "nixon", "refuge", "brady", "guadeloupe", "walton", "carved", "ark", "freak", "obstacles", "preferably", "bluff", "jasper", "sed", "newborn", "sadly", "laughed", "avail", "emerson", "regulate", "orchard", "mythology", "trousers", "hatch", "replaces", "tomb", "regina", "stein", "shortage", "privileged", "spill", "goodness", "drift", "extracts", "professions", "explored", "mysteries", "fuller", "decreases", "crisp", "cor", "keeper", "reinforced", "johannesburg", "spells", "specifying", "buddhist", "inevitable", "etiquette", "environ", "nic", "coloured", "births", "kr", "cubs", "wheeler", "ritual", "miguel", "pulp", "onset", "interpreter", "specimens", "initiation", "assay", "reconciliation", "pots", "recognizing", "leigh", "slam", "respects", "tents", "plaque", "accounted", "deposited", "lowe", "beavers", "crib", "defending", "pulls", "autonomous", "granting", "motoring", "appropriation", "condensed", "philippine", "theological", "quietly", "scenery", "drying", "assemblies", "collateral", "learner", "welcomes", "swallow", "tara", "transplant", "usenet", "marines", "lighthouse", "proves", "crab", "jen", "brightness", "maurice", "brooke", "consumed", "maxim", "bore", "depreciation", "technically", "enjoyment", "cows", "austrian", "correspond", "slate", "suzanne", "confined", "inhabitants", "straightforward", "delighted", "morton", "peel", "cue", "jupiter", "simultaneous", "monopoly", "debris", "han", "intentions", "pagan", "widow", "sac", "peg", "randall", "benson", "sleeves", "troubled", "footnote", "vibrant", "evolving", "sweater", "approximation", "skies", "barrett", "burners", "alison", "fitzgerald", "kicks", "disappeared", "canoe", "sovereign", "reminds", "organism", "corrupt", "violated", "correspondent", "drought", "bake", "hurricanes", "symptom", "laughter", "propagation", "ignorance", "explosive", "inventor", "scaling", "juicy", "moody", "fashioned", "grains", "vicinity", "thyroid", "purification", "heal", "southeastern", "wizards", "horoscope", "prosperity", "rainfall", "mum", "launching", "pedal", "plantation", "storing", "asa", "tote", "jumped", "seemingly", "tuned", "passionate", "staples", "mayer", "backward", "sour", "combustion", "scrap", "administer", "bilateral", "bella", "blondes", "disposable", "williamson", "sock", "gentlemen", "terra", "literal", "questioned", "guiding", "charcoal", "vapor", "beware", "aloud", "glorious", "overlap", "handsome", "grounded", "bail", "goose", "fn", "judgement", "cruiser", "cumberland", "gifted", "esteem", "cascade", "endorse", "strokes", "shelby", "hen", "ancestry", "dolphins", "adopting", "landed", "nucleus", "detached", "scouts", "warsaw", "ib", "mist", "verb", "chic", "objection", "phosphate", "noisy", "abide", "sentinel", "birthdays", "preserving", "vest", "neal", "economist", "meridian", "marriages", "regret", "stakes", "rotating", "brigade", "movable", "doubles", "bliss", "humiliation", "tens", "litter", "reflective", "abbreviations", "executing", "greenwich", "flooding", "rugged", "jelly", "grandmother", "renovation", "puma", "appoint", "panthers", "perceptions", "greenwood", "ignition", "humble", "petrol", "midway", "mania", "edwin", "ax", "clare", "recognise", "hostile", "aphrodite", "establishes", "whites", "rant", "trapped", "bolts", "diplomatic", "fringe", "linguistic", "internally", "planetary", "laurent", "ego", "manuel", "gaza", "influenza", "gill", "rude", 
"sang", "steele", "citing", "viewpoint", "nay", "servants", "meanings", "conception", "unemployed", "heavenly", "exeter", "amusement", "middlesex", "curl", "albanian", "overflow", "hastings", "subsidies", "thirds", "willingness", "implicit", "patriotic", "simplify", "darling", "schwartz", "satan", "ornaments", "oppose", "terrific", "definite", "congregation", "regiment", "cheer", "everett", "reviewers", "misleading", "marty", "vine", "vale", "whereby", "deceased", "sparks", "simpler", "captures", "capitalism", "hancock", "falkland", "cur", "mammals", "grape", "russ", "peppers", "deeds", "lively", "inequality", "educator", "premature", "tripod", "immigrant", "demonstrations", "obsolete", "rust", "lon", "interfere", "traps", "shuffle", "wardrobe", "vin", "successes", "racer", "fabrication", "guilt", "sweep", "nash", "exploited", "bladder", "inflammatory", "iss", "immunity", "bets", "doyle", "ducks", "paints", "neighbourhood", "cheating", "carr", "fade", "tastes", "storms", "smiled", "jurisdictions", "scrutiny", "regeneration", "lunar", "differentiation", "shields", "nonsense", "invented", "elaine", "posed", "subjected", "tasting", "gwen", "mob", "expose", "borrowing", "arises", "imf", "precautions", "branded", "manning", "lisbon", "forks", "monk", "boxer", "shining", "weigh", "clerical", "voyager", "hobart", "moose", "dorset", "buenos", "conscience", "crush", "mystic", "solicitation", "rectangular", "fischer", "pooh", "enthusiast", "positively", "shaping", "ich", "afghan", "inspire", "paulo", "torn", "meantime", "pumping", "patented", "revival", "disappear", "lever", "redundant", "regency", "tasty", "gag", "mccarthy", "heck", "civilians", "bark", "carts", "wasted", "cocoa", "invites", "cushion", "reversed", "lynx", "goa", "specimen", "ancestors", "panther", "mixes", "graves", "branding", "examiner", "vineyard", "meadow", "feeder", "mercer", "roms", "goodman", "listener", "chloride", "awaiting", "kane", "becker", "bulls", "orion", "councillor", "hurry", "clarkson", "beneficiary", "hanson", "offspring", "panorama", "roth", "odor", "demanded", "wastes", "clash", "fidelity", "sis", "castro", "flew", "holden", "ale", "sem", "rhapsody", "trumpet", "solitaire", "decreasing", "freezing", "kaiser", "wallis", "criminals", "retire", "rumors", "accomplishments", "emergence", "theatres", "apex", "crimson", "compassion", "needing", "twentieth", "pronounced", "extensively", "stain", "conrad", "wished", "transient", "kicked", "coloring", "curb", "reign", "trivial", "coke", "clauses", "baron", "sensible", "unlawful", "bates", "webs", "swinging", "accountable", "thrust", "proving", "opposing", "novice", "hewitt", "dei", "delightful", "cane", "cruising", "fury", "personalities", "stiff", "todo", "noah", "wore", "christchurch", "traces", "rabbi", "puffy", "weston", "headings", "enthusiasts", "ridiculous", "scattering", "secretaries", "contracted", "elbow", "fights", "scholarly", "detailing", "stark", "roberto", "strongest", "hammond", "padded", "circa", "revise", "contributes", "surroundings", "proficiency", "uranium", "honours", "consolidate", "daniels", "billions", "hut", "stafford", "labrador", "refusal", "lima", "suppression", "weaver", "readiness", "secular", "majesty", "fishery", "teresa", "distributing", "estimating", "outdated", "dues", "pewter", "distress", "pumpkin", "notably", "intends", "trevor", "homosexual", "garment", "supplying", "secondly", "razor", "cough", "cerebral", "grandma", "oceans", "displacement", "backwards", "arrows", "volunteering", "presumably", "plea", "constructive", "bundles", 
"tibet", "pres", "isles", "stretching", "ovens", "garrett", "esther", "abundant", "deductible", "priests", "accompany", "compares", "hesitate", "inspiring", "prey", "deposition", "laurie", "tas", "zodiac", "pavement", "keller", "pedestrian", "fencing", "artery", "inlet", "rub", "violate", "stimulate", "realise", "fluids", "conveniently", "lick", "gov", "stealth", "ter", "ness", "repayment", "canopy", "gloss", "whip", "porch", "pertinent", "lifelong", "promoter", "collegiate", "construed", "interchange", "remotely", "fletcher", "concise", "fibers", "handful", "brains", "curtains", "eaten", "indigo", "retaining", "kelley", "autobiography", "conditioned", "prohibition", "motions", "emphasize", "excite", "rebels", "believing", "hilarious", "salisbury", "gu", "quoting", "sinks", "steep", "dynasty", "creed", "nan", "raiders", "spreads", "elegance", "volatile", "pointers", "sensory", "throne", "chartered", "slopes", "socially", "unfortunate", "seized", "territorial", "leases", "consisted", "randolph", "memoirs", "alkaline", "expire", "och", "midst", "borne", "forgive", "competitor", "mansfield", "neighbours", "marvin", "conversions", "usable", "tempo", "mutations", "readable", "almanac", "conway", "ay", "gail", "responds", "denote", "slayer", "payne", "purchaser", "relies", "inserting", "tibetan", "prepares", "concludes", "waterford", "rodney", "cylinders", "mus", "selects", "fulton", "directing", "nationality", "torch", "zurich", "stretched", "depressed", "encounters", "haunted", "spares", "symmetry", "bout", "salons", "olympia", "hank", "negligence", "screened", "helper", "carlisle", "rancho", "transferring", "stepping", "hacks", "attic", "appetite", "sensation", "piper", "morality", "honorable", "wealthy", "handicap", "skinny", "sewage", "endowment", "demonstrating", "avec", "sonoma", "esta", "defender", "amos", "wretch", "sunlight", "stems", "wo", "ventura", "convey", "ang", "evergreen", "bearings", "govern", "feather", "fond", "sore", "fiat", "sixteen", "blinds", "traits", "tightly", "graded", "successor", "intrusion", "sickness", "guiana", "underneath", "prohibit", "noel", "cans", "abused", "avery", "brushes", "tenth", "anthology", "prosecutor", "smiles", "merged", "auditors", "grandchildren", "desks", "capsule", "aided", "suspend", "eternity", "introductions", "weighing", "currents", "aide", "kindly", "nes", "protests", "sharks", "notch", "minors", "dances", "revealing", "reprinted", "fernando", "mapped", "resurrection", "lieu", "decree", "tor", "discovering", "tuberculosis", "lacks", "horizons", "daytime", "elaborate", "contour", "gamble", "fra", "descent", "gravel", "analyse", "disturbing", "judged", "shutter", "illusion", "ambitious", "ole", "notorious", "ibid", "residue", "reds", "enlarged", "stephens", "transforming", "stripping", "bart", "assert", "fluctuations", "bowie", "archaeological", "inspect", "thrice", "babylon", "edison", "casualty", "musings", "poses", "noir", "eli", "evan", "mushroom", "designate", "scent", "sequel", "gymnastics", "titanic", "knob", "wolves", "exquisite", "upward", "sentenced", "dundee", "principe", "acquiring", "judging", "unchanged", "kicking", "meg", "fines", "grasp", "streak", "ounce", "thirteen", "tragic", "theodore", "buena", "irrelevant", "professionally", "liberties", "sounding", "milano", "toast", "happily", "hooked", "shrink", "knox", "unesco", "mutually", "beaded", "remembering", "boca", "exodus", "compartment", "brittany", "dove", "testified", "iis", "cunningham", "derive", "affinity", "presbyterian", "pretend", "buddhism", "amnesty", 
"borrower", "gloucester", "warrants", "owens", "fairness", "needles", "coll", "quota", "discreet", "versa", "imp", "oi", "mack", "pu", "sung", "lowell", "whichever", "starr", "elliot", "uae", "chooses", "tuscany", "crowded", "tickling", "wee", "unreal", "wounds", "advisers", "manufactures", "physiological", "addison", "charters", "generalized", "unprecedented", "flint", "dummy", "financially", "awake", "sanitation", "swivel", "ally", "dissolved", "cleanliness", "kung", "collectively", "inhibition", "burnt", "solidarity", "frustrated", "muhammad", "alma", "ger", "hanover", "inverse", "clifton", "holt", "isis", "verdict", "nominee", "medals", "dickinson", "christi", "lister", "recurring", "studs", "rhetoric", "modifying", "incubus", "impulse", "surveyed", "creditors", "dull", "tis", "cabins", "commenced", "ballroom", "employing", "satellites", "ignoring", "stevenson", "coherent", "beetle", "converts", "majestic", "bicycles", "omni", "clifford", "critically", "cy", "composers", "localities", "owe", "reciprocal", "accelerate", "hatred", "questioning", "manifest", "indications", "petty", "permitting", "som", "behave", "bees", "zeppelin", "felix", "shiny", "carmel", "encore", "smash", "angelina", "braun", "destructive", "sockets", "claimant", "psa", "ample", "countless", "energies", "repealed", "listeners", "abusive", "merits", "scarf", "strangers", "garland", "voor", "riviera", "apprentice", "obscure", "napoleon", "glamour", "hated", "sigh", "trolley", "principals", "sidney", "spicy", "frankly", "chronological", "itinerary", "fools", "beard", "discoveries", "economical", "miniatures", "wedge", "adjusting", "mock", "peggy", "bats", "patriots", "ruins", "sheila", "dependencies", "benton", "chateau", "denis", "homestead", "changer", "sergeant", "melt", "syrian", "ned", "cypress", "courtney", "cites", "prospectus", "protectors", "interiors", "encouragement", "disadvantages", "abbott", "tailor", "chocolates", "faux", "supervised", "interpreting", "pascal", "tha", "serenity", "ore", "pant", "sheridan", "gallons", "attainment", "sanitary", "cooperate", "dreaming", "fortunate", "mushrooms", "interpretations", "geoffrey", "faults", "silva", "grease", "diablo", "cairns", "premise", "epidemic", "prima", "rite", "cinnamon", "lac", "discharged", "alba", "underworld", "variants", "palms", "lawsuits", "seated", "lattice", "realization", "absorbed", "sirius", "chord", "vous", "turf", "asphalt", "improper", "dilemma", "rebuilding", "livingston", "commenting", "shifted", "tangible", "smoked", "hawks", "irons", "comet", "berg", "baltic", "corrective", "competency", "muse", "probing", "teachings", "tyne", "fowler", "xv", "youngest", "contingent", "refreshing", "syrup", "xii", "warmth", "hawkins", "lust", "correlated", "augustine", "dominion", "verses", "astronomical", "solvent", "luna", "amplitude", "aesthetic", "commercially", "dion", "wolfgang", "completeness", "irregular", "barker", "solids", "capturing", "certify", "consulted", "realised", "jude", "eighteen", "singular", "jennings", "demons", "unacceptable", "redistribute", "coping", "baxter", "outbreak", "abdominal", "deficiencies", "curved", "milestone", "erase", "lien", "nip", "bites", "prose", "marx", "incidental", "toni", "arguing", "vein", "hale", "swear", "bel", "clown", "spontaneous", "summers", "taboo", "equestrian", "malicious", "consume", "amazed", "fourteen", "legislators", "volcano", "capacities", "skeleton", "tsp", "suspects", "displaced", "sounded", "honesty", "dwarf", "bis", "northeastern", "shocks", "rewarding", "battalion", "candid", 
"schooling", "thornton", "schoolgirl", "caesar", "pines", "stellar", "davenport", "locating", "monogram", "philippe", "aix", "ornament", "urges", "sophie", "attacking", "microscope", "threaten", "bait", "badges", "kitten", "brides", "dent", "stealing", "bullets", "emphasized", "glossy", "informations", "haired", "alterations", "pablo", "biographical", "confirms", "cavity", "molded", "vladimir", "ida", "probate", "terrestrial", "completes", "beams", "props", "incense", "formulated", "dough", "stool", "towing", "welch", "rosemary", "millionaire", "turquoise", "exposures", "boone", "substituted", "horde", "paperwork", "nanny", "suburb", "hutchinson", "cohort", "succession", "alliances", "sums", "averaged", "glacier", "pueblo", "rigorous", "relieve", "clarion", "override", "angus", "enthusiastic", "lame", "squeeze", "sar", "burgundy", "struggles", "farewell", "soho", "ashes", "vanguard", "natal", "locus", "evenings", "misses", "troubles", "elton", "purity", "shaking", "witnessed", "cellar", "friction", "prone", "valerie", "enclosures", "mer", "equitable", "fuse", "lobster", "judaism", "atlantis", "amid", "onions", "corinthians", "crosses", "uncomfortable", "sylvia", "furnace", "poisoning", "doubled", "clues", "inflammation", "rabbits", "icc", "transported", "crews", "goodwill", "anxious", "tariffs", "norris", "ly", "baptism", "cutlery", "overlooking", "knot", "rad", "gut", "staffordshire", "factories", "swords", "advancing", "timed", "evolve", "yuan", "esa", "suspicious", "leased", "subscribed", "tate", "dartmouth", "brewing", "coop", "blossom", "scare", "confessions", "bergen", "lowered", "thief", "prisons", "pictured", "feminine", "grabbed", "rocking", "nichols", "blackwell", "fulfilled", "sweets", "nautical", "imprisonment", "employs", "gutenberg", "bubbles", "ashton", "pitcher", "judgments", "muscular", "motif", "illnesses", "plum", "saloon", "prophecy", "loft", "historian", "elm", "facsimile", "hurts", "folded", "sofia", "comprise", "lump", "disposed", "chestnut", "engraved", "halt", "alta", "pastoral", "unpaid", "ghosts", "doubts", "locality", "substantive", "bulletins", "worries", "hug", "rejects", "spear", "nigel", "referee", "transporter", "jolie", "broadly", "ethereal", "crossroads", "aero", "constructing", "smoothly", "parsons", "bury", "blanc", "autonomy", "bounded", "insist", "birch", "slash", "exercised", "detecting", "howell", "digestive", "entertain", "cinderella", "sesame", "duct", "touches", "joanne", "housewife", "pursued", "lend", "corvette", "yachts", "stacy", "christie", "unrelated", "lois", "levi", "stimulating", "mont", "misuse", "cosmos", "speculation", "dixie", "pans", "enforced", "legion", "fulfillment", "assertion", "shook", "lincolnshire", "dismissal", "mah", "shocking", "overland", "prolonged", "isaiah", "backbone", "unanimously", "sausage", "neighboring", "uncommon", "centralized", "stratford", "heidi", "objections", "unpublished", "ames", "slaughter", "enlightenment", "pistol", "juniors", "rockets", "seymour", "arithmetic", "supposedly", "bombay", "originals", "enrichment", "milford", "buckle", "bartlett", "fetch", "kitchens", "wat", "rey", "divers", "townsend", "blackburn", "founders", "sundays", "upside", "admiral", "patron", "sandwiches", "sinclair", "boiler", "anticipate", "induce", "annapolis", "padding", "diagonal", "unite", "cracked", "debtor", "polk", "mets", "shear", "mortal", "sovereignty", "franchises", "rams", "cleansing", "gown", "ponds", "archery", "excludes", "sabbath", "ruin", "trump", "nate", "escaped", "precursor", "mates", "stella", 
"passages", "vu", "cereal", "comprehension", "sy", "tow", "resolving", "drills", "alexandra", "champ", "agreeing", "rented", "deductions", "harrisburg", "brushed", "augmentation", "otto", "annuity", "assortment", "credible", "ik", "cultured", "importing", "deliberately", "openly", "crawl", "theo", "sparkling", "bindings", "convincing", "flaws", "este", "tracing", "deviations", "incomes", "fragile", "jeremiah", "sapiens", "nyt", "olsen", "serbian", "hai", "restoring", "sanchez", "rushing", "behold", "amherst", "alteration", "murdered", "hazel", "ledger", "scarlet", "crushed", "laughs", "connie", "referendum", "modulation", "statues", "depths", "spices", "communion", "uncertainties", "colonies", "followers", "caldwell", "squadron", "bei", "rupee", "subsidy", "demolition", "irene", "felony", "lungs", "monuments", "veronica", "filtered", "growers", "vinci", "adj", "haul", "acknowledgement", "duly", "roasted", "tenders", "inviting", "rig", "ov", "mick", "mustard", "strait", "masterpiece", "obey", "donkey", "jacks", "conceived", "boasts", "praying", "oss", "multiply", "intercourse", "radial", "mare", "instructed", "stole", "kirby", "armour", "summarized", "avalanche", "northampton", "manuscripts", "cary", "exhibited", "disciples", "shaving", "bishops", "kite", "destroying", "humorous", "faa", "corona", "heap", "griffith", "erection", "quasi", "energetic", "disturbance", "saunders", "ribbons", "jew", "exile", "bilder", "reside", "cashier", "jaw", "butterflies", "eats", "knots", "flea", "offences", "anton", "pals", "celebrates", "hail", "armenian", "longitudinal", "historians", "realities", "mentions", "samson", "jumps", "fleming", "optimistic", "wasting", "acclaimed", "seldom", "morrow", "glitter", "giovanni", "lasted", "awhile", "scaled", "contingency", "wiltshire", "vague", "wraps", "constituents", "herd", "handicapped", "exported", "lag", "warns", "harmless", "sting", "bravo", "believers", "dispersion", "curiosity", "resting", "missiles", "persistence", "coarse", "continents", "carpets", "recovering", "submarine", "blessings", "prevailing", "originated", "axe", "sculptures", "intrinsic", "thoughtful", "nicht", "archer", "hertfordshire", "warmer", "calf", "basil", "grouped", "dominate", "orient", "contra", "damaging", "populated", "renee", "boiling", "journeys", "parsing", "splitting", "derbyshire", "abandon", "rave", "ej", "dy", "cigars", "nicolas", "inference", "ras", "recalled", "transformer", "weiss", "declarations", "rib", "chattanooga", "giles", "drafts", "excursions", "jerk", "shack", "marrow", "tavern", "bathing", "lambert", "epilepsy", "allowances", "goggles", "ses", "unhappy", "foregoing", "certainty", "sleek", "gerard", "antarctic", "ord", "successive", "neglected", "ariel", "monty", "cafes", "classmates", "hitch", "fracture", "ama", "foremost", "nineteenth", "chesapeake", "mahogany", "actresses", "clarence", "ernst", "buster", "moderated", "mal", "nassau", "flap", "ignorant", "allowable", "compositions", "sings", "marcos", "sorrow", "carte", "canned", "collects", "treaties", "endurance", "teaspoon", "insulated", "dupont", "harriet", "philosopher", "rectangle", "woo", "queer", "pains", "decatur", "wrapper", "ahmed", "buchanan", "drummer", "sobre", "ceremonies", "satisfies", "appellate", "comma", "conformity", "avant", "supper", "fulfilling", "hooded", "instability", "seminary", "presenter", "offenses", "emulation", "lengthy", "sonata", "fortress", "contiguous", "perez", "inaccurate", "explanatory", "settlers", "stools", "ministerial", "xavier", "torah", "fao", "publishes", "stacks", 
"owning", "andersen", "sermon", "facilitating", "complained", "ferdinand", "taps", "thrill", "lagoon", "undoubtedly", "withheld", "insisted", "reluctant", "headaches", "ramsey", "oath", "pigeon", "rivals", "freed", "constrained", "parrot", "magnum", "invoked", "invaluable", "keystone", "inclined", "gala", "cheek", "traction", "utterly", "gavin", "illuminated", "lasts", "gloucestershire", "psychologist", "dane", "claudia", "perpetual", "solicitor", "clustering", "glimpse", "verbatim", "innocence", "quicker", "grandparents", "cardboard", "attributable", "sketches", "angelo", "tertiary", "exhausted", "smarter", "shelters", "attain", "dora", "inconvenience", "tang", "vaccination", "farther", "chats", "riot", "fats", "mandarin", "dungeon", "germans", "lilly", "shire", "mosquito", "kashmir", "lyons", "putnam", "corpse", "speedy", "ming", "lush", "barrels", "transformations", "analogue", "werner", "clyde", "honorary", "irwin", "brewer", "exchanged", "adhere", "fran", "rafael", "ccc", "enquire", "toilets", "mains", "whales", "lindsey", "parity", "partitions", "grim", "hubbard", "prism", "chasing", "flop", "aggregation", "shelley", "batting", "borrowed", "rests", "toss", "depicted", "grapes", "proposing", "winding", "ripped", "cobalt", "pity", "downward", "catalogues", "aspire", "harvesting", "garfield", "groom", "jewels", "saturated", "georges", "quincy", "doughty", "weeds", "stripped", "clive", "fixture", "canary", "steadily", "imagined", "darby", "woke", "fills", "proportions", "grips", "clergy", "solicitors", "moderately", "altar", "salvage", "stanton", "creators", "kilometres", "cuff", "repeating", "empires", "oyster", "sturdy", "massacre", "undergo", "risen", "blended", "imperative", "beg", "digging", "lantern", "catches", "evangelical", "eaton", "ruler", "henri", "tokens", "piping", "swept", "staring", "seventy", "troop", "arose", "decomposition", "chatham", "becky", "elders", "interpreters", "supporter", "klaus", "conquest", "repairing", "assemble", "whistle", "dresden", "diversified", "fertilizer", "analytic", "predominantly", "amethyst", "woodward", "rewritten", "concerto", "adorable", "ambition", "torres", "apologize", "restraint", "eddy", "condemned", "berger", "parole", "corey", "kendall", "slips", "trays", "stewardship", "esq", "kisses", "kerr", "regulating", "flock", "exporting", "arabian", "bending", "boris", "ammunition", "vega", "pleasures", "shortest", "denying", "shave", "sexe", "disruption", "galway", "colt", "artillery", "furnish", "precedence", "grinding", "rubbish", "missionary", "knocked", "swamp", "pitching", "bordeaux", "manifold", "wf", "tornado", "possessed", "upstairs", "turtles", "vauxhall", "welcoming", "learns", "manipulate", "dividing", "hickory", "renovated", "inmates", "slices", "cody", "lawson", "quo", "damned", "beethoven", "faint", "rebuilt", "proceeded", "lei", "tentative", "peterborough", "fierce", "jars", "authenticity", "hips", "rene", "gland", "wigs", "resignation", "striped", "zion", "blends", "garments", "fraternity", "tapestry", "originating", "stu", "chap", "blows", "inevitably", "converse", "gardener", "winnie", "ita", "higgins", "warwickshire", "penguins", "attracting", "jeeves", "harp", "wes", "denton", "anthem", "tack", "whitman", "nowadays", "woodstock", "sack", "inferior", "abuses", "inspected", "deb", "jockey", "indicative", "incumbent", "ithaca", "edmund", "upholstery", "aggression", "practiced", "ella", "casualties", "monarch", "housed", "administering", "temptation", "havana", "roe", "nasal", "restrictive", "costing", "ranged", "hier", 
"spruce", "paradox", "billings", "jeanne", "oxidation", "marin", "halfway", "amending", "conflicting", "georgian", "compensate", "recherche", "loser", "claus", "braves", "cracking", "sued", "shoots", "interrupted", "hemisphere", "miranda", "clover", "kindness", "porto", "directs", "jolly", "snakes", "swelling", "spanning", "politician", "femme", "unanimous", "railways", "approves", "scriptures", "misconduct", "lester", "resides", "wording", "obliged", "perceive", "rockies", "siege", "exercising", "voluntarily", "atkinson", "nord", "truths", "grouping", "wolfe", "thereto", "authorizing", "enamel", "toby", "radiant", "virgins", "firstly", "martini", "butte", "reeves", "suspicion", "disadvantage", "bastard", "spends", "hicks", "pratt", "pedigree", "fraudulent", "sherwood", "forgiveness", "almond", "har", "petitions", "francais", "trenton", "chalk", "omar", "alexis", "axle", "puppet", "cultivation", "surveying", "grazing", "pillar", "mirage", "questionable", "seaside", "precinct", "renamed", "cobb", "unbelievable", "soluble", "piracy", "rowing", "siding", "hardest", "forrest", "reminders", "negro", "blanca", "equivalents", "johann", "pineapple", "wrath", "opal", "simplest", "patrons", "peculiar", "toon", "europeans", "commence", "descendants", "redmond", "safeguard", "lars", "obsession", "grind", "albeit", "billiards", "clint", "bankers", "righteous", "eo", "redistribution", "freaks", "tra", "sincere", "intentionally", "blitz", "tended", "censorship", "cactus", "viva", "attained", "blew", "howe", "nap", "splendid", "janice", "lava", "leonardo", "sucked", "scissors", "cooks", "sharply", "granada", "laurence", "rebellion", "rainy", "tho", "regent", "evelyn", "vinegar", "vie", "pluto", "gil", "vail", "fisherman", "misery", "undergoing", "limerick", "envy", "sweeping", "healthier", "ussr", "preface", "jameson", "grievance", "unread", "sentiment", "pencils", "galloway", "forged", "viola", "disclosures", "provence", "computerized", "rustic", "rumor", "dillon", "shah", "eleanor", "deception", "conducts", "divorced", "rushed", "weighs", "magnolia", "diver", "disappointment", "castles", "notions", "plateau", "dexter", "palette", "blaze", "wreck", "threatens", "strengthened", "sammy", "wakefield", "devastating", "centro", "arabs", "bild", "robbery", "eine", "jasmine", "crochet", "brock", "crowds", "hoops", "macon", "stamped", "increment", "ju", "ideals", "chloe", "ape", "gee", "apologies", "malignant", "dismiss", "preceded", "lawful", "stag", "crosby", "rash", "gateways", "collapsed", "horns", "diversion", "fantasies", "beginnings", "reversal", "lex", "presses", "ordination", "oxfordshire", "yves", "tandem", "boil", "deliberate", "gagged", "surprises", "abe", "roc", "barley", "potent", "vo", "amusing", "mastering", "nerves", "retains", "chimney", "naomi", "proverbs", "risky", "mistaken", "carving", "miracles", "clair", "slipped", "realism", "crete", "fractions", "bloodhound", "sherry", "desperately", "indies", "tulip", "madame", "remedial", "vain", "bert", "dalton", "bologna", "departing", "maze", "barefoot", "remuneration", "bohemian", "imposing", "damon", "tivoli", "rode", "amen", "marching", "evacuation", "owing", "warp", "catholics", "imo", "faculties", "denies", "reinforce", "inception", "draper", "bowman", "subversion", "benny", "spires", "barney", "homosexuality", "declares", "masonry", "medicinal", "accrued", "temples", "realizing", "annum", "cemeteries", "indoors", "telescopes", "magellan", "champs", "averaging", "salads", "addicted", "flashlight", "disappointing", "eighty", "unlocked", 
"scarce", "roche", "ropes", "spiders", "obedience", "plague", "diluted", "canine", "gladly", "brewery", "lineage", "mehr", "brew", "vaughan", "kern", "julius", "coup", "cannes", "morse", "dominance", "piston", "itu", "cords", "revisited", "cass", "sealing", "topped", "rag", "despair", "fore", "absorb", "injected", "alps", "commodore", "enlisted", "prophets", "supernatural", "overlooked", "ditch", "feared", "prelude", "rowe", "slick", "limestone", "commentaries", "manpower", "lec", "chunk", "reels", "lob", "slept", "gregg", "drafted", "chalet", "hopper", "sus", "specialization", "abstraction", "ludwig", "scandinavian", "detained", "luncheon", "zenith", "browns", "waits", "tenor", "softly", "plenary", "scrub", "wilkinson", "limb", "intestinal", "poe", "refusing", "suffers", "occupy", "gan", "bethlehem", "caves", "authoritative", "celestial", "immense", "audrey", "merlin", "aiming", "seizure", "stuttgart", "diplomacy", "differing", "foreigners", "limp", "capitalist", "mute", "prescott", "protestant", "metre", "tricky", "ordinances", "koch", "topaz", "ans", "imaginary", "albion", "sutherland", "dar", "dart", "wrought", "robe", "theresa", "heidelberg", "multitude", "tutors", "ezra", "housekeeping", "captive", "kettle", "visitation", "chr", "gibbs", "baggage", "dusty", "patty", "serena", "satire", "tortured", "pioneers", "crate", "episcopal", "moonlight", "mast", "unfinished", "goth", "cared", "affection", "sworn", "bowen", "vicious", "educating", "kin", "cozy", "mackenzie", "slippers", "earthquakes", "hayward", "wandering", "comb", "liquids", "beech", "vineyards", "amer", "zur", "frogs", "consequential", "unreasonable", "osborne", "stimulus", "economists", "miners", "agnes", "constituency", "rocker", "acknowledges", "alas", "sawyer", "maori", "tense", "predicting", "filipino", "cooled", "prudential", "basel", "migrant", "devotion", "invoke", "arte", "leaning", "paddle", "watkins", "oxley", "anterior", "chop", "rooted", "onyx", "benches", "illumination", "freedoms", "foolish", "finale", "weaker", "foley", "fir", "stirling", "moran", "compose", "nausea", "comfortably", "hoop", "temps", "clearer", "floods", "fritz", "mover", "modeled", "erica", "malaga", "sustaining", "repaired", "diocese", "francois", "obituary", "painters", "thistle", "tem", "sleepy", "footnotes", "rupert", "shrine", "purified", "striving", "dire", "attendant", "gull", "jour", "mir", "northumberland", "memoir", "betsy", "meredith", "fauna", "cliffs", "hayden", "roadside", "smells", "dispose", "waking", "feathers", "reflex", "falcons", "spurs", "sion", "crashed", "travelled", "urgency", "gould", "brit", "eliza", "graduating", "rims", "harmonic", "darts", "shin", "intriguing", "flaw", "tails", "emulator", "discarded", "bibles", "hangs", "joanna", "synonyms", "stranded", "horton", "dolce", "hercules", "pane", "browning", "angular", "veins", "folds", "sneak", "incorrectly", "avoidance", "sauces", "conquer", "probabilities", "immortal", "mariners", "endeavor", "creole", "mateo", "teas", "settling", "badger", "mohammed", "saturdays", "partisan", "pri", "gratitude", "impress", "willy", "anon", "eminent", "ribs", "communicated", "exceptionally", "quilts", "splits", "subscribing", "companions", "cheques", "edith", "screwed", "magna", "sectional", "fashionable", "polly", "tidal", "ballots", "hog", "testify", "poole", "boycott", "vitality", "clerks", "crust", "bothered", "traverse", "vengeance", "dolly", "garrison", "sal", "barb", "huns", "miner", "fashions", "barr", "analogy", "insomnia", "constituent", "aura", "cecil", "sponge", "sect", 
"diner", "anticipation", "enduring", "scarborough", "regis", "winters", "nous", "explosives", "mound", "xiv", "backgammon", "ox", "snatch", "mole", "obs", "owed", "ethan", "kissed", "buff", "butcher", "psalms", "rum", "chefs", "engraving", "constituted", "hamlet", "clad", "excursion", "inverness", "orb", "grange", "resigned", "fled", "enriched", "harrington", "brandy", "swings", "scion", "elle", "reptiles", "vortex", "swallowing", "purses", "bodily", "xiii", "awe", "beaumont", "australasia", "mandy", "hoods", "fireplaces", "requisite", "retrospective", "emphasizes", "lizard", "hawthorne", "bouquets", "wears", "shropshire", "baja", "regal", "safeguards", "cabbage", "cub", "spectator", "arrests", "circumstance", "numbering", "sliced", "reproductions", "byrd", "sidewalk", "prob", "breaker", "curly", "alberto", "asserted", "jealous", "refinement", "durban", "learnt", "hound", "squirrel", "concealed", "wharf", "rhythms", "departures", "shotgun", "stimulated", "chickens", "langley", "briggs", "cheyenne", "lug", "surveyor", "maize", "extinction", "unaware", "discretionary", "ry", "psalm", "scented", "gowns", "spying", "nicholson", "lied", "ek", "bloc", "recurrent", "talbot", "leaks", "tam", "swell", "obstacle", "ville", "mantle", "chico", "driveway", "irony", "gesture", "fairbanks", "parfum", "armies", "hy", "hugs", "greenfield", "santos", "owls", "cutters", "acquires", "ceased", "merging", "plaques", "breadth", "mammoth", "convictions", "intentional", "sophia", "prohibits", "innings", "reorganization", "pronunciation", "concession", "measurable", "ami", "parcels", "pastry", "manners", "phosphorus", "viper", "hid", "volcanic", "gypsy", "thieves", "preaching", "repeal", "uncovered", "hemp", "eileen", "proficient", "pelican", "apocalypse", "cousins", "discharges", "giorgio", "admire", "nk", "poured", "usefulness", "unsolicited", "binds", "unveiled", "burt", "titus", "suffix", "installment", "spindle", "heavens", "wink", "mister", "rounding", "inorganic", "flare", "scholastic", "wight", "withholding", "foliage", "nod", "ocr", "fife", "generals", "crank", "goats", "autographs", "stub", "fundamentally", "creamy", "exposition", "rains", "buckley", "middleton", "organise", "tort", "brace", "novelties", "gigantic", "abdul", "sheldon", "ryder", "octave", "struts", "ud", "suppress", "harding", "dams", "deserved", "violates", "rutherford", "separates", "proofs", "precedent", "confirming", "garth", "nolan", "mach", "facilitated", "paolo", "metaphor", "bridget", "infusion", "jessie", "organising", "argus", "mango", "spur", "jubilee", "landmarks", "polite", "sith", "thigh", "paving", "cyclone", "perennial", "jacqueline", "seventeen", "meats", "wie", "bulldog", "cleavage", "analysed", "uma", "gradual", "brethren", "embodiment", "violating", "recruited", "toilette", "trailing", "pact", "honourable", "lulu", "windy", "punished", "chronology", "mastery", "thermometer", "cranberry", "kan", "downhill", "vita", "steer", "nesting", "vogue", "aired", "outward", "whisper", "ipswich", "compromised", "confession", "deprived", "benedict", "vodka", "molding", "zaire", "bricks", "communism", "leopard", "flowering", "wig", "jingle", "bounty", "arcadia", "fishes", "ringing", "knobs", "taurus", "whiskey", "absurd", "tolerant", "stoves", "enactment", "embryo", "ska", "nora", "salts", "marietta", "furious", "iteration", "vida", "ceilings", "dispenser", "respecting", "approving", "unsafe", "separating", "soups", "residing", "richie", "markings", "moist", "trina", "drained", "mule", "cummings", "cessation", "append", "motive", 
"pests", "seasoned", "sunflower", "duel", "bernardino", "stocked", "bethel", "entre", "sunderland", "doris", "motives", "reinforcement", "dwight", "provost", "guessing", "tal", "mead", "harlem", "throttle", "gong", "ber", "sympathetic", "fridays", "isolate", "unconscious", "bays", "faulty", "affidavit", "messiah", "infamous", "pleasing", "seizures", "appealed", "surveyors", "tenacious", "waterfall", "sensual", "persecution", "petit", "burgess", "gaze", "chlorine", "freshly", "saxon", "cabo", "rye", "isabella", "monies", "assassination", "remarkably", "pointe", "stall", "deere", "entirety", "destined", "marcel", "lad", "hulk", "ora", "bal", "flores", "olivier", "portage", "dwellings", "informing", "yellowstone", "characterize", "ricardo", "yourselves", "rotterdam", "hostage", "cracker", "anglican", "monks", "compliment", "camino", "storey", "scotch", "sermons", "remembers", "freddie", "contention", "juliet", "adjunct", "guernsey", "bangor", "persia", "axes", "stirring", "wil", "haze", "pits", "utter", "bottled", "ants", "gastric", "influencing", "rents", "christy", "theirs", "mattresses", "donovan", "lax", "colts", "rehearsal", "strauss", "reputable", "wei", "tuck", "rei", "slab", "lure", "ren", "archbishop", "ling", "incompatible", "emblem", "roadway", "overlapping", "walters", "dunes", "murders", "miserable", "unsuccessful", "decorate", "appleton", "bottoms", "revocation", "vomiting", "chesterfield", "exposing", "pea", "tubs", "simulate", "medina", "thankful", "alaskan", "friedrich", "elephants", "pinch", "flynn", "braces", "calhoun", "deficient", "annotations", "filth", "moderation", "worrying", "outrageous", "kraft", "blackboard", "nitrate", "skates", "comstock", "hers", "grin", "footprint", "tunnels", "crises", "trillion", "comforter", "cashmere", "heavier", "meteorological", "spit", "labelled", "darker", "salomon", "globes", "dissent", "daly", "choral", "unrestricted", "happenings", "leicestershire", "neu", "contempt", "socialism", "hem", "edible", "anarchy", "arden", "clicked", "ineffective", "drawers", "byrne", "acme", "leakage", "shady", "chemist", "evenly", "reclamation", "rove", "lionel", "praised", "rhymes", "blizzard", "erect", "refining", "concessions", "commandments", "malone", "confront", "vests", "lydia", "coyote", "breeder", "electrode", "pollen", "drunken", "mot", "avis", "valet", "cheng", "shrubs", "watering", "barrow", "eliot", "jung", "transporting", "rifles", "posterior", "aria", "elgin", "excise", "poetic", "mortar", "blamed", "rae", "recommending", "inmate", "dirk", "posture", "thereon", "valleys", "declaring", "commencing", "armada", "wrench", "thanked", "arranging", "thrilled", "bas", "amelia", "jonah", "discomfort", "scar", "indictment", "apology", "collars", "andover", "pudding", "plato", "examiners", "salzburg", "rot", "possesses", "squared", "needless", "pies", "palma", "barnett", "ther", "heterogeneous", "aspirations", "fences", "excavation", "luckily", "rutland", "lighted", "pneumonia", "monastery", "erected", "expresses", "migrate", "carton", "lorraine", "councillors", "hague", "transforms", "ammonia", "roxy", "outlaw", "saws", "bovine", "dislike", "systematically", "ogden", "interruption", "demi", "imminent", "madam", "tights", "compelled", "criticized", "hypertext", "electra", "communal", "landlords", "emu", "libby", "seite", "dynamite", "tease", "motley", "aroma", "pierced", "translates", "mais", "cognition", "cain", "verona", "syn", "delegated", "chatting", "punish", "fishermen", "conforming", "causal", "stringent", "rowan", "assigning", "dwell", 
"hacked", "inaugural", "awkward", "weaving", "metropolis", "psychologists", "diligence", "stair", "dine", "enforcing", "struggled", "lookout", "arterial", "injustice", "mystical", "ironing", "commanded", "woodlands", "guardians", "manifesto", "slap", "jaws", "finn", "pedestal", "widening", "underwood", "saline", "sonny", "longevity", "paw", "isabel", "sterile", "botany", "dissolution", "pauline", "quart", "bison", "suppressed", "allegro", "materially", "cit", "amor", "xvi", "fungi", "phyllis", "bengal", "scrolls", "awakening", "fairies", "prescribe", "greed", "nominate", "sparkle", "autograph", "migrating", "refrain", "lastly", "overcoming", "wander", "kona", "relieved", "luc", "elena", "intermittent", "ante", "vols", "revolving", "bundled", "covert", "crater", "leah", "favored", "bred", "fractional", "fostering", "thence", "birthplace", "bleed", "reverend", "transmitting", "serie", "neptune", "caucasian", "goblet", "inventions", "dea", "practicable", "fronts", "ancestor", "russians", "incur", "canonical", "nodded", "confronted", "believer", "australians", "declines", "peacock", "utmost", "yates", "leroy", "helpers", "elapsed", "academies", "tout", "gre", "imitation", "harvested", "dab", "hopeful", "furnishing", "negatively", "residences", "spinach", "liquidation", "predecessor", "cheeks", "hare", "beasts", "philanthropy", "peanuts", "discovers", "discard", "cavalry", "breakers", "quorum", "forwards", "prevalent", "plat", "exploits", "dukes", "offended", "trimmed", "py", "worcestershire", "bonn", "prostitution", "mosque", "horseback", "vested", "terribly", "earnest", "homme", "clancy", "tory", "rossi", "oldham", "gonzales", "vor", "confederate", "presumed", "annette", "climax", "blending", "weave", "postponed", "philosophers", "speeding", "creditor", "exits", "pardon", "oder", "abby", "teller", "mandates", "siena", "veil", "peck", "custodian", "dante", "lange", "quarry", "seneca", "oceanic", "tres", "helm", "burbank", "festive", "rosen", "alla", "preserves", "ingram", "jess", "secretion", "insult", "scraps", "waived", "cured", "buggy", "kennel", "drilled", "souvenirs", "prescribing", "slack", "gin", "differentiate", "jays", "pilgrim", "vines", "susceptibility", "ambiguous", "disputed", "scouting", "royale", "instinct", "gorge", "righteousness", "carrot", "opaque", "bullying", "saul", "flaming", "apis", "marian", "liens", "caterpillar", "remington", "chew", "benefited", "prevail", "musik", "undermine", "omission", "boyle", "mio", "diminished", "jonas", "locke", "cages", "jolla", "capitals", "correctness", "implication", "pap", "banjo", "shaker", "natives", "tive", "stout", "rewarded", "athena", "deepest", "matthias", "duane", "sane", "climbed", "corrupted", "relays", "hanna", "husbands", "fading", "colchester", "persuade", "roaming", "determinations", "weighed", "ashamed", "concierge", "gorilla", "gatherings", "endure", "nom", "cheltenham", "dickens", "juniper", "repetition", "siberian", "preparatory", "fielding", "dune", "hee", "adler", "yosemite", "cursed", "youths", "migrants", "massey", "tumble", "stare", "unlocking", "missy", "meade", "contradiction", "helium", "wonderfully", "dug", "congenital", "trojans", "insanity", "embraced", "finely", "authenticated", "reformed", "tolerate", "lest", "adhesion", "tic", "noticeable", "cette", "aesthetics", "smoker", "benign", "hypotheses", "afforded", "aisle", "dunno", "blur", "evidently", "limbs", "unforgettable", "punt", "tanned", "altering", "bunker", "multiplication", "paved", "fabricated", "pasture", "richest", "cruelty", "mormon", "scots", 
"genuinely", "neighbouring", "plugged", "tyson", "souvenir", "mifflin", "cucumber", "occurrences", "marshal", "anders", "seize", "decisive", "spawn", "blanks", "dungeons", "sailors", "stony", "fayette", "shelving", "annals", "sadness", "periodical", "moe", "dime", "losers", "punta", "flavour", "crypt", "accomplishment", "onwards", "bogus", "carp", "prompts", "witches", "skinner", "dusk", "nouveau", "customary", "vertically", "crashing", "cautious", "possessions", "urging", "passions", "faded", "counterpart", "utensils", "secretly", "tying", "lent", "magician", "indulgence", "johan", "melted", "lund", "fam", "nel", "extremes", "puff", "galileo", "bloomfield", "obsessed", "flavored", "groceries", "motto", "singled", "alton", "staple", "pathetic", "craftsman", "irritation", "rulers", "collisions", "militia", "eis", "conservatory", "bananas", "adherence", "defended", "grille", "elisabeth", "claw", "pushes", "alain", "flagship", "kittens", "illegally", "deter", "tyre", "furry", "cubes", "transcribed", "bouncing", "wand", "cavalier", "ish", "rinse", "outfits", "charlton", "respectfully", "ulster", "tides", "chu", "weld", "venom", "writ", "patagonia", "dispensing", "puppets", "tapping", "immersion", "explode", "toulouse", "escapes", "berries", "happier", "mummy", "punjab", "stacked", "brighter", "cries", "speciality", "warranted", "ruined", "damp", "sanity", "ether", "suction", "crusade", "rumble", "correcting", "shattered", "heroic", "retreats", "formulate", "sheds", "anomalies", "homogeneous", "humphrey", "spheres", "belonged", "assigns", "sofas", "croix", "cushions", "fern", "defenders", "odessa", "lore", "whipped", "vox", "dinners", "rosie", "genealogical", "terre", "selfish", "eventual", "nach", "mitigate", "jamestown", "elisa", "shelton", "boiled", "neville", "natasha", "endeavour", "roswell", "haute", "herring", "unfamiliar", "expectancy", "deterioration", "proclaimed", "arid", "coincidence", "idiots", "mona", "muddy", "nuevo", "hitchcock", "cid", "neighbour", "raspberry", "illusions", "spikes", "enumeration", "suche", "permissible", "yielded", "nuisance", "siam", "latent", "marcia", "drowning", "spun", "shalt", "ric", "loch", "commanding", "sparrow", "poorest", "hector", "brotherhood", "milling", "sinking", "sulphur", "wicker", "balm", "figs", "browne", "nephew", "confess", "chit", "chaotic", "alexandre", "lays", "principally", "visor", "mundo", "jarvis", "drip", "traced", "outright", "melodies", "myriad", "stains", "sandal", "rubbing", "naive", "wien", "skeptical", "remembrance", "detects", "dragged", "foreman", "allegiance", "conduit", "dependable", "echoes", "ladders", "prudent", "glowing", "alchemy", "linden", "sven", "geographically", "alternating", "tristan", "audible", "folio", "presiding", "mans", "waterways", "aff", "fractures", "apprenticeship", "childbirth", "dumped", "barre", "rama", "johannes", "fiery", "convex", "richer", "mop", "urn", "soleil", "connor", "northamptonshire", "biscuits", "disclaims", "sich", "restless", "unanswered", "paired", "vaults", "ahmad", "tossed", "caucus", "cooke", "pillars", "katy", "zoe", "overwhelmed", "salute", "parody", "compensated", "lacked", "circulated", "soo", "maltese", "acorn", "bosses", "pint", "ascension", "ply", "mornings", "mentioning", "flagstaff", "pretoria", "thrive", "rightly", "paragon", "basal", "persist", "wilde", "indispensable", "illicit", "liar", "pledged", "pictorial", "curling", "ares", "smoky", "opus", "aromatic", "flirt", "slang", "emporium", "princes", "restricting", "promoters", "soothing", "freshmen", "departed", 
"aristotle", "finch", "inherently", "krishna", "forefront", "largo", "amazingly", "plural", "dominic", "skipped", "hereinafter", "nur", "extracting", "analogous", "hebrews", "tally", "unpleasant", "uno", "tempted", "blindness", "creep", "staining", "shaded", "cot", "plaster", "novo", "hearted", "obstruction", "agility", "complying", "otis", "overture", "newcomers", "noteworthy", "agile", "sacks", "ionic", "stray", "runaway", "slowing", "watchers", "supplemented", "poppy", "monmouth", "frenzy", "jargon", "kangaroo", "sleeper", "elemental", "unnamed", "doncaster", "particulars", "jerking", "bungalow", "bazaar", "predicate", "recurrence", "recruits", "sharper", "tablespoons", "supervise", "termed", "frauen", "stamping", "coolest", "reilly", "basque", "ire", "pegasus", "silhouette", "dorado", "daring", "realms", "maestro", "turin", "gus", "forte", "tipping", "holster", "fiddle", "crunch", "leipzig", "bard", "kellogg", "reap", "exemplary", "caliber", "apostle", "playful", "icelandic", "multiplied", "enchanted", "belgrade", "styled", "commanders", "thor", "waive", "bethany", "vance", "soprano", "polishing", "marquis", "wen", "translating", "frontiers", "adjoining", "greet", "acclaim", "hardship", "hast", "miriam", "cavaliers", "rollers", "carleton", "pumped", "differentiated", "sonia", "verifying", "almighty", "vel", "intuition", "revoked", "openness", "circulating", "bryce", "ilo", "latch", "verbs", "drank", "darlington", "slippery", "galerie", "outpost", "seville", "mira", "chatter", "santo", "lettuce", "raging", "tidy", "jong", "oppression", "bows", "yielding", "torso", "occult", "expeditions", "nok", "hooker", "lorenzo", "beau", "subordinate", "lilies", "articulate", "ecstasy", "sweetheart", "fulfil", "calcutta", "hobbs", "mediator", "tad", "cultivated", "rang", "disconnected", "consulate", "wilkes", "disagreement", "strands", "sicily", "compost", "adjourned", "familiarity", "erroneous", "pulses", "theses", "stuffing", "jeux", "wilton", "flooded", "reverted", "crackers", "greyhound", "corsair", "ironic", "wards", "unsupported", "hinge", "ultima", "cockpit", "venetian", "sew", "carrots", "faire", "laps", "memorials", "resumed", "conversely", "emory", "stunt", "excuses", "vitae", "hustle", "stimuli", "upwards", "witty", "transcend", "loosely", "anchors", "hun", "atheist", "capped", "oro", "liking", "preacher", "complied", "intangible", "compassionate", "substitutes", "flown", "frau", "dubbed", "silky", "vows", "macy", "distorted", "nathaniel", "attracts", "bern", "qualifies", "grizzly", "micah", "hurting", "homicide", "await", "sparse", "corridors", "sont", "mcdowell", "fossils", "victories", "chemically", "compliments", "cider", "crooked", "gangs", "segregation", "nemo", "overcast", "inverted", "lenny", "achieves", "forehead", "skye", "percy", "scratches", "conan", "lilac", "intellect", "charmed", "denny", "harman", "hears", "wilhelm", "nationalism", "pervasive", "auch", "enfield", "nie", "clears", "knowingly", "pivot", "undergraduates", "digestion", "mixtures", "soaring", "dragging", "virtues", "flushing", "deprivation", "delights", "foreword", "glide", "transverse", "engagements", "withstand", "newbury", "authorizes", "blooms", "soar", "uniformly", "todos", "piedmont", "empowered", "asi", "lena", "outlying", "slogan", "subdivisions", "deducted", "ezekiel", "totaling", "elijah", "compton", "vigorous", "flee", "biscuit", "creme", "submits", "woes", "waltz", "menace", "emerges", "classify", "paige", "downstairs", "statesman", "cheerful", "blush", "leaflet", "monde", "weymouth", "spherical", 
"favourable", "informs", "dramas", "cher", "billiard", "aut", "malay", "unseen", "optimism", "silica", "kara", "unusually", "widest", "impotence", "medley", "cadet", "redskins", "temper", "asserts", "stew", "hereafter", "retiring", "smashing", "accumulate", "tahiti", "mariner", "collier", "hush", "whispered", "generosity", "vibrating", "lama", "artisan", "akin", "raphael", "lola", "embarrassing", "aqueous", "pembroke", "stockholders", "lillian", "splinter", "ibn", "preferable", "juices", "ironically", "morale", "morales", "solder", "trench", "persuasion", "practise", "lodged", "revolt", "renders", "pristine", "francaise", "shines", "catalan", "auditory", "applause", "trait", "popped", "busted", "basins", "farmhouse", "pounding", "picturesque", "ottoman", "eater", "utopia", "insists", "willard", "lettering", "marlborough", "pouring", "concentrating", "soak", "buckingham", "hides", "goodwin", "manure", "savior", "dade", "secrecy", "wesleyan", "duplicated", "dreamed", "fertile", "hinges", "plausible", "creepy", "narrator", "augustus", "fahrenheit", "hillside", "standpoint", "nationalist", "piazza", "denoted", "oneself", "royalties", "abbreviation", "blanco", "critiques", "stroll", "anomaly", "thighs", "boa", "expressive", "infect", "pers", "dotted", "frontal", "havoc", "ubiquitous", "arsenic", "synonym", "yer", "doomed", "francs", "ballad", "sling", "contraction", "devised", "explorers", "billie", "ravens", "underline", "obscene", "mes", "hymn", "continual", "slowed", "aladdin", "tolerated", "quay", "outing", "instruct", "wilcox", "overhaul", "peruvian", "indemnity", "lev", "imaginative", "weir", "remarked", "portrayed", "clarendon", "ferris", "julio", "spelled", "epoch", "mourning", "phelps", "aft", "plaid", "fable", "rescued", "exploded", "padres", "scars", "whisky", "tes", "uptown", "susie", "batter", "reyes", "vivian", "nuggets", "silently", "pesos", "shakes", "dram", "impartial", "punctuation", "initials", "spans", "pallet", "pistols", "mara", "tanner", "avenues", "dun", "compress", "apostles", "sober", "tread", "legitimacy", "zoology", "steals", "unwilling", "lis", "paddy", "plunge", "pearce", "vos", "sinister", "burr", "arteries", "formations", "vantage", "texans", "diffuse", "boredom", "norma", "crosse", "mondo", "helpless", "wyatt", "spades", "slug", "visionary", "coffin", "otter", "navajo", "earns", "amplified", "recess", "dispersed", "shouted", "shilling", "resemble", "carbonate", "mimi", "discriminate", "stared", "crocodile", "ratification", "vases", "advises", "sind", "coward", "inequalities", "garde", "dyes", "viz", "turbulence", "yell", "fins", "ritchie", "dresser", "rake", "ornamental", "riches", "resign", "injunction", "intervene", "poised", "barking", "josephine", "dread", "dag", "handwriting", "serpent", "tapped", "articulated", "pitched", "wisely", "accustomed", "bremen", "steaks", "playhouse", "superficial", "suns", "josef", "casts", "bunk", "stab", "sanction", "dyer", "effected", "tubular", "moi", "ode", "avoids", "richter", "evidenced", "heinz", "argos", "dit", "larvae", "dyke", "cassidy", "kernels", "mobilization", "amt", "wilkins", "manipulated", "alleviate", "seam", "riddle", "comedies", "fainter", "respectful", "cabaret", "recession", "awaited", "nozzle", "externally", "needy", "wheeled", "booksellers", "darn", "diners", "greeks", "reich", "armored", "weary", "solitary", "photographed", "tweed", "snowy", "pianist", "emmanuel", "acapulco", "surrounds", "knocking", "cosmopolitan", "magistrate", "everlasting", "pigment", "faction", "tous", "argentine", "scandinavia", 
"minnie", "genie", "linn", "handel", "microscopic", "clarified", "coherence", "sensations", "orphan", "conferred", "acp", "disturbances", "chandelier", "embryonic", "carver", "paterson", "delle", "graceful", "intercept", "shouts", "ascertain", "veto", "exhaustive", "annoyed", "bureaucracy", "paz", "stalls", "fined", "bien", "inward", "reflector", "greeted", "hartley", "defenses", "meaningless", "clam", "francesco", "hes", "georg", "negligible", "starch", "melinda", "godfather", "apron", "guts", "ros", "pragmatic", "tyranny", "warehouses", "regimen", "axel", "antony", "hahn", "fluffy", "marianne", "slender", "hereford", "aides", "forma", "absorbing", "cherries", "gaelic", "gomez", "alec", "distinguishing", "glazed", "judd", "dashed", "libyan", "dickson", "distressed", "shouting", "bullock", "villagers", "acknowledgments", "ethiopian", "mermaid", "buds", "sexes", "wilder", "sire", "centred", "confinement", "islanders", "ding", "uncover", "contested", "coma", "husky", "conserve", "bland", "abatement", "originator", "whipping", "skipping", "routed", "rudolph", "abigail", "missionaries", "householder", "plotting", "yan", "succeeding", "elmer", "sails", "schuster", "overlook", "robes", "sham", "fungus", "astonishing", "graveyard", "chunks", "bourne", "revert", "ignores", "popping", "captains", "loaf", "pandora", "gabrielle", "stad", "abel", "enigma", "glands", "militant", "jug", "inferno", "torrents", "outset", "confuse", "yvonne", "attaching", "adept", "doubtful", "ratified", "insecure", "explosions", "trunks", "gareth", "versatility", "lothian", "fem", "intricate", "strata", "depository", "hubert", "proclamation", "beauties", "hybrids", "gillian", "darrell", "irrespective", "imposition", "ensured", "kidnapped", "sai", "cereals", "outrage", "poop", "scrubs", "orchestral", "bellingham", "dripping", "afterward", "devote", "facets", "musique", "frightened", "noises", "ambiguity", "booths", "discourage", "elusive", "speculative", "madeira", "intimacy", "hallway", "whey", "ripping", "mei", "hob", "reloaded", "garry", "ester", "annan", "thriving", "hampers", "bragg", "gracious", "snail", "curt", "demise", "theoretically", "grooves", "sutra", "conveyed", "swine", "typographical", "ellison", "ado", "trophies", "quicken", "werden", "heron", "graft", "moth", "crossings", "derrick", "mash", "germ", "envoy", "breckenridge", "pug", "antoine", "domingo", "resembles", "doorway", "grandson", "tat", "catalina", "redding", "accompaniment", "derivation", "warden", "voir", "tug", "margarita", "clans", "instituted", "notary", "thi", "sociological", "offending", "forgetting", "macedonian", "votre", "reservoirs", "barlow", "tyrone", "halle", "edged", "encompass", "spade", "hermes", "glare", "metaphysical", "insignificant", "exchanging", "pledges", "mentality", "turbulent", "pip", "pup", "fortunes", "sultan", "masked", "casing", "plotted", "haley", "generously", "amounted", "icy", "repression", "reaper", "honoring", "facto", "climatic", "broaden", "begging", "wharton", "sui", "freddy", "bushes", "contend", "restraints", "truncated", "gibbons", "nitric", "atop", "glover", "railroads", "unicorn", "normandy", "floats", "justices", "orderly", "wafer", "puck", "roofs", "reefs", "hover", "quarantine", "detrimental", "molds", "elias", "hou", "subsistence", "chilled", "foe", "citadel", "topography", "leaflets", "wrinkle", "contemplated", "adolescence", "nun", "harmon", "indulge", "bernhard", "hearth", "edna", "embarrassed", "aggressively", "coincide", "maynard", "genoa", "enlightened", "clippings", "radicals", "penetrate", 
"stride", "catastrophe", "greatness", "archie", "parasites", "entertained", "inventors", "ferret", "louisa", "agony", "marseille", "taller", "doubling", "stupidity", "moor", "stephenson", "enrich", "foreground", "revelations", "replying", "incapable", "parte", "acknowledgment", "labyrinth", "africans", "sway", "undergone", "lacey", "preach", "triangular", "disabling", "cones", "inversion", "thankfully", "taxed", "presumption", "excitation", "salesman", "hatfield", "constantine", "confederation", "petals", "imprisoned", "heller", "docks", "landowners", "sul", "juno", "deux", "defiance", "bully", "valiant", "constructions", "youngsters", "toad", "breasted", "banging", "vertigo", "unsatisfactory", "fluent", "rhyme", "eros", "aan", "mcintosh", "suffice", "convened", "nah", "accusations", "debated", "stallion", "equipments", "necessities", "camelot", "deserted", "keepers", "logically", "caravans", "oranges", "bum", "presse", "olga", "contends", "snort", "occupants", "organiser", "vim", "luminous", "crowe", "unparalleled", "anyhow", "waterfalls", "obtains", "antwerp", "ulrich", "hardened", "primal", "straits", "upheld", "wir", "malt", "sinai", "endowed", "cameo", "attire", "blaine", "typewriter", "pomona", "goddard", "fanny", "plagiarism", "milky", "combs", "upland", "unconstitutional", "adopts", "macao", "snaps", "defends", "depicts", "pilgrimage", "elevators", "ohne", "narrowed", "eighteenth", "hurst", "inscription", "ascent", "pisa", "tedious", "pods", "universally", "chewing", "accommodated", "tendencies", "rowland", "welded", "conforms", "reggie", "refreshments", "depict", "coils", "callers", "navel", "arbitrator", "prolific", "nurseries", "footsteps", "indefinitely", "sucker", "bumps", "frightening", "wildly", "sable", "retarded", "neatly", "singleton", "spaniel", "somerville", "worthless", "git", "spool", "jeopardy", "rovers", "voiced", "annoy", "clap", "aspiring", "dazzling", "cornelius", "scientifically", "grandpa", "cornish", "guessed", "kennels", "sera", "axiom", "stamina", "hardness", "abound", "curing", "socrates", "aztec", "confer", "vents", "mater", "oneida", "aiken", "crowned", "sandstone", "adapting", "cranes", "rooster", "proctor", "prehistoric", "balkans", "dictate", "joker", "wiped", "contours", "abdomen", "baden", "tudor", "paws", "villains", "poke", "prayed", "inefficient", "heirs", "parasite", "shortcomings", "cures", "concentrates", "preclude", "fasting", "loudly", "horseshoe", "zeus", "constellation", "recital", "utrecht", "freud", "bedtime", "thinkers", "hume", "reminiscent", "rapport", "ephesians", "dope", "truss", "kiln", "peaches", "depressing", "strangely", "narratives", "sud", "skipper", "gy", "drains", "maxima", "unification", "sous", "testimonial", "khaki", "distributes", "navigating", "slough", "prodigy", "embossed", "mould", "jock", "blasts", "poorer", "anglia", "dyed", "dissatisfied", "bourbon", "staggering", "bismarck", "hoe", "rubbed", "wasp", "bookseller", "fuss", "muir", "uterus", "chimes", "webber", "aggregated", "pico", "exhibiting", "gimme", "nee", "beaufort", "radically", "terminating", "platter", "chamberlain", "steamboat", "brewster", "inferred", "croft", "ism", "uplifting", "penal", "exclusions", "pageant", "henley", "purchasers", "pitchers", "tracts", "morally", "hosiery", "yt", "reptile", "overdue", "cowan", "mohawk", "riots", "hassan", "schwarz", "persuaded", "teasing", "rejecting", "emphasizing", "unbound", "quentin", "shepard", "sacrifices", "delinquent", "contrasting", "nestle", "correspondents", "guthrie", "imperfect", "disguise", "eleventh", 
"embassies", "lapse", "wally", "phenomenal", "civilizations", "friendships", "marjorie", "shrub", "kindred", "reconsider", "sanctioned", "parfums", "condemn", "renegade", "awaits", "hue", "augmented", "amends", "fullest", "shafts", "finer", "ys", "burdens", "invocation", "gillespie", "brooch", "motifs", "nineteen", "griffiths", "invaders", "edmond", "volunteered", "swollen", "liste", "grasses", "scatter", "steward", "ito", "cherished", "smack", "incidentally", "sine", "depleted", "holiness", "divinity", "campaigning", "tougher", "sherlock", "comprehend", "cloak", "pamphlet", "clipper", "umbrellas", "priceless", "mig", "assassin", "exploiting", "cynical", "toro", "etched", "bray", "choke", "underwent", "comforts", "appoints", "keene", "rachael", "swallowed", "imperialism", "mouths", "halter", "ley", "ike", "pumpkins", "shrinking", "roar", "novelist", "potomac", "arroyo", "tipped", "amidst", "insurgents", "wanda", "etching", "discouraged", "gall", "oblivion", "gravy", "inherit", "sprinkle", "stitching", "advisable", "loi", "meme", "gladstone", "jugs", "congregations", "handing", "payer", "ze", "beforehand", "laborer", "watcher", "vibrations", "apes", "strawberries", "abbas", "moods", "dobson", "ives", "soaked", "abridged", "palate", "thierry", "masculine", "realizes", "kahn", "petitioners", "constable", "sayings", "unconditional", "vue", "progressively", "topping", "baird", "chilling", "translucent", "glaze", "newcomer", "branching", "unmarried", "unexpectedly", "funniest", "bona", "scorpion", "mirrored", "sel", "anatomical", "misdemeanor", "tobias", "salle", "infra", "strasbourg", "commemorative", "implicitly", "ewing", "austen", "assurances", "comedian", "rascal", "nid", "roberta", "dizzy", "outbreaks", "annuities", "slit", "whitening", "occupying", "depicting", "ordnance", "verge", "ransom", "nomad", "dagger", "thorn", "preamble", "mor", "spins", "solicit", "provoking", "orchids", "buckets", "spoil", "blazing", "palermo", "snapped", "alligator", "detectives", "rochelle", "nomenclature", "abdullah", "invade", "regulates", "rendezvous", "strives", "trapping", "gardeners", "clemens", "deuteronomy", "diminish", "britannia", "manifestations", "tak", "stitches", "promulgated", "mediocre", "passports", "ayrshire", "invent", "eagerly", "damascus", "reformation", "hypocrisy", "parishes", "trooper", "bun", "compendium", "disappears", "hymns", "monotone", "palsy", "propositions", "locomotive", "debating", "cuffs", "prosperous", "famine", "orally", "elliptical", "grabbing", "jogging", "stipulated", "persuasive", "horrors", "bearer", "pastors", "acquainted", "dependents", "dizziness", "ture", "brilliance", "nicky", "originate", "respectable", "horace", "prohibiting", "disappearance", "morals", "invaded", "spoiled", "monet", "pickle", "quaker", "haunting", "manipulating", "tangent", "tempest", "petra", "dominique", "waving", "dai", "uneven", "plata", "plurality", "warrington", "adventurous", "luigi", "bayou", "accueil", "confluence", "blossoms", "succeeds", "orphans", "louder", "boilers", "reunions", "yelling", "trough", "leaned", "quadrant", "discrepancy", "slid", "antioch", "tonic", "magnus", "harrow", "jig", "reckless", "raining", "peasant", "vader", "qua", "figuring", "crushing", "thorpe", "ordained", "hodges", "saucer", "chinook", "passover", "byzantine", "tomas", "triangles", "curvature", "rites", "sideways", "devious", "dreamer", "acknowledging", "estuary", "burglary", "pouches", "thrilling", "spectacle", "sentiments", "ditto", "nana", "waiter", "oddly", "suchen", "raft", "cul", "nutshell", 
"arrogant", "hermann", "induces", "thrift", "sae", "admired", "stunts", "iaea", "youthful", "stumbled", "emitted", "sufficiency", "tempered", "slipping", "solitude", "cylindrical", "destroyer", "fide", "undesirable", "mongolian", "weakly", "parsley", "undue", "stunned", "smiths", "magyar", "hostility", "groves", "pursuits", "reflux", "adaptations", "jurisprudence", "invariably", "lecturers", "progressed", "brow", "elves", "kearney", "graeme", "kimball", "chant", "turnkey", "sprays", "tighten", "revolver", "crowns", "intermediary", "matted", "apricot", "tufts", "cuckold", "unreliable", "rosewood", "parry", "existent", "tongues", "dictator", "jehovah", "fanatics", "coeur", "perpendicular", "fay", "hedgehog", "raves", "mamma", "entails", "folly", "wheeling", "sharpe", "hawthorn", "mural", "bankrupt", "wager", "purge", "interpolation", "adjournment", "pitfalls", "stationed", "ambrose", "nightmares", "aggravated", "deem", "melville", "cavern", "ene", "sumner", "descended", "disgusting", "flax", "weakened", "imposes", "withdrew", "tart", "guerrilla", "spoons", "persona", "poser", "tram", "distinctions", "peabody", "alia", "iced", "faulkner", "scarcely", "excused", "fused", "madeleine", "roaring", "witchcraft", "stopper", "fibres", "cullen", "crested", "stump", "scalp", "gunn", "erwin", "conductors", "criticisms", "hadley", "diplomat", "sylvester", "melon", "tablespoon", "manganese", "siren", "clasp", "olives", "nino", "summons", "lucrative", "porous", "shrewsbury", "bile", "siegel", "cara", "ese", "ils", "hinduism", "elevations", "thirst", "endeavors", "sportsman", "scratching", "iodine", "phoebe", "wipes", "fro", "krone", "urgently", "exposes", "natures", "liberalism", "meer", "derry", "suisse", "frankenstein", "parc", "heir", "phy", "successors", "eccentric", "yarmouth", "transports", "amour", "illustrative", "prosecuted", "sailed", "craving", "advocating", "titel", "leaking", "escaping", "possessing", "suicidal", "cruisers", "masonic", "forage", "loco", "hellenic", "kwh", "ethel", "distinctly", "assertions", "baba", "pebble", "staffs", "ets", "hoo", "denomination", "patched", "patriotism", "battling", "tickle", "bandit", "acquaintance", "lambs", "loom", "blouse", "heightened", "chests", "ambitions", "feline", "grub", "ulcer", "slew", "menstrual", "canals", "negatives", "threading", "duet", "intolerance", "ammonium", "zephyr", "tearing", "muffins", "naar", "autor", "fannie", "foothills", "atrium", "thine", "superiority", "gestures", "nemesis", "engel", "confessional", "cardigan", "taunton", "evaporation", "devise", "abolished", "sorrento", "blanchard", "uns", "toying", "parma", "wreath", "plight", "opium", "irrational", "arches", "naturalist", "encompassing", "penetrating", "destroys", "prussia", "lowers", "cookery", "nal", "beatrice", "policeman", "cartilage", "turnpike", "migratory", "jurors", "mea", "enumerated", "sheltered", "doctrines", "seams", "pleaded", "pca", "elasticity", "cel", "gutter", "ulcers", "sloppy", "flannel", "volcanoes", "ridden", "contradictory", "misunderstood", "steamer", "cong", "barometer", "exclaimed", "diem", "barge", "spartan", "nea", "crystalline", "rumours", "famed", "brandt", "riga", "bengali", "respite", "grimm", "shetland", "provocative", "guido", "tasted", "licked", "banged", "rufus", "hopeless", "henrik", "safest", "daphne", "ame", "pollock", "meteor", "granville", "veneer", "anonymously", "manageable", "slant", "disciplined", "pollard", "comme", "chops", "broom", "plainly", "ibrahim", "snare", "shank", "uphold", "revising", "insignia", "nurture", "leash", 
"hunts", "faber", "plantations", "factions", "falmouth", "humility", "commentators", "impeachment", "acton", "engages", "carbide", "pullman", "characterised", "kinder", "deems", "outsiders", "dodd", "dissolve", "adrienne", "deduct", "crawling", "modifier", "muck", "colombo", "hoax", "cohesion", "reconnaissance", "antagonists", "bachelors", "observes", "corporal", "ligne", "wary", "locust", "condenser", "articulation", "villain", "tre", "oft", "secures", "leviticus", "impending", "rejoice", "pickering", "poisson", "bursts", "versailles", "hurdles", "lucie", "geese", "condemnation", "candies", "sidewalks", "formidable", "pun", "autres", "mecca", "rested", "paused", "macbeth", "abandonment", "nada", "bertrand", "broth", "wentworth", "seduction", "fertilizers", "maison", "contrasts", "giuseppe", "tae", "improperly", "nebula", "crows", "blooming", "mace", "seminole", "taper", "synagogue", "sugars", "burnham", "allure", "intestine", "ambassadors", "reclaim", "isla", "kingdoms", "richness", "converge", "pianos", "dol", "workings", "penelope", "extinct", "ponder", "revue", "lunches", "fooled", "smear", "rigging", "derives", "praises", "detachment", "luca", "caracas", "lids", "pore", "ey", "radiance", "oily", "quitting", "ina", "grover", "screams", "masking", "patchwork", "heinrich", "breton", "assures", "joys", "involuntary", "allegation", "infinitely", "dorchester", "serge", "morphine", "gymnasium", "waldo", "diese", "chiefly", "judah", "conjecture", "mich", "restitution", "indicted", "blasting", "confronting", "mastered", "powders", "debtors", "grit", "slain", "nearer", "ancestral", "mujeres", "faithfully", "revolutions", "sei", "quail", "tanker", "administrations", "sho", "rector", "ballast", "immature", "recognises", "taxing", "icing", "substituting", "executes", "originality", "pinned", "gables", "discontinue", "bantam", "bianca", "zimmer", "earthly", "conceive", "forfeiture", "disastrous", "gladiator", "poplar", "ence", "recourse", "martian", "equinox", "hinder", "fredericksburg", "presume", "weil", "armchair", "cecilia", "strut", "kari", "pavel", "appropriateness", "tame", "solstice", "oats", "italien", "wolff", "plume", "sparta", "calypso", "pantry", "etienne", "italics", "reversing", "murderer", "courteous", "wilt", "smoothing", "billet", "pretending", "hammock", "receptions", "revoke", "intruder", "wagons", "jennie", "platte", "plank", "paddling", "ting", "interrogation", "neue", "longing", "irresistible", "pilgrims", "disappearing", "sau", "enact", "inertia", "misunderstanding", "deity", "pruning", "agra", "mandolin", "rolf", "swiftly", "claws", "brightly", "manly", "emit", "shortened", "fearful", "potency", "ifc", "flawless", "peril", "alessandro", "breaches", "resultant", "nestled", "hairs", "dumfries", "drastic", "guarded", "celery", "reconcile", "grammatical", "collin", "ven", "admiration", "zanzibar", "offend", "severance", "somali", "combating", "numb", "retina", "maids", "tempting", "bureaus", "voyages", "galatians", "flo", "planters", "rocco", "sheath", "louie", "chaplain", "benefiting", "dubious", "occupies", "mammal", "shielded", "degeneration", "listens", "swirl", "emery", "twists", "scot", "intrigue", "blanche", "dialect", "nominating", "fanatic", "upton", "pave", "coverings", "danced", "slightest", "libre", "bromley", "revive", "corolla", "predominant", "abode", "savoy", "vogel", "insecurity", "trustworthy", "uniformity", "conquered", "alarming", "dur", "amused", "horizontally", "knitted", "exploding", "narrowly", "campo", "rampant", "suitcase", "embarrassment", 
"spectators", "coronado", "retaliation", "inquirer", "dreadful", "metaphysics", "drifting", "ritter", "attends", "nicer", "mellow", "boast", "gents", "respiration", "absentee", "duplicates", "dubois", "corollary", "tighter", "predetermined", "asparagus", "airy", "progresses", "canister", "stiffness", "thrifty", "canning", "workmanship", "complexities", "shan", "wrinkles", "illustrating", "perch", "craven", "divergence", "homage", "atrocities", "londonderry", "hops", "emmy", "chez", "admittedly", "ruiz", "angst", "liturgy", "nativity", "surety", "tranquil", "disseminated", "staircase", "cutler", "cradles", "electorate", "airs", "reconstructed", "resent", "opposes", "silvia", "distraction", "dominates", "kimberley", "despatch", "fugitive", "tucked", "jericho", "turmoil", "gilles", "dietrich", "haines", "unjust", "markedly", "fascinated", "disturb", "terminates", "exempted", "bounced", "rankin", "brightest", "saddles", "scotsman", "fitzpatrick", "gushing", "distracted", "secluded", "criticize", "bog", "livelihood", "godfrey", "minerva", "superseded", "iceberg", "caleb", "christening", "jealousy", "plumber", "hagen", "squeezed", "judas", "valle", "dole", "wick", "gertrude", "communists", "owes", "scents", "bertha", "levied", "sag", "barns", "covenants", "peat", "proprietor", "lizzie", "raids", "solos", "compartments", "maj", "foi", "importation", "mss", "planter", "ici", "metz", "immaculate", "pur", "reindeer", "telegram", "ruben", "shaken", "wares", "rivalry", "verve", "charley", "carpenters", "spree", "sunk", "morley", "bespoke", "inflicted", "abbreviated", "drowned", "escorted", "brute", "barracks", "kidneys", "warbler", "onward", "kidnapping", "inducing", "lancet", "antelope", "terminus", "castings", "flanders", "pellets", "enclosing", "starred", "deacon", "kabul", "sweeps", "butch", "mercure", "bookcase", "assembling", "diaphragm", "questo", "chores", "consignment", "yarns", "liv", "seedlings", "fortified", "reconsideration", "barnard", "profoundly", "bartender", "mayfair", "jag", "maneuver", "ridder", "vanished", "lair", "enclose", "sinners", "lille", "calves", "defer", "desmond", "liars", "els", "sod", "lacy", "pharaoh", "advocated", "itching", "alles", "devotional", "taft", "comparatively", "spartans", "tourney", "reasoned", "lawton", "degli", "saith", "astral", "ach", "parallels", "yelled", "wren", "terence", "hamper", "balkan", "blurred", "smuggling", "instincts", "hutton", "masquerade", "deans", "duality", "sensational", "kites", "smoother", "expulsion", "withhold", "romano", "grievances", "betrayed", "dumps", "buckles", "joyful", "generalization", "hin", "pancakes", "crave", "cordova", "focussed", "ripple", "claimants", "consolidating", "goldsmith", "inclination", "measles", "arcs", "portman", "baptized", "expelled", "rupees", "betrayal", "flourish", "heed", "mein", "graf", "hawking", "divides", "composing", "handicrafts", "healed", "burmese", "boon", "valor", "pedestrians", "gathers", "pawn", "stitched", "camille", "ceases", "dorsal", "collie", "hereditary", "exaggerated", "buccaneers", "spleen", "allotment", "jeu", "multiplying", "empress", "orbits", "whence", "bois", "trusting", "sabre", "stigma", "abduction", "attaches", "tartan", "twisting", "tore", "eth", "mimic", "shielding", "stormy", "vulgar", "pathological", "hodge", "trimming", "emanuel", "serene", "obligatory", "corrugated", "queenstown", "forbid", "unhealthy", "felicity", "ticks", "fascination", "sono", "experimenting", "splendor", "vigil", "robbed", "rebirth", "winona", "progressing", "fragrant", "defeating", 
"hotter", "instantaneous", "operatives", "carmichael", "bulky", "exponent", "desperation", "parlor", "setter", "monumental", "olaf", "fer", "stirred", "toughest", "fil", "facade", "frankfort", "monograph", "booze", "widen", "adjective", "disciple", "cipher", "arrears", "rhythmic", "unaffected", "starving", "vide", "lennox", "sil", "hearty", "triton", "deus", "devine", "adore", "entertainer", "colds", "dependant", "thicker", "weeping", "chandeliers", "moneys", "infancy", "dips", "honoured", "yachting", "cleanse", "chilly", "digs", "bolivar", "womb", "irritating", "monarchy", "corset", "hinged", "attendants", "cummins", "robins", "booming", "artikel", "scandals", "screamed", "cramps", "enid", "herrera", "digger", "espionage", "pups", "avenged", "norte", "glade", "pendulum", "bounces", "nehemiah", "thinner", "noch", "licks", "soto", "caste", "jus", "daft", "sampson", "psyche", "rudolf", "angling", "stubborn", "diplomats", "physicist", "tagalog", "coo", "requiem", "bleu", "redeemed", "sighed", "lures", "bavaria", "devastation", "heroine", "bingham", "achilles", "flaps", "indifferent", "cadence", "frosted", "schubert", "rhine", "manifested", "denominations", "interrupts", "rattle", "insults", "oatmeal", "marta", "distilled", "stricken", "unrest", "cascades", "druid", "dunbar", "outsider", "ris", "abstinence", "nag", "poodle", "wunder", "stefano", "sitter", "colder", "laborers", "whispers", "swarm", "elise", "ledge", "winthrop", "historia", "peasants", "nectar", "anecdotes", "gilt", "masterpieces", "symbolism", "monsoon", "drown", "strife", "esprit", "attaining", "consular", "treason", "reckon", "gaston", "prosper", "napier", "supremacy", "capillary", "germain", "islington", "anchored", "yong", "vers", "mulberry", "sinful", "cheeses", "bradshaw", "mythical", "abyss", "whitehall", "malachi", "ble", "clipping", "niece", "irresponsible", "pleas", "softer", "paralysis", "devastated", "tarzan", "shutters", "flask", "arisen", "femmes", "relentless", "ribbed", "omnibus", "stables", "inhabited", "hereof", "untold", "observable", "gretchen", "lanterns", "tulips", "vigorously", "interfering", "idols", "designating", "nugget", "reminding", "gusts", "xviii", "magistrates", "procession", "spiritually", "attentive", "rupture", "trad", "assimilation", "lyrical", "concorde", "angelica", "braided", "wooded", "intensely", "propelled", "artisans", "bastards", "bassett", "aspiration", "appended", "slammed", "aviator", "implicated", "seriousness", "conformation", "intimidation", "paladin", "ihr", "nests", "civilized", "marched", "cassandra", "cath", "sighted", "hopping", "destin", "rosary", "platoon", "andres", "loneliness", "pulley", "alleging", "synonymous", "confectionery", "regrets", "consciously", "cours", "footprints", "priscilla", "stimulates", "darkest", "implying", "conducive", "uncontrolled", "ballads", "mathew", "hugely", "sevilla", "hostages", "rosario", "fruitful", "franks", "indemnify", "satisfactorily", "thinker", "contestants", "sia", "influx", "convoy", "sled", "pyramids", "depended", "conveyance", "tortoise", "milo", "cultivate", "crocker", "dialogues", "abolition", "coax", "padre", "lees", "mari", "quattro", "foresight", "peppermint", "tod", "castillo", "remnants", "nailed", "alum", "frantic", "zachary", "comrades", "cocoon", "doth", "gladys", "bowers", "strengthens", "qual", "dictatorship", "breezy", "plow", "mundane", "douglass", "barclay", "foes", "cloths", "clowns", "lombard", "barren", "histoire", "plead", "behaved", "embargo", "condensation", "yokohama", "vow", "claudio", "blot", "primera", 
"commentator", "patterned", "sheen", "specter", "imam", "assent", "hove", "shading", "scrubbed", "warts", "roundabout", "harmed", "paternity", "conceal", "starvation", "appointing", "seine", "flowed", "sewn", "zulu", "rin", "barnet", "rift", "saviour", "lapel", "turk", "cupboard", "archipelago", "peep", "deceptive", "undertakings", "tinted", "congratulate", "constance", "vanishing", "legislator", "notifying", "aches", "kitchener", "leaked", "genera", "idioms", "gardiner", "gli", "poisonous", "chime", "spence", "mischief", "argent", "delinquency", "cou", "sentimental", "unsuitable", "mildly", "forging", "pew", "waitress", "caribou", "merced", "expansive", "footing", "manu", "sligo", "remit", "bonnet", "stumble", "undertook", "promenade", "exhaustion", "unborn", "wendell", "hammers", "coasts", "emitting", "concur", "exert", "madeline", "sanskrit", "torre", "worldly", "wedges", "corded", "heirloom", "pleasantly", "portray", "pero", "esoteric", "luxe", "messengers", "landings", "graphically", "shameless", "communicates", "bourgeois", "yeh", "napkins", "unloading", "bakers", "selma", "pears", "heats", "lucid", "lobe", "canaan", "oppressed", "infer", "prosecute", "thatcher", "bret", "hauling", "inconsistencies", "indebtedness", "scramble", "adversary", "elsa", "quaint", "oswald", "dipping", "revere", "troopers", "domaine", "olde", "guerra", "solemn", "eruption", "celeste", "gentry", "enchanting", "preached", "mica", "cadets", "lads", "endured", "ensuite", "fermentation", "careless", "chemists", "inca", "fad", "julien", "dandy", "narcotic", "moulin", "paine", "incompetent", "ain", "predecessors", "lancer", "sorcerer", "fishers", "invoking", "muffin", "motherhood", "wexford", "ihre", "dressings", "partridge", "synod", "noticing", "inte", "newmarket", "amigo", "discerning", "caddy", "burrows", "furnaces", "zee", "occupant", "livingstone", "juggling", "wildfire", "seductive", "scala", "pamphlets", "rambling", "kidd", "bedside", "lausanne", "legality", "arbitrarily", "heb", "luz", "regulars", "robson", "mysticism", "accompanies", "summed", "chopin", "torches", "dominating", "joiner", "viejo", "explorations", "guaranty", "procure", "stillwater", "sunsets", "cropping", "anastasia", "arrogance", "diverted", "forgiven", "bleak", "christophe", "wenn", "drudge", "dolores", "tramp", "saliva", "chichester", "artemis", "lessen", "weller", "syringe", "diversions", "admiralty", "powdered", "granger", "prevailed", "glacial", "alleges", "shredded", "antiquity", "zeal", "valparaiso", "blaming", "embark", "manned", "porte", "johanna", "granular", "sant", "orkney", "bah", "vero", "oscillations", "sphinx", "spiegel", "mujer", "ceremonial", "sonnet", "constituencies", "sprung", "hedges", "inflated", "crooks", "prospecting", "quilted", "walled", "immensely", "trafalgar", "relapse", "descend", "jakob", "bolster", "nietzsche", "fol", "rocked", "rancid", "disparity", "malice", "vom", "knapp", "swimmers", "syllable", "painfully", "sweating", "demolished", "catholicism", "trident", "lemonade", "absences", "andes", "ciudad", "josie", "persists", "propeller", "dents", "anarchist", "submerged", "entrusted", "essen", "calming", "intending", "cromwell", "drummond", "dissertations", "highlander", "solicitations", "lar", "punto", "survives", "darcy", "funnel", "moons", "gent", "thirsty", "freshness", "lathe", "shabby", "punched", "petri", "virgil", "gaa", "marbles", "cottonwood", "mildred", "deletions", "cleopatra", "undecided", "startling", "inductive", "inadvertently", "bursting", "wird", "halves", "moulding", "melancholy", 
"observance", "leaps", "halen", "galvanized", "hoy", "teapot", "conveys", "lends", "squire", "ache", "counterfeit", "waller", "duval", "yoke", "resonant", "mak", "outskirts", "expedite", "grayson", "sweetness", "crook", "rearing", "davison", "tins", "deliberations", "indifference", "xix", "invading", "dives", "loot", "coyotes", "stale", "cosmo", "levers", "cog", "incarnation", "strained", "putty", "reacted", "admissible", "sunless", "puzzled", "unexplained", "patsy", "thermometers", "fourteenth", "compounded", "chippewa", "eldest", "terrifying", "climbs", "uprising", "gasp", "swans", "tories", "hap", "remnant", "immoral", "sacrificed", "unequal", "weaken", "braxton", "categorical", "cupid", "stalking", "sturgeon", "jap", "piers", "ensuing", "mitigating", "tint", "dykes", "revived", "joachim", "eet", "earle", "hosea", "sua", "haste", "flakes", "alfalfa", "corfu", "argyll", "emil", "joking", "rhetorical", "simmer", "vert", "smallpox", "overwhelmingly", "waterway", "migrated", "reacts", "bain", "norbert", "complication", "aubrey", "adaptable", "sainte", "bitte", "fleur", "muy", "berth", "uninterrupted", "lint", "chalmers", "crabs", "tuscan", "lingo", "einer", "budding", "roam", "resemblance", "hackney", "toto", "hebron", "saber", "cataract", "midday", "fait", "innate", "medallion", "prominently", "kant", "nazareth", "nadia", "glanced", "calais", "rapture", "sunbeam", "abruptly", "beetles", "caspian", "impair", "stun", "shepherds", "susanna", "philosophies", "lager", "projecting", "goblin", "bluffs", "parrots", "anthems", "terrified", "nocturnal", "nueva", "emulate", "accuse", "hunted", "diminishing", "lew", "ridley", "produits", "zipped", "intrepid", "babel", "clustered", "primate", "eyebrows", "compromising", "willingly", "harlequin", "revisit", "insulting", "prominence", "cuckoo", "parrish", "inspires", "acacia", "fang", "netting", "contemplating", "erasmus", "sop", "recalling", "practising", "hermitage", "starlight", "foyer", "palaces", "brood", "azure", "compel", "contradictions", "festivities", "trenches", "sabine", "doorstep", "sniff", "dangling", "negligent", "gliding", "woe", "meditations", "tranquility", "halted", "liza", "drawback", "smyrna", "hostess", "weep", "posse", "mosquitoes", "commun", "weldon", "frying", "hesitation", "imprinted", "bereavement", "surrendered", "iam", "bestand", "westward", "converged", "leopold", "recognizable", "ludlow", "sprague", "saba", "embraces", "gustav", "waxing", "gael", "sinner", "auspices", "coles", "ergo", "dissenting", "melee", "radcliffe", "countess", "pleading", "crafty", "llama", "montague", "troubling", "vowel", "reuben", "cob", "fearing", "coronation", "isabelle", "reluctance", "inconsistency", "apostolic", "summoned", "treble", "galley", "shovel", "kam", "entail", "mashed", "aire", "pacing", "moan", "opec", "jimmie", "henson", "unfolding", "tottenham", "deserts", "milking", "wilbur", "suitably", "enormously", "aber", "cicero", "scribe", "nellie", "sleigh", "formulae", "fen", "sank", "frontage", "blister", "ration", "humid", "portrayal", "guile", "lacquer", "unfold", "hammered", "tutti", "mined", "caucasus", "intervening", "bale", "astronomers", "thrills", "therefor", "sores", "fel", "pastures", "unattended", "playwright", "carthage", "zechariah", "selves", "naturalization", "whispering", "dissipation", "sprite", "keel", "leighton", "atheism", "gripping", "cellars", "tainted", "remission", "praxis", "affirmation", "perturbation", "wandered", "reeds", "angler", "astounding", "cosy", "resend", "augment", "flares", "shedding", "glastonbury", 
"funerals", "eucalyptus", "conservatism", "questa", "bumped", "fortuna", "cripple", "lofty", "proclaim", "cropped", "merton", "ere", "richly", "ravi", "dogma", "priori", "vaguely", "yam", "ple", "siberia", "melons", "farley", "seer", "evils", "spontaneously", "unavoidable", "ruthless", "almonds", "ecclesiastes", "aptitude", "vial", "chao", "sharpening", "seniority", "prompting", "objected", "equator", "guilds", "blatant", "favoured", "ridges", "oysters", "gust", "cate", "receptacle", "mendoza", "haus", "puberty", "shorten", "shawl", "samaritan", "bends", "grimes", "unison", "tabular", "amir", "dormant", "nell", "restrained", "tropics", "concerted", "avenir", "refrigerated", "crouch", "pence", "formulating", "lamentations", "napkin", "emile", "contagious", "inaccessible", "administers", "crockett", "conspicuous", "barbarian", "soaking", "reforming", "gar", "intrusive", "thyme", "parasitic", "abusing", "receptive", "capt", "uwe", "xvii", "vulcan", "musk", "lucille", "executions", "refreshed", "guarding", "atwood", "windmill", "lice", "garter", "footed", "dedicate", "libros", "renewing", "burroughs", "ioc", "skim", "touche", "welt", "veal", "perpetrators", "embarked", "quickest", "euclid", "tremendously", "anglais", "smashed", "oscillation", "thunderstorm", "retrospect", "jog", "hailed", "bahia", "miraculous", "hounds", "tightening", "draining", "paroles", "sensibility", "rags", "punching", "distinguishes", "poi", "dazzle", "dangle", "eaters", "exceedingly", "inauguration", "inquired", "repentance", "unprotected", "merle", "savory", "evacuated", "reclaimed", "prefecture", "accented", "crawley", "baum", "racket", "hannibal", "sickle", "violently", "attest", "untouched", "comforting", "creeping", "kerosene", "appraised", "restorative", "chet", "peacefully", "stature", "sentry", "pel", "assaults", "berwick", "vices", "amo", "tolls", "degrading", "forster", "fireman", "maniac", "antics", "deze", "formative", "recognising", "wordsworth", "wrongly", "cree", "physicists", "falsely", "abbot", "officio", "consul", "plagued", "lahore", "aiding", "kunst", "suckers", "swallows", "patronage", "canoes", "matilda", "fodder", "impetus", "peeled", "whining", "arson", "hirsch", "tapestries", "transatlantic", "jak", "freeing", "kilkenny", "redress", "settles", "seaman", "skulls", "cayenne", "treatise", "defeats", "testimonies", "kali", "weitere", "itch", "withdrawing", "solicited", "jai", "gard", "brilliantly", "deja", "mccann", "spalding", "dill", "reopen", "potts", "erased", "resisting", "congregational", "antiquities", "dunham", "monsieur", "inhaled", "fuses", "britt", "blinded", "madras", "sacrificing", "faiths", "tinker", "sonora", "echoed", "elisha", "gazing", "skepticism", "zane", "eighties", "groupe", "freehold", "braid", "ance", "forester", "resisted", "alp", "munro", "agar", "arundel", "shiraz", "disgrace", "mediate", "rein", "realisation", "irritable", "cunning", "fists", "pennies", "jos", "hemorrhage", "awning", "ointment", "spilled", "tripping", "occidental", "vigor", "chariot", "buoy", "geraldine", "matrimonial", "squads", "niet", "tenn", "disclosing", "masthead", "ursula", "disbursements", "boucher", "chadwick", "candidacy", "hypnotic", "adultery", "fis", "seventeenth", "temperament", "prostitutes", "healer", "hive", "circulate", "glued", "sycamore", "belinda", "westmoreland", "shuts", "tenderness", "ocular", "smelling", "dung", "keine", "scratched", "conclusive", "alder", "polluted", "undersigned", "lark", "oda", "carlyle", "restores", "lullaby", "sanderson", "hoes", "lawns", "midas", "choking", 
"castor", "plentiful", "bonner", "stately", "raced", "deuce", "oma", "squirrels", "paddington", "drawbacks", "evoked", "dictates", "studded", "individuality", "spared", "anticipating", "californian", "brownie", "undressing", "quits", "ensign", "restraining", "blockade", "girard", "nearing", "ruff", "burglar", "warped", "tributes", "freezes", "knoll", "thinning", "reddy", "primrose", "parting", "humber", "michelangelo", "corduroy", "torpedo", "muffler", "troublesome", "eucharist", "wadsworth", "magnetism", "hodgson", "inventive", "speculate", "craze", "dispatches", "craftsmen", "desiring", "felipe", "hoffmann", "texan", "nombre", "grated", "submarines", "provoke", "romana", "accommodating", "grenoble", "calvary", "banded", "deportation", "harald", "cuttings", "invests", "sculptor", "kildare", "commended", "roper", "narrowing", "sergey", "mechanically", "profanity", "playmate", "scum", "seasoning", "adolf", "adjourn", "widows", "conveying", "precincts", "volta", "mediums", "discern", "bran", "fumes", "futile", "disqualified", "fenced", "eel", "animate", "faro", "resembling", "buren", "totem", "experimentally", "drinkers", "hermione", "indus", "harms", "asserting", "affluent", "ell", "protesting", "dix", "lonesome", "liberated", "unconventional", "amore", "reckoning", "fabian", "concurrence", "closets", "carve", "metaphors", "muster", "labourer", "heartfelt", "pertain", "democracies", "gideon", "mallory", "gauntlet", "martyrs", "cots", "victorious", "sylvan", "beverley", "unnatural", "swish", "confessed", "nae", "drumming", "patching", "fret", "abiding", "luscious", "sighting", "relic", "slipper", "augsburg", "bil", "argyle", "cling", "prophetic", "commune", "agatha", "tut", "haut", "gesellschaft", "circumcision", "neutrality", "aqui", "snoring", "trembling", "reproducing", "comets", "unitarian", "governs", "gums", "delaying", "mainz", "reconstruct", "toned", "erred", "modelled", "expiring", "mabel", "whistles", "jewellers", "kann", "caron", "understandings", "dared", "herndon", "nudge", "seeming", "rosebud", "alf", "andromeda", "sixteenth", "origination", "uso", "doves", "landowner", "preachers", "leiden", "ramona", "glib", "brutality", "fictitious", "francesca", "rumour", "immortality", "saffron", "ragged", "peerless", "constitutions", "improbable", "reiterated", "jesuit", "excessively", "mounds", "extraordinarily", "parted", "munster", "sufferers", "skunk", "interruptions", "placer", "lingering", "brooches", "heaps", "hydra", "anvil", "blinking", "sweetest", "noe", "dishonest", "stalk", "kun", "inert", "favorably", "vocation", "tribunals", "cedric", "favours", "witnessing", "eject", "seventies", "rayon", "dryden", "foreigner", "policemen", "unfavorable", "anomalous", "katharine", "barter", "rowley", "modifies", "frugal", "starry", "thanking", "nouns", "consequent", "entrances", "danube", "evasion", "filenames", "mayors", "gospels", "wicket", "cora", "lazarus", "vile", "misguided", "reunited", "conversational", "inspirations", "blasted", "shingles", "gresham", "cumbersome", "immersed", "philemon", "roasting", "accrue", "loire", "vented", "pont", "consolation", "cer", "frazer", "outlay", "dreaded", "airing", "alternately", "gracefully", "intrigued", "antagonist", "exalted", "cadre", "serb", "jaeger", "overthrow", "patiently", "cabot", "controversies", "narrated", "squat", "illuminating", "artificially", "saucepan", "freshest", "noi", "martyr", "hacienda", "koran", "quito", "tiara", "elegantly", "temptations", "skinned", "irrigated", "hives", "groundwork", "cyril", "kew", "resentment", 
"glaciers", "peri", "manfred", "gaping", "infringe", "porta", "inferences", "abrupt", "gambler", "dissection", "nightingale", "landau", "contemplate", "amigos", "putt", "colonization", "coon", "crock", "ailments", "disagreed", "boldly", "narration", "unopened", "insisting", "yeas", "brushing", "resolves", "sacrament", "cram", "shortening", "cloves", "marketable", "presto", "hiram", "broadening", "hens", "bowed", "whimsical", "harden", "molten", "repaid", "warmly", "hogs", "sporadic", "eyebrow", "strickland", "unnecessarily", "iom", "tess", "trois", "painless", "serbs", "verdi", "annexation", "dissatisfaction", "alpes", "applaud", "haben", "primo", "abolish", "climates", "uneasy", "busiest", "fray", "florian", "clogs", "flank", "cartel", "numerically", "perforated", "intensified", "sexton", "postmaster", "washes", "shrugged", "electors", "departs", "mindful", "lurking", "hitherto", "egyptians", "looms", "spectre", "downright", "refractory", "counsellor", "inexperienced", "outraged", "belgique", "smother", "frosty", "mules", "sash", "truro", "moaning", "ponies", "originates", "blight", "physique", "independents", "contentious", "cheering", "archibald", "emancipation", "duchess", "commemorate", "spout", "perish", "hoist", "narrower", "captivity", "peyton", "overloaded", "shorthand", "ceres", "bravery", "lizards", "einen", "fergus", "sincerity", "calder", "oar", "mullins", "flagged", "relics", "relish", "imagining", "belongings", "lire", "legislatures", "unchecked", "knocks", "alfonso", "contradict", "fleurs", "scarcity", "ashby", "fleeing", "filament", "abingdon", "theorists", "hof", "southwark", "celia", "disguised", "implanted", "thrash", "antiquarian", "dina", "fluency", "uniting", "behaves", "slabs", "conceivable", "agate", "incline", "hartmann", "bai", "soliciting", "thoroughbred", "calle", "oneness", "climber", "commonplace", "intellectually", "casanova", "himalayan", "downfall", "bookcases", "strides", "vanish", "ute", "transmits", "adair", "impatient", "aforesaid", "elbows", "truce", "bette", "stairway", "woodrow", "sou", "boar", "vertebrate", "laird", "multiplicity", "objectively", "resigns", "anguish", "petal", "perfected", "tomlinson", "odors", "mite", "blackstone", "clipped", "lago", "jed", "dries", "mejor", "sikh", "annoyance", "grating", "prostitute", "mina", "elixir", "guardianship", "gamblers", "autre", "peeps", "rol", "reverence", "sardinia", "outweigh", "verne", "gaylord", "bunting", "avenger", "spar", "waugh", "captivating", "tiers", "centurion", "propagate", "prosecuting", "montpellier", "willem", "slavic", "nutritious", "marguerite", "vapour", "pluck", "cautiously", "prick", "contingencies", "coercion", "picard", "rubble", "scrambled", "agitation", "chas", "truthful", "woodpecker", "herds", "corsica", "penetrated", "sein", "adder", "weakest", "weakening", "nome", "thorne", "anticipates", "poignant", "germs", "frees", "punishable", "fractured", "waterman", "brat", "uranus", "salient", "gabe", "censor", "semitic", "wits", "perverted", "bordering", "widowed", "tombstone", "begged", "flushed", "cautions", "lavish", "roscoe", "brighten", "vixen", "whips", "marches", "xxi", "anew", "commandment", "undetermined", "horner", "yah", "conceded", "circumference", "postpone", "disproportionate", "pheasant", "alonso", "bally", "zijn", "guillaume", "marrying", "carvings", "complains", "resided", "terriers", "weasel", "venerable", "preis", "toasted", "admirable", "illuminate", "holbrook", "fades", "bulge", "eller", "lucinda", "brittle", "bandits", "politely", "desde", "watermelon", 
"ingenious", "carols", "pensioners", "obadiah", "mannheim", "hepburn", "fetched", "alderman", "lockwood", "coughing", "hiatus", "upholstered", "evangelist", "louvre", "spurious", "gloom", "severn", "angelic", "astrological", "nobility", "bayern", "afternoons", "ramifications", "wakes", "ashore", "workman", "swimmer", "sitio", "unload", "loon", "marge", "wanderers", "sips", "badness", "undertakes", "miscarriage", "vulgate", "stoned", "provoked", "herr", "fables", "crumbs", "wort", "palisades", "confidently", "commences", "dispense", "dangerously", "figaro", "sadie", "protested", "capitalists", "accusing", "stink", "convent", "valdez", "childish", "adhered", "priesthood", "jagged", "dispersal", "overt", "verbally", "squeak", "constituting", "nuns", "pronounce", "scorpions", "incompleteness", "thurston", "dearly", "suggestive", "osa", "electrified", "unbalanced", "gypsum", "slime", "baroness", "winnings", "imaginable", "bromide", "lui", "crusaders", "summing", "lament", "gregor", "terraces", "canyons", "predatory", "towne", "descendant", "disgust", "banked", "rationality", "screwing", "dismal", "ranches", "cochin", "wipo", "prologue", "whaling", "patrols", "stumbling", "swung", "outlaws", "sinn", "waved", "libel", "ellipse", "alarmed", "justine", "jest", "garda", "eskimo", "caesars", "luce", "strapped", "reluctantly", "woodwork", "centrifugal", "authorship", "cavities", "buxton", "cravings", "decidedly", "pau", "apathy", "mercantile", "stalled", "infused", "peaked", "stronghold", "huxley", "moritz", "bearded", "greasy", "vowed", "carnage", "asher", "ingenuity", "mort", "infested", "creeks", "bessie", "adele", "ota", "rattan", "coroner", "irregularities", "tiled", "elaboration", "hectic", "lun", "snuff", "convene", "vai", "calmly", "horribly", "dilute", "contemplation", "sino", "uhr", "carta", "gaseous", "afflicted", "gloomy", "kirkwood", "orchards", "prophecies", "marques", "septuagint", "pertains", "clothed", "plummer", "italians", "talon", "repellent", "laval", "sorcery", "abstain", "elsie", "barring", "undermined", "tid", "bestowed", "habeas", "inactivity", "crewe", "grassy", "aprons", "clumsy", "columbian", "ayr", "pounded", "carrington", "stint", "rousseau", "sarcasm", "accomplishing", "overturned", "uphill", "maximus", "warmed", "parable", "jolt", "affords", "deadlock", "deriving", "quadrangle", "elects", "liebe", "eradicate", "likeness", "ral", "jem", "unter", "alpaca", "degrade", "flemish", "shred", "conseil", "steamed", "aroused", "remittance", "sieve", "bloch", "alienation", "reddish", "impulses", "interpol", "pleads", "whitby", "goliath", "caprice", "hors", "horned", "fowl", "janus", "hester", "benevolent", "superstition", "cohorts", "camilla", "rarity", "limbo", "shove", "accusation", "bernardo", "flake", "hating", "pate", "sewers", "spores", "mahmoud", "shears", "mucho", "flutes", "tabernacle", "minced", "westerly", "despatched", "munitions", "symmetrical", "ornate", "midwife", "uniformed", "snug", "coveted", "prohibitions", "moulded", "deceived", "convict", "nai", "tossing", "regularity", "criticised", "lawfully", "goethe", "slade", "dumas", "jester", "notifies", "recount", "dearest", "nook", "commensurate", "schiller", "bowler", "wiser", "gallant", "disbelief", "gon", "unqualified", "cautioned", "recollection", "locomotives", "condemns", "fastening", "jeweler", "nuremberg", "ostrich", "maud", "flirting", "misplaced", "prosecutions", "dido", "poisoned", "researches", "chou", "discriminating", "exclamation", "collingwood", "intercepted", "ascendant", "flung", "clovis", "eam", 
"railing", "cremation", "banter", "balconies", "awaken", "pigeons", "singularity", "signify", "granddaughter", "subdirectory", "bancroft", "progeny", "alters", "gratefully", "divergent", "fleets", "dorian", "juli", "tackled", "shoals", "tributary", "clique", "rosy", "satanic", "stubbs", "durch", "torment", "mussels", "emigration", "howl", "wel", "iglesias", "hir", "ecclesiastical", "crippled", "hilltop", "tabor", "peut", "tenet", "fifteenth", "chute", "bohemia", "mountainous", "fonds", "ogre", "unforeseen", "pickles", "submissive", "curses", "stampede", "utilised", "trieste", "whine", "nus", "fatality", "tierra", "looming", "zo", "sped", "ankles", "mosques", "fuchs", "guerilla", "squeezing", "fisk", "canes", "follower", "euler", "alumina", "degenerate", "spiked", "cru", "misrepresentation", "strung", "chanting", "wrestler", "officiating", "hermit", "behaving", "colbert", "josiah", "deepen", "acadia", "eso", "remy", "pats", "valentin", "mora", "cri", "enrico", "reciprocity", "crease", "wis", "ook", "bartholomew", "perseverance", "catalonia", "yorktown", "impede", "clasps", "tilted", "vicar", "confines", "prank", "dass", "repent", "dio", "agreeable", "riddles", "bennington", "pulpit", "appreciates", "marshes", "bellies", "corrosive", "ambush", "palazzo", "franciscan", "figurative", "gait", "emphasised", "bonfire", "aversion", "vicente", "stiles", "stewards", "chauffeur", "elicit", "henrietta", "slapped", "bitten", "lind", "salamanca", "martyn", "dynamo", "hobson", "stow", "summon", "skeletons", "parchment", "lingua", "distractions", "forfeit", "pepe", "paddles", "unpopular", "republics", "inspecting", "retainer", "hardening", "loosen", "beowulf", "undiscovered", "einem", "imputed", "cabs", "cheated", "willows", "hump", "delft", "communicative", "grieving", "chastity", "faust", "fright", "harbors", "adorned", "obnoxious", "diligently", "decays", "mortimer", "marvellous", "nouvelle", "easing", "mathieu", "picket", "thrones", "emilia", "eyre", "maturing", "seu", "illogical", "awakened", "beet", "suing", "brine", "lorna", "waning", "cartwright", "armoire", "piled", "twinkle", "lodgings", "maitland", "supple", "geld", "soi", "fabio", "unfit", "uttered", "rumanian", "shaggy", "elongated", "ordeal", "pegs", "astronomer", "incompetence", "flicker", "ramsay", "relieving", "towering", "operas", "slaughtered", "assaulted", "mena", "rouse", "appel", "armand", "spiel", "impurities", "stemming", "inscriptions", "hos", "tentatively", "tragedies", "interlude", "oates", "dialects", "vas", "ovid", "carcass", "casually", "scamp", "freedman", "reprise", "zig", "lash", "ills", "simms", "danes", "pebbles", "quicksilver", "sacked", "omen", "forfeited", "stipend", "conceptions", "lii", "amulet", "informally", "sarcastic", "indemnification", "hawke", "complexion", "daisies", "informant", "sorrows", "ite", "aegean", "andere", "sluggish", "brig", "tiempo", "marsden", "coy", "grouse", "reginald", "wierd", "pasted", "moths", "batavia", "evoke", "dispositions", "haywood", "staunton", "nit", "amorphous", "tributaries", "townships", "nantes", "assam", "mousse", "shameful", "chiffon", "archaic", "elevate", "deafness", "bec", "sala", "laureate", "contemporaries", "syphilis", "vigilance", "appalling", "palmyra", "foxes", "davie", "affixed", "ticking", "pantheon", "gully", "bitterness", "brill", "defy", "stor", "consumes", "lovingly", "agua", "thrush", "bribery", "smokes", "ventilated", "kettles", "ascend", "nutmeg", "chained", "magnify", "precautionary", "travail", "livres", "fiddler", "wholesome", "wrists", "severed", 
"mites", "puddle", "azores", "vegetative", "agora", "sob", "elaborated", "reeve", "embellishments", "willful", "grandeur", "plough", "pritchard", "mansions", "macpherson", "overheard", "persisted", "whereabouts", "haydn", "symphonies", "reclining", "rodrigo", "bounding", "annexed", "atheists", "umpire", "orthodoxy", "kilt", "doubtless", "keyed", "esquire", "cryptic", "primus", "wherefore", "cholera", "midsummer", "colouring", "intoxicated", "mysore", "jerks", "mise", "darius", "bullion", "deflection", "hateful", "propensity", "journalistic", "essences", "dispensed", "lemons", "stratum", "vendetta", "lod", "felicia", "restrain", "clutches", "cults", "whit", "amaze", "manassas", "rembrandt", "estado", "easel", "reisen", "potion", "ovation", "paddock", "numerals", "surpassed", "vino", "gable", "johnnie", "thirteenth", "laced", "quill", "saa", "mares", "enthusiastically", "fetching", "chaps", "tendon", "bellows", "keats", "deceit", "caro", "unmarked", "joyous", "boswell", "venting", "infringing", "blythe", "chisholm", "gunner", "verso", "samoan", "absorbent", "grossly", "cleft", "clog", "hongkong", "impoverished", "stabbed", "teaspoons", "comedians", "awnings", "sill", "lucknow", "bleaching", "isolde", "startled", "mathematician", "untrue", "algonquin", "hurried", "vir", "dieser", "staggered", "vacated", "vente", "fitz", "dura", "fingered", "apprentices", "cerca", "booted", "allie", "sens", "sprouts", "bower", "moab", "wolcott", "extremity", "orphaned", "requisites", "prudence", "kaufmann", "bij", "gingerbread", "biggs", "tasteful", "puritan", "osiris", "affirming", "salud", "excavations", "forearm", "distract", "seaport", "flashed", "longs", "dawes", "buns", "deceive", "civilisation", "starved", "amico", "colosseum", "stipulation", "emptiness", "maddox", "shoemaker", "cushioned", "dada", "osborn", "hastily", "ful", "invader", "patriarch", "consents", "nils", "polynesian", "swain", "lain", "groningen", "emilio", "mourn", "abandoning", "oddities", "soften", "troupe", "blacksmith", "suicides", "powerfully", "compromises", "helene", "thirdly", "classifying", "deepening", "unfairly", "connexions", "calico", "wrongs", "pores", "johnstone", "undermining", "burnside", "colossus", "frivolous", "indecent", "dishonesty", "oiled", "turnbull", "microbes", "sharpen", "phonetic", "oppressive", "coined", "tito", "moray", "simeon", "onslaught", "nationale", "noses", "treasured", "sharpness", "corral", "fortnight", "lia", "plunged", "reals", "modulated", "defiant", "brisk", "meath", "jena", "ponce", "perjury", "mua", "generality", "vigilant", "pronto", "vistas", "eerie", "arne", "stonewall", "wrestlers", "jackass", "geometrical", "priory", "epsom", "corpses", "wiping", "mercenaries", "bronchitis", "therese", "whirlwind", "howling", "apprehension", "raisins", "turkeys", "tio", "hora", "bobbie", "shale", "diligent", "nachrichten", "dann", "adversity", "wiggins", "torts", "egress", "adjectives", "crepe", "dum", "sheepskin", "concave", "heresy", "armory", "forthwith", "avert", "oat", "guise", "curiously", "fullness", "culminating", "kipling", "vomit", "compounding", "afar", "ebb", "shaky", "brutally", "pennant", "nicest", "willoughby", "necks", "lak", "mathias", "levee", "hindus", "powerless", "populace", "deliberation", "soles", "jetty", "luster", "overrun", "undone", "delia", "habitual", "alhambra", "mee", "uplift", "causeway", "murderers", "reopened", "guid", "inhabit", "lorenz", "conglomerate", "fastened", "tompkins", "extradition", "geschichte", "perils", "jerky", "proportionate", "compte", "algo", 
"boroughs", "deliverance", "resists", "lovell", "discourses", "subdued", "adhering", "falk", "suspicions", "hampered", "bruxelles", "detriment", "prejudices", "purported", "tron", "ine", "mangrove", "gab", "fawn", "scaffolding", "prin", "narrows", "sensed", "insuring", "babcock", "rhys", "boasting", "norah", "ascertained", "fluctuation", "jeannie", "ond", "twenties", "monstrous", "stetson", "accuses", "calibre", "nobles", "fumble", "attrition", "atherton", "lassen", "proverb", "darin", "mercenary", "clams", "reis", "tightened", "levies", "speck", "gutters", "murderous", "rudder", "amusements", "scares", "deformed", "wretched", "decadent", "incarcerated", "unsurpassed", "surpass", "annihilation", "pietro", "memoranda", "steaming", "magnifying", "serra", "hideous", "abreast", "intuitively", "extremities", "tyrant", "decency", "papal", "sprang", "palais", "obscured", "duets", "mountaineers", "blount", "butchers", "apologise", "geologist", "piccadilly", "axioms", "mogul", "fiercely", "varnish", "hysteria", "nei", "insistence", "aer", "clockwork", "mecklenburg", "intelligently", "fuer", "vials", "imputation", "albrecht", "densely", "droit", "odin", "colton", "distrust", "ulm", "assassins", "hatton", "fraternal", "refinements", "eloquent", "cwt", "silas", "wondrous", "decrees", "touchstone", "etext", "drayton", "grieve", "reigns", "pleasurable", "dobbs", "tunis", "olin", "bustling", "galt", "flue", "lucerne", "fiasco", "emir", "deacons", "slings", "dwarfs", "apportionment", "thoreau", "reins", "anson", "broadest", "scrambling", "misfortune", "drenched", "astonished", "kiel", "subconscious", "agi", "incandescent", "disappoint", "mobs", "cris", "rehearsals", "massa", "firewood", "serenade", "weathered", "truffles", "anno", "kepler", "teatro", "lawless", "gout", "coincides", "inhuman", "gentiles", "jardin", "fag", "rubs", "irritated", "despise", "floated", "fresco", "auteur", "custard", "prius", "dias", "hasan", "branched", "shipbuilding", "mildew", "tombs", "frown", "fulfilment", "accords", "privy", "caretaker", "antonia", "feeble", "gentile", "contractions", "combatants", "annuals", "champlain", "valence", "deteriorated", "droits", "disobedience", "gat", "unpack", "divination", "haw", "nationalities", "cultivating", "triumphant", "superbly", "hombres", "constrain", "magicians", "gra", "hobbes", "contended", "nazarene", "potsdam", "genevieve", "shiloh", "damper", "afrika", "forgiving", "yahweh", "madman", "sor", "slumber", "shimmering", "rigidity", "bane", "marius", "inventing", "chipped", "ane", "forts", "tumbling", "interprets", "surat", "dormitory", "confiscated", "discharging", "unnoticed", "ridicule", "thaw", "vandals", "reinstated", "lizzy", "unpacking", "darien", "intersect", "finden", "janvier", "garnish", "designates", "peeling", "levis", "blindly", "unintentional", "durant", "repertory", "toi", "disagreements", "gatt", "bene", "fifties", "goody", "dugout", "battleship", "talisman", "eels", "shun", "blackwood", "giggle", "worden", "deforestation", "streaks", "roderick", "bor", "corinth", "perverse", "glittering", "jails", "casket", "brigitte", "detour", "husbandry", "visibly", "defunct", "unveil", "circulars", "merciful", "ines", "tun", "tipperary", "kinship", "springtime", "philipp", "blouses", "hemlock", "sniffing", "uncanny", "stork", "concede", "combustible", "fallacy", "nicknames", "noxious", "tunic", "farce", "drowsiness", "chants", "ashe", "rhone", "lunatic", "pyrenees", "auctioneer", "recovers", "haggard", "manger", "chills", "whack", "drone", "breezes", "esteemed", "godly", 
"spire", "distillation", "edging", "langdon", "mathematicians", "soe", "cymbals", "antidote", "emblems", "caricature", "shroud", "stead", "recoil", "reconciled", "daze", "raisin", "amb", "amounting", "schon", "boer", "poisons", "nameless", "trot", "musically", "intensify", "voltaire", "harmonies", "benito", "accumulating", "indebted", "wald", "breathed", "misled", "mani", "culprit", "transact", "billig", "spiced", "berne", "pron", "puncture", "nella", "lighten", "practised", "canteen", "fein", "hysterical", "fick", "darkened", "requisition", "shrug", "boils", "enchantment", "greta", "covey", "donne", "pena", "loathing", "duc", "woof", "ominous", "parlour", "hammocks", "quieter", "poking", "tallest", "wrestle", "entrenched", "rectify", "virtuous", "ous", "davy", "snails", "decipher", "incapacity", "mittens", "ferns", "curls", "ens", "wrecked", "wince", "friendliness", "invincible", "healthiest", "prometheus", "rushes", "deities", "wor", "comanche", "melts", "trickle", "disapprove", "erratic", "familiarize", "insufficiency", "drifted", "propagated", "hardships", "sabres", "foraging", "wasps", "chien", "mitre", "tonnage", "corals", "mille", "continuance", "unrecognized", "premieres", "affectionate", "baptiste", "unimportant", "ferrara", "greener", "bowles", "endowments", "grudge", "zoological", "norse", "wetting", "bosom", "bales", "blackbird", "causation", "persecuted", "deciduous", "straighten", "convocation", "merrick", "precaution", "playmates", "philanthropic", "maneuvers", "stratified", "critter", "begs", "emphasise", "uit", "adresse", "connell", "busts", "cutaneous", "porters", "forgery", "pereira", "infrequent", "mull", "ort", "brandenburg", "incision", "jumble", "cognac", "wading", "imitate", "grasping", "borneo", "mortuary", "bode", "thorns", "rightful", "scarecrow", "mosaics", "pious", "utterance", "undeveloped", "basalt", "undisputed", "distracting", "urns", "unfolds", "brocade", "seaweed", "prevails", "candlelight", "votive", "wafers", "messina", "schumann", "tarts", "cuthbert", "nance", "babble", "pessimistic", "niches", "untill", "quid", "cadiz", "shortwave", "overlooks", "diversify", "hugging", "postman", "oas", "overboard", "goddesses", "faithless", "regained", "coolidge", "ephraim", "foggy", "shone", "criticizing", "leafy", "passionately", "stroking", "matured", "dolor", "procured", "excellency", "camels", "partie", "tou", "justifying", "eased", "slay", "deprive", "kremlin", "thea", "lusty", "virtuoso", "buzzing", "dauphin", "steed", "cowley", "paraffin", "unites", "stimulant", "realising", "millet", "invert", "vermilion", "grinned", "marche", "thelma", "enlightening", "endlessly", "hasty", "dexterity", "puzzling", "nods", "dieses", "sumatra", "nigger", "scrape", "kendrick", "prized", "arresting", "bewitched", "resumption", "irma", "intimidated", "traitor", "clove", "illiterate", "widened", "bordered", "mallet", "leech", "giver", "discontent", "gaz", "punishing", "seedling", "dwellers", "mouthpiece", "nymph", "reassuring", "astor", "myles", "prematurely", "frail", "adventurer", "irradiated", "awfully", "mayflower", "arched", "enlist", "vedic", "exemplified", "profane", "ubi", "cornelia", "romney", "macaroni", "electing", "dictation", "tage", "robber", "evacuate", "tus", "conveniences", "roving", "drinker", "softened", "peking", "fillet", "maar", "churn", "nimbus", "nog", "smartest", "neale", "ett", "madre", "impart", "feats", "concomitant", "donner", "scaffold", "oui", "ano", "millie", "libro", "leisurely", "loki", "dislikes", "mayonnaise", "dra", "limitless", "knopf", 
"hangman", "sloping", "mitt", "constitutionally", "disapproval", "bavarian", "crucified", "pocahontas", "masons", "surges", "literatures", "unlucky", "yawn", "distort", "mun", "wahl", "loosing", "canopies", "handicraft", "buscar", "piling", "basilica", "amine", "robbers", "juliana", "lowland", "sausages", "spake", "feud", "subordinated", "awoke", "unheard", "prune", "endanger", "cairn", "nomadic", "disgusted", "olfactory", "prolong", "fontaine", "knits", "thinly", "tant", "garnett", "galen", "arable", "parallelism", "brut", "vernacular", "latitudes", "alkali", "mowing", "foreseen", "palmerston", "sever", "expend", "stahl", "gist", "auntie", "afghans", "blames", "subdivided", "happiest", "lucca", "francine", "reserving", "nagasaki", "wid", "indented", "humming", "disclaim", "frans", "diameters", "exerted", "justifies", "freiburg", "regenerate", "titre", "tumbler", "bonne", "improvised", "flocks", "bothering", "garnered", "fling", "comrade", "ascended", "juliette", "porcupine", "chopping", "enacting", "stabbing", "metamorphosis", "hilda", "wanderer", "flattened", "dawkins", "spitting", "inconvenient", "seacoast", "imperfections", "lewes", "chancery", "raving", "hed", "executor", "anglesey", "choirs", "wreaths", "tasteless", "tomahawk", "tact", "projet", "instructive", "absorbs", "susannah", "toutes", "mathematically", "godwin", "drier", "bothers", "parades", "shoved", "invokes", "cannons", "hamish", "chromatic", "rife", "rallying", "enoch", "carriages", "dales", "polled", "agnostic", "emptied", "denounced", "delusion", "rimini", "verity", "turret", "precede", "huts", "betts", "domes", "eras", "wildest", "foodstuffs", "wessex", "priming", "vowels", "sulphate", "clandestine", "migrations", "hovering", "texte", "tamper", "pugh", "punishments", "dagen", "heathen", "unduly", "rigged", "domicile", "chargeable", "fanning", "meu", "spurred", "broughton", "wha", "osage", "peregrine", "tabitha", "puede", "crumb", "fostered", "culmination", "revolves", "mend", "theoretic", "softening", "glimpses", "hattie", "tastefully", "capo", "grate", "lourdes", "diseased", "kenilworth", "margot", "socialists", "deduced", "buttocks", "unmanned", "rainbows", "gunnar", "burials", "eunice", "bountiful", "salazar", "mesopotamia", "jetzt", "poseidon", "ratify", "mexicans", "fiend", "drapery", "bernice", "deported", "muzzle", "entrant", "schoolhouse", "retribution", "yusuf", "stallman", "slander", "basing", "baits", "fireside", "disposing", "herzog", "suffrage", "triumphs", "fortifying", "sleepless", "schiff", "watered", "lass", "fleas", "tully", "ventured", "recite", "kneeling", "negation", "dismay", "smelled", "jute", "heals", "prim", "trespass", "conciliation", "compasses", "groomed", "leaping", "impunity", "sunken", "inaugurated", "encountering", "infernal", "sewell", "pang", "swag", "reared", "pampered", "inquiring", "numero", "praising", "momentary", "commemoration", "favre", "poli", "holstein", "serpentine", "hangings", "lugar", "sundry", "protestants", "therefrom", "espace", "wrecking", "cristo", "pique", "swore", "novembre", "fawcett", "journeyman", "enlighten", "descartes", "flashy", "prowess", "abstractions", "enriching", "trampling", "signet", "bello", "iroquois", "digested", "rothschild", "trumpets", "embodies", "messe", "manhood", "kincaid", "cannibal", "nephews", "oblivious", "icao", "atmospheres", "stricter", "jeter", "memes", "roughness", "ancients", "snapping", "jethro", "cauliflower", "feudal", "unbearable", "perpetrated", "basses", "juni", "boarded", "olympian", "sedgwick", "livre", "mano", 
"interferes", "devotions", "myra", "devotees", "acquaintances", "sectarian", "fathom", "cools", "segundo", "appreciative", "innumerable", "parramatta", "noticeably", "furs", "atonement", "extant", "ignacio", "unmask", "chisel", "mysteriously", "wayward", "redness", "dreamland", "wands", "illustrious", "fishy", "nao", "pauses", "intoxication", "glimmer", "blooded", "slamming", "syllables", "whim", "filmy", "timid", "ismail", "tampering", "weavers", "magically", "pied", "thyself", "rooting", "pretended", "nigh", "therewith", "interment", "partitioned", "aller", "populous", "modesty", "veils", "frei", "zest", "sumptuous", "wayside", "spotless", "wich", "summits", "ner", "banc", "barbed", "legions", "dona", "lustre", "wer", "sunflowers", "sommer", "ecstatic", "campania", "blasphemy", "wisp", "countenance", "skinning", "sift", "ooze", "recounts", "adventurers", "oktober", "bigotry", "leaky", "contradicts", "leven", "pagans", "dinars", "diesem", "fume", "afloat", "bruised", "flattering", "brigades", "leur", "engrossed", "dashes", "impeach", "atrophy", "hur", "brag", "earls", "confers", "totality", "circumvent", "boulders", "negotiator", "yolanda", "muff", "maude", "odour", "bellamy", "snag", "fringes", "gough", "excavated", "smoothed", "affirms", "gulch", "irrevocable", "wieder", "moaned", "axles", "graciously", "radiated", "bribe", "propel", "outspoken", "verily", "ardent", "forcibly", "presided", "shimmer", "tremor", "gnp", "loaned", "violins", "extravagant", "ghent", "astute", "jamieson", "pemberton", "inflict", "invalidate", "ridiculously", "legible", "towed", "disregarded", "auguste", "puc", "salted", "attractiveness", "calamity", "brewed", "aristocrats", "fiance", "sprawling", "vulture", "mislead", "ventral", "twa", "retard", "medio", "platters", "canto", "germanic", "harassed", "discriminated", "estelle", "sponges", "cavendish", "receptacles", "jacinto", "revered", "harassing", "dislocation", "shingle", "timbers", "undergoes", "tilting", "conquering", "harass", "meditate", "hues", "alsace", "denominated", "ostensibly", "lumps", "facie", "emploi", "cretaceous", "fished", "drizzle", "bracing", "mesure", "blackmail", "corte", "remorse", "navarre", "clout", "jours", "wag", "fella", "mountaineer", "pondering", "purposely", "worshipped", "lucifer", "unholy", "spectacles", "dulce", "muttered", "aquila", "hoff", "mme", "spat", "henceforth", "argo", "strapping", "expedient", "unconditionally", "ices", "secreted", "buch", "chaucer", "livery", "recapture", "chevalier", "incompatibility", "anchoring", "navigable", "personas", "milieu", "stonehenge", "injure", "knuckles", "zoeken", "intermission", "amazement", "medusa", "pagoda", "manifests", "primed", "keg", "recited", "reformers", "ensued", "justly", "throats", "aron", "barrage", "pis", "pari", "buoyancy", "aussi", "curled", "raoul", "peeping", "paces", "heaviest", "walnuts", "ena", "broadened", "lashes", "esplanade", "prairies", "mandel", "conical", "tricked", "etymology", "cheaply", "allege", "draped", "subtly", "manslaughter", "consort", "shad", "fleeting", "sibley", "plumb", "needlework", "caballero", "annoyances", "uti", "bacchus", "chuckle", "unfolded", "israelites", "rit", "briar", "wavy", "moulds", "hindered", "bloated", "pranks", "mantel", "languedoc", "fatima", "disordered", "belated", "englishman", "winder", "paralyzed", "junta", "shrunk", "crammed", "aar", "hatchet", "unsuspecting", "dismissing", "cetera", "windfall", "filaments", "jocelyn", "companionship", "creeper", "cuando", "epidemics", "illegitimate", "slag", "undisturbed", 
"transcendental", "georgina", "chantilly", "farmed", "fuentes", "malo", "complicate", "alston", "indistinguishable", "skillful", "groot", "compensating", "overrated", "reasonableness", "nuances", "knuckle", "bastion", "scraping", "gypsies", "concurring", "assemblage", "watery", "tro", "juanita", "coiled", "yucatan", "sipping", "beatrix", "cheerfully", "sledge", "gilded", "murdering", "dijon", "unbroken", "sages", "tropic", "capella", "beim", "condemning", "entourage", "travers", "familia", "iota", "realist", "suppressing", "scorn", "crusades", "pris", "whirl", "pervert", "defences", "humiliating", "circled", "withers", "sprout", "elicited", "swirling", "campos", "clinging", "bunches", "bagged", "negotiators", "deviate", "blackened", "whereupon", "muriel", "hostilities", "atelier", "penned", "conte", "horatio", "cheered", "bled", "throbbing", "sleepers", "seiten", "zeit", "sallie", "solace", "lucien", "havre", "moles", "unloaded", "projectile", "transplanted", "bandages", "handcuffs", "beacons", "stucco", "intrinsically", "geschichten", "impervious", "shams", "shawls", "aos", "flourishing", "precedes", "bruises", "instructs", "palatine", "lor", "carnation", "kangaroos", "slum", "ruffle", "knack", "rivet", "aragon", "aggie", "tilly", "sonya", "haue", "grunt", "talmud", "grammars", "overalls", "doubted", "ravaged", "whistling", "upholding", "ailing", "obeyed", "tattooed", "ghostly", "mutiny", "delusions", "foresee", "rations", "bitterly", "windmills", "perpetrator", "cleverly", "misunderstandings", "amerika", "counsellors", "amis", "sisterhood", "lightening", "overturn", "doit", "thoughtfully", "mortem", "rencontre", "risking", "proprietors", "tatiana", "ingress", "gros", "barbers", "retires", "duro", "commotion", "deduce", "bolted", "materialism", "eternally", "senseless", "rabid", "reassure", "recollections", "probed", "pox", "hamlets", "unwritten", "jammed", "moveable", "housekeeper", "agrarian", "humana", "lovable", "sawmill", "abram", "catharine", "consented", "perseus", "styx", "congested", "banished", "terraced", "buttermilk", "laces", "toil", "hugged", "flurry", "gower", "warmest", "horrified", "walpole", "cada", "alte", "bertram", "perturbations", "adversaries", "aunts", "mau", "vapors", "skylight", "gemma", "constantinople", "monarchs", "unsolved", "strenuous", "roost", "unreasonably", "shuffling", "ludicrous", "tenets", "albanians", "pius", "garb", "steadfast", "reckoned", "promissory", "overflows", "queried", "squarely", "softness", "crayon", "rotting", "exhilarating", "excepted", "flavoured", "marque", "ditches", "millionaires", "evade", "pars", "scourge", "twig", "lapis", "bandage", "detach", "virginity", "mala", "doctrinal", "adaptability", "cramped", "wept", "ganz", "racking", "corrects", "avignon", "servicio", "vanishes", "obedient", "selkirk", "mur", "sects", "modo", "anxiously", "ascribed", "strikers", "optimist", "gratification", "seashore", "automaton", "otros", "pierson", "unskilled", "brigadier", "consonant", "acetic", "unarmed", "dyeing", "intolerable", "republished", "tawny", "absinthe", "hygienic", "sufferings", "tahitian", "propagating", "sacraments", "layman", "vellum", "ignatius", "emperors", "ferro", "stalks", "stanza", "londres", "terminations", "novices", "grasped", "bequest", "deo", "beggars", "redeemer", "florin", "quixote", "chaise", "paternal", "dey", "rained", "indigent", "trellis", "trabajo", "mythic", "crystallization", "marries", "echoing", "recitation", "aptly", "alleviation", "liege", "remittances", "romances", "nieces", "characterizes", "papyrus", 
"fop", "candlestick", "circling", "hellas", "sheik", "pints", "girdle", "siamese", "veiled", "blotting", "intimates", "eruptions", "javelin", "ipsum", "stares", "eastward", "tecumseh", "yon", "entree", "desist", "grasshopper", "rheumatic", "autobiographical", "piety", "embody", "petites", "gris", "crawled", "soiled", "dich", "froze", "superfluous", "gai", "disarm", "sot", "tacit", "chansons", "parenthesis", "reorganized", "daybreak", "rallied", "quakers", "pentecost", "beulah", "unveiling", "burg", "astray", "blisters", "infirmary", "hinted", "sanctity", "gad", "modus", "pedantic", "beste", "dennison", "grandes", "bullies", "notoriously", "lucius", "kirsty", "caustic", "rook", "gleaming", "dominoes", "tua", "parochial", "bertie", "moreau", "precedents", "exiled", "howells", "pall", "mustered", "pretext", "whisk", "flared", "kleine", "deference", "artful", "eld", "audacity", "margate", "judson", "downwards", "moat", "inasmuch", "plotters", "caress", "hails", "swam", "wilfred", "mauve", "hazy", "twitch", "alegre", "glorified", "combed", "reclaiming", "baptists", "paraphrase", "flounder", "crept", "fibrous", "swamps", "epilogue", "hoof", "epistle", "exiles", "wheatley", "clapping", "finesse", "sociale", "cordelia", "infrequently", "favoring", "converging", "cour", "firma", "inquisition", "reputed", "dinah", "seduce", "bearers", "kimono", "guesses", "foote", "impossibility", "ceylon", "courant", "invasions", "eminence", "canna", "liberate", "gracie", "gunther", "hanged", "flatter", "acquitted", "dimmer", "sola", "cauldron", "dredge", "tingling", "preferring", "cordial", "reassurance", "superintendents", "nervousness", "delineated", "imaginations", "quarrel", "bess", "aryan", "tendering", "transitive", "furthering", "connoisseur", "idealism", "separable", "awa", "liqueur", "spokes", "pastime", "pursues", "bugle", "luxemburg", "disperse", "incoherent", "fours", "treffen", "devout", "strom", "alva", "unfurnished", "blinding", "inaction", "northward", "trotter", "subversive", "contre", "impediments", "armoured", "breathless", "intertwined", "steen", "corkscrew", "trop", "affections", "inherits", "mortals", "purgatory", "vise", "comer", "tillage", "pere", "discloses", "easterly", "lagged", "hawker", "vertebrates", "toughness", "disrespect", "lagging", "uncovering", "indeterminate", "refreshment", "momentarily", "festa", "langer", "lute", "rosette", "changeable", "tragically", "waverley", "clapham", "trumps", "justifiable", "twofold", "sicilian", "marlowe", "unearned", "thwart", "potted", "chanson", "amelie", "incurring", "gracias", "convalescent", "terme", "mackerel", "goings", "brim", "clinch", "provident", "leprosy", "chum", "cometh", "fitter", "glut", "fasten", "locksmith", "interrupting", "sulla", "daggers", "pleases", "moors", "arno", "geranium", "kendal", "revolve", "choc", "waged", "waxed", "concourse", "confine", "jaded", "mingle", "purify", "desolate", "withdraws", "choked", "whereof", "pape", "gruesome", "pleadings", "defying", "sacs", "perished", "erskine", "tentacles", "britons", "pringle", "outcast", "faraday", "oblong", "ophelia", "wearer", "propriety", "attainable", "hearsay", "roomy", "brutus", "obscurity", "heros", "colonists", "matting", "overflowing", "capers", "entice", "lasso", "soot", "yonder", "virulence", "heretic", "draught", "comical", "generalizations", "waiters", "gasped", "geologists", "caverns", "boarder", "bumping", "eines", "greets", "ova", "waxes", "whiz", "bevel", "straining", "seduced", "angrily", "croquet", "vacate", "stanislaus", "soundness", "marquise", 
"bonjour", "xxiii", "protracted", "siegfried", "affaires", "digby", "eyelid", "undeniable", "taming", "precluded", "repressed", "perforce", "barons", "boundless", "hopelessly", "grandchild", "sucre", "pasteur", "valuables", "indignation", "sprinkled", "menstruation", "stuffs", "antichrist", "emptying", "reiterate", "himalayas", "monopolies", "sowing", "frills", "wad", "shearing", "ruining", "pinion", "yew", "windward", "hermosa", "haunts", "unsere", "brawl", "delirium", "unfounded", "heroism", "gillis", "rutledge", "barrister", "neglecting", "saxony", "karel", "vane", "alienated", "tum", "synagogues", "entangled", "mane", "reise", "liberating", "embarking", "tonneau", "cynicism", "bayonet", "considerate", "extraneous", "janitor", "environs", "reverses", "reunite", "hawkeye", "steers", "ravenna", "crockery", "juries", "presidente", "nang", "gare", "legacies", "tial", "theologians", "arnaud", "enticing", "embankment", "quadruple", "crazed", "xxii", "equipping", "fondly", "whither", "counteract", "sighs", "discouraging", "flasks", "preservative", "tribulation", "bridesmaids", "rhea", "raided", "salaried", "mises", "intolerant", "rarities", "battled", "obstructions", "discredit", "grotesque", "artistes", "perugia", "gij", "spoils", "monasteries", "crucible", "modena", "generalize", "hasta", "pronouns", "misconception", "rudimentary", "sown", "protege", "vulgaris", "beak", "settler", "prag", "rabble", "rung", "piped", "orpheus", "retour", "insurgent", "rightfully", "hilfe", "medici", "fabrice", "marshals", "nue", "crumbling", "relegated", "allotments", "immer", "stagnant", "giacomo", "follies", "dells", "cleanly", "unclean", "seizing", "molasses", "tablecloth", "hutchins", "purifying", "delineation", "schooner", "dignified", "numbness", "papier", "machinist", "anima", "apologized", "meshes", "grotto", "marais", "loam", "politique", "carnations", "rivets", "jeune", "hatching", "leveled", "graces", "corinne", "adheres", "collusion", "rawhide", "propos", "knotted", "agitated", "sorter", "misused", "relieves", "linguist", "rigorously", "erroneously", "especial", "betray", "dario", "cui", "heywood", "suspending", "mormons", "davids", "bennet", "proclaiming", "purposeful", "undress", "procrastination", "hemel", "gauze", "precepts", "constellations", "gazed", "skips", "forceful", "fuente", "magdalena", "rut", "sehr", "hera", "subterranean", "rumored", "galicia", "amuse", "villager", "fixer", "condensing", "emanating", "assassinated", "brodie", "untimely", "associating", "romp", "idiom", "tangle", "legitimately", "congratulated", "couriers", "unwelcome", "concurred", "upsets", "sceptre", "confederacy", "matinee", "snatched", "plunder", "maa", "impromptu", "searchers", "gamut", "czar", "putney", "shattering", "refute", "amphibious", "mush", "shudder", "eyesight", "parson", "infidelity", "firemen", "contrived", "exhausts", "opposites", "dreamers", "foal", "hesse", "hesitated", "precarious", "hodder", "pease", "testifying", "topographical", "instructing", "dreary", "crispin", "horrid", "dryness", "wreckage", "paras", "captives", "despised", "conqueror", "innocents", "unprepared", "dost", "treacherous", "filet", "infidel", "volley", "carnal", "larceny", "versed", "confronts", "parliaments", "mitigated", "youngster", "enigmatic", "bridle", "stretcher", "cosa", "enfants", "leila", "berliner", "effecting", "hallucinations", "unravel", "smugglers", "intimidate", "rubens", "galilee", "frenchman", "tiller", "orifice", "bragging", "hordes", "beryl", "ferre", "forerunner", "grinning", "slashed", "watchful", 
"appalled", "silenced", "vanities", "evaporated", "affliction", "zag", "intestines", "saute", "iba", "schuyler", "idyllic", "satchel", "peruse", "revel", "alleys", "crucifixion", "hearn", "madly", "stiller", "experimented", "comming", "steeped", "gripe", "summa", "eyelids", "thereupon", "archers", "steamers", "bubbling", "forbids", "disdain", "exhausting", "absurdity", "magnified", "horsemen", "alabaster", "reigning", "deane", "georgie", "zara", "bribes", "kidnap", "coercive", "romanticism", "luo", "forme", "reinstate", "unthinkable", "lowly", "outburst", "scant", "mattered", "fitzroy", "ove", "raspberries", "sorely", "pail", "obtainable", "elvira", "mastiff", "drummers", "reformer", "solemnly", "liberally", "dahlia", "concentric", "loin", "ved", "unwarranted", "marmalade", "sandoval", "applauded", "ravine", "exponents", "brice", "ressources", "californians", "procuring", "pours", "leer", "nave", "arranges", "valhalla", "adoration", "amity", "superiors", "decanter", "starve", "leek", "shortness", "fronted", "lightest", "banquets", "picnics", "compulsion", "prerogative", "abscess", "paraphernalia", "heretofore", "memento", "lina", "tumbled", "masterful", "insoluble", "cockburn", "harwich", "casas", "semper", "repressive", "clos", "sweeter", "mattie", "deutscher", "spilling", "saucers", "gondola", "elizabethan", "hein", "spines", "reiter", "amphitheatre", "stupendous", "flutter", "acumen", "absolut", "shiver", "lumiere", "shatter", "pickled", "nieuwe", "hades", "superimposed", "burdened", "randal", "dandelion", "nuance", "classmate", "catechism", "driftwood", "rosalind", "giorni", "juin", "bigelow", "anointed", "mythological", "interspersed", "horseman", "nervously", "intruders", "chaparral", "nya", "decaying", "vez", "muses", "padlock", "oars", "gilead", "classed", "informer", "freer", "toute", "calabria", "dismantled", "overcame", "exertion", "solidly", "affidavits", "weaves", "chimera", "handkerchief", "foaming", "tailors", "barbarians", "splendour", "niveau", "sheriffs", "tassel", "admiring", "harmonized", "khartoum", "leans", "frankreich", "baffled", "wasteful", "hertford", "tripoli", "refraction", "grainger", "penzance", "fillets", "aztecs", "consults", "hoi", "foils", "retract", "inaudible", "nurtured", "frantically", "buoys", "tait", "disintegration", "theologian", "aquitaine", "sigmund", "individualism", "starboard", "precludes", "burdensome", "brest", "renown", "murky", "truthfully", "deutschen", "tongs", "perpetuate", "vigo", "cabal", "musa", "materia", "interwoven", "beggar", "pard", "extinguished", "silhouettes", "abundantly", "declination", "excesses", "mucous", "poked", "caricatures", "artiste", "bogen", "repose", "hasten", "tendered", "temperance", "risque", "resembled", "helpfulness", "omitting", "earthy", "adored", "embellished", "feathered", "aggrieved", "hacer", "assisi", "aggravating", "insulted", "fugitives", "passe", "anecdote", "partake", "pseudonym", "altitudes", "carolinas", "strikingly", "zy", "rancher", "morn", "bodyguard", "gnats", "solon", "eduard", "detract", "portraying", "pitted", "enlarging", "wrecks", "bombardment", "buckner", "dares", "tems", "eigen", "siesta", "satirical", "paar", "antoinette", "ugo", "cynic", "amenable", "runways", "frowned", "sass", "rout", "pus", "rubies", "checkered", "hatched", "sketching", "hypocritical", "trample", "courtship", "cupboards", "tolerable", "magi", "brescia", "alonzo", "tutto", "attenuated", "inefficiency", "merci", "booms", "demented", "eri", "bonaparte", "musketeers", "twickenham", "glee", "forgets", "grapple", 
"lowlands", "stimulants", "greenery", "proverbial", "tranquillity", "numa", "monastic", "uncles", "eph", "soared", "householders", "nestor", "impediment", "hel", "anarchists", "freund", "perilous", "devonshire", "tanto", "violets", "nouvelles", "nether", "nomads", "ramble", "ambulances", "natura", "hams", "idiotic", "parti", "cerberus", "bering", "formosa", "erg", "bough", "hoot", "herewith", "workmen", "grist", "penrose", "duster", "pronoun", "signer", "sloth", "steely", "pulleys", "fates", "stews", "nourishment", "gravitation", "loophole", "drags", "retrograde", "sade", "exaggeration", "shadowy", "liquors", "archangel", "fenwick", "creases", "primordial", "nourish", "vit", "uplifted", "percival", "gingham", "batterie", "gossamer", "hairdresser", "plover", "weg", "mow", "disliked", "leinster", "impurity", "worshipping", "chasm", "nuovo", "greenish", "regiments", "adel", "selfishness", "reactionary", "adriatic", "ejected", "grappling", "hammering", "mingling", "earnestly", "scribes", "leed", "monologue", "amphitheater", "vive", "signaled", "clem", "littered", "acutely", "razors", "masse", "legumes", "speculated", "worded", "quant", "fleshy", "desirability", "sundown", "persistently", "decoy", "balsam", "baruch", "verdicts", "authorise", "outcry", "eyeglass", "waterside", "grime", "extortion", "cordon", "colorless", "idealistic", "cutlass", "rigor", "greyhounds", "amalgamation", "preponderance", "cowardly", "pretentious", "cervantes", "wielding", "gusto", "maidens", "weimar", "mijn", "humbly", "langue", "unworthy", "expectant", "laurens", "azalea", "jeannette", "fruition", "florentine", "dwelt", "vlaanderen", "oberon", "enslaved", "vil", "cathay", "jura", "correspondingly", "legalized", "predicament", "hilly", "aisles", "trusty", "gratuitous", "fatally", "caged", "ephemeral", "radium", "dissimilar", "mutilation", "kon", "waging", "infringed", "overwhelm", "cognizant", "profil", "andalusia", "rowdy", "popes", "bravely", "sportsmen", "stumbles", "clematis", "slashing", "leger", "incomprehensible", "suez", "clogged", "gabriella", "fluctuating", "demeanor", "shipboard", "labourers", "paganism", "fido", "sounder", "mest", "caledonian", "hegel", "stench", "cursing", "pmb", "wickedness", "crouching", "attila", "emits", "culminated", "thefts", "sturm", "weiter", "auld", "spanned", "ebenezer", "closeness", "redeeming", "polity", "scriptural", "transylvania", "obscenity", "gaul", "heartache", "reigned", "entitles", "exacting", "wanton", "pelle", "enforces", "necessitate", "locket", "aver", "commemorating", "reconciling", "desolation", "gander", "bastille", "traceable", "voila", "savor", "darkly", "faithfulness", "resourceful", "heraldry", "incomparable", "dilated", "angered", "condone", "ahora", "mademoiselle", "constitutionality", "viscount", "preliminaries", "devolved", "liquefied", "alcatraz", "streamed", "resorting", "garters", "adamant", "pontoon", "tableau", "vernal", "napoleonic", "tennyson", "rubicon", "disorderly", "tala", "ivanhoe", "destroyers", "analogies", "frigate", "instalment", "dazed", "sentient", "entrust", "iti", "puffs", "burying", "dispatching", "cyclops", "veritable", "posterity", "keenly", "healthful", "nem", "meine", "repealing", "gourd", "groaned", "ferocious", "voicing", "mons", "sacrificial", "defies", "abnormally", "resuming", "bruising", "flogging", "religiously", "mundi", "encroachment", "demande", "seaboard", "laplace", "southerly", "humiliated", "unearthed", "sut", "cataracts", "subordinates", "vagabond", "consecrated", "oscillating", "jib", "bodice", "foray", 
"opiate", "cristal", "unmistakable", "filly", "rhubarb", "silencing", "aesop", "hab", "diminishes", "tidings", "sneaking", "unassisted", "insidious", "dike", "immutable", "croton", "depots", "nodding", "jasmin", "libri", "misrepresented", "amici", "substantiate", "algiers", "ocho", "templar", "cedars", "fortitude", "aloft", "mated", "wart", "tribus", "hollander", "ruffled", "armament", "plums", "tien", "revisiting", "fairer", "enterprising", "prides", "grafting", "smoothness", "trinket", "neutralize", "vasco", "playwrights", "wishful", "fal", "herod", "trailed", "habitation", "rogues", "speechless", "expanse", "preside", "arles", "colette", "delightfully", "oeuvres", "concealment", "unruly", "uncompromising", "moriarty", "obstruct", "unbounded", "coincided", "encased", "undertaker", "flickering", "sive", "gush", "saddened", "bathe", "scarred", "ignited", "crowding", "tew", "vrouw", "gladiators", "krebs", "stoddard", "scrooge", "aeroplane", "nagging", "contemporaneous", "precipitated", "hiss", "outlawed", "injuring", "bellow", "girth", "poppies", "inlaid", "notched", "baldness", "didactic", "lillie", "irritability", "provocation", "lustrous", "reeling", "desertification", "rennes", "crests", "molto", "loafers", "slapping", "tiene", "squires", "insures", "slaying", "mie", "frauds", "lobes", "dios", "thundering", "remus", "coals", "succulent", "heartily", "hic", "yellowish", "unsuccessfully", "moderne", "moustache", "geen", "lobsters", "eventful", "feasts", "stiletto", "teacup", "rebekah", "kein", "alvarado", "secession", "countered", "instinctively", "conspiracies", "chapels", "grado", "minions", "brunt", "infraction", "gory", "glens", "strangest", "stagnation", "displace", "countrymen", "perishable", "lyra", "gustave", "proteus", "denoting", "apiece", "jeanie", "strasse", "gammon", "storming", "islet", "conduits", "cinco", "headway", "friars", "maples", "alluring", "ikke", "edouard", "buzzard", "bony", "halting", "sana", "halley", "cranks", "headwaters", "reviving", "burrow", "universality", "veranda", "underrated", "insatiable", "exquisitely", "unfriendly", "hatches", "christened", "actuality", "teased", "murad", "attica", "flatten", "savant", "appreciating", "stinging", "membres", "gulls", "prescribes", "sultry", "sinned", "globular", "asiatic", "macaulay", "depositing", "engravings", "showering", "fanatical", "caper", "yann", "predicated", "montezuma", "lentils", "quack", "bruges", "grooms", "ousted", "cask", "grocer", "speedily", "auberge", "negroes", "chases", "intervened", "mezzo", "incarnate", "chimneys", "hela", "preoccupied", "hither", "diggers", "glances", "tyrants", "constantin", "giddy", "denounce", "entertainments", "oaths", "furness", "ripples", "herz", "bloodshed", "maw", "viento", "upsetting", "durante", "oxen", "nascent", "toda", "reinforcements", "precept", "salerno", "pavements", "murmured", "propellers", "violinist", "himalaya", "gibbon", "gratifying", "delirious", "excepting", "unlawfully", "spanien", "urchin", "polygamy", "utterances", "devising", "sustains", "woodman", "gravely", "errands", "hells", "cartes", "impulsive", "spasms", "rationally", "psychologie", "uproar", "savages", "craters", "wilmot", "mockery", "railings", "paulina", "northerly", "tenths", "quench", "passer", "projekt", "encompassed", "broil", "hurrah", "modestly", "epitaph", "allahabad", "insurrection", "brugge", "alger", "emigrated", "barges", "nota", "tremblant", "antennae", "fermented", "enfant", "headmaster", "walrus", "secretive", "grievous", "generative", "assyrian", "repetitions", 
"pensioner", "spellbound", "bretagne", "tengo", "domenico", "fend", "sapphires", "compressing", "intoxicating", "crumble", "resorted", "lecturing", "retreated", "senza", "magdalene", "veer", "netted", "dispel", "warships", "tamar", "woodbine", "straightening", "envious", "regretted", "colic", "oni", "membre", "adolph", "farthest", "iniquity", "fooling", "vaulted", "warms", "formalities", "resounding", "aku", "brazos", "saucy", "blistering", "illuminates", "masque", "kazan", "shillings", "gleaned", "decomposed", "flowery", "scandalous", "blas", "ciel", "menacing", "elector", "lili", "neurotic", "bituminous", "askew", "phipps", "groan", "dusting", "lombardy", "uncontrollable", "shackles", "shrines", "bridged", "consenting", "torturing", "toile", "relentlessly", "bracken", "couches", "decadence", "antes", "nourishing", "herschel", "reconsidered", "anche", "arduous", "morten", "assimilated", "creeps", "gripped", "sama", "unscrupulous", "nymphs", "unsettled", "inseparable", "caso", "jurist", "vestal", "dismisses", "variously", "arran", "unintentionally", "sprites", "dashing", "tiring", "abate", "piloting", "decreed", "mossy", "ores", "banque", "keyhole", "usages", "wickham", "vieux", "bowels", "cornet", "reversion", "sanctuaries", "convicts", "osman", "lodger", "santee", "thunderbolt", "claudius", "tremors", "apropos", "pitiful", "winkel", "sparrows", "bleached", "arbiter", "locomotion", "hus", "antimony", "hater", "buoyant", "expel", "martine", "combatant", "swoop", "neuter", "prejudicial", "gente", "introspection", "meister", "mariage", "benedictine", "reputations", "vitally", "mavis", "undivided", "chatted", "lured", "hurling", "brevity", "visage", "prickly", "septembre", "astonishment", "overshadowed", "rescuing", "sensibilities", "meritorious", "beheld", "martyrdom", "manna", "octobre", "moorings", "buddhists", "soars", "gnat", "housework", "gunpowder", "undressed", "southward", "liszt", "zwei", "zorn", "recounted", "denials", "prussian", "adorn", "contemplative", "awkwardly", "etta", "projets", "lik", "belles", "stipulations", "lifeless", "baffle", "pared", "sobriety", "slums", "burnet", "spaniards", "piloted", "successively", "cucumbers", "squaw", "snowdon", "pomegranate", "glas", "bouts", "transcends", "murmur", "bookkeeper", "crickets", "extinguishing", "noche", "attache", "bulging", "chemise", "epics", "smug", "flanking", "dons", "stadt", "prejudiced", "larva", "laziness", "mouldings", "tireless", "leander", "growl", "gorges", "stata", "canons", "pastimes", "diurnal", "coolness", "busca", "recumbent", "shipwreck", "fader", "unconsciously", "buffaloes", "marne", "dissolving", "osmond", "highness", "abstracted", "typhoid", "perfecting", "nez", "furtherance", "suis", "slits", "inquires", "yule", "phantasy", "sprache", "hoss", "crusty", "stillness", "precipitate", "underlie", "pharisees", "nicknamed", "drones", "minster", "sully", "bate", "pert", "depositions", "camped", "fraught", "perplexed", "replenish", "necessitated", "slowest", "unwillingness", "sehen", "trimmings", "esperanza", "divan", "lehrer", "holborn", "concours", "extraordinaire", "eloquence", "definitively", "natchez", "tripped", "strewn", "rubles", "bewildered", "beatings", "copious", "cade", "tremble", "instantaneously", "thump", "ghi", "pompeii", "alluded", "aberrations", "sojourn", "stateroom", "palacio", "adherents", "herbaceous", "distinguishable", "immaterial", "sina", "surging", "lop", "greased", "contraband", "flagging", "willed", "wounding", "inclement", "ange", "magpie", "stil", "robbing", "impartiality", 
"phosphates", "harpsichord", "capes", "impersonal", "proposer", "interpolated", "strolling", "moro", "salvo", "twigs", "furiously", "epitome", "joked", "breaths", "lilian", "glancing", "discarding", "fared", "fleck", "inflamed", "clough", "unlink", "shadowing", "wert", "regimental", "signifying", "tutte", "rectified", "savoie", "flanked", "bayonne", "primacy", "fuego", "buckland", "centrale", "eyeing", "bade", "insolvent", "mists", "nuit", "carmine", "relinquish", "emilie", "succinct", "palpable", "eton", "estar", "inhale", "dreamt", "convulsions", "snowshoes", "fiancee", "fue", "blumen", "yolk", "mediocrity", "rhyming", "sucht", "transcendent", "lichen", "lapsed", "stroked", "gallop", "cull", "unsatisfied", "wmo", "minstrel", "ewe", "contentment", "fareham", "cranium", "politic", "exchequer", "falsehood", "slugs", "carcasses", "piero", "candlesticks", "rosalie", "mingled", "rafts", "indulgent", "longed", "rammed", "wailing", "shrugs", "negros", "vertebrae", "moans", "buffets", "aristocracy", "eaves", "popularly", "brinkley", "marred", "falconer", "watchman", "venturing", "entitle", "bagley", "alibi", "ahoy", "jellies", "postponement", "brooding", "juncture", "greenleaf", "naturalized", "pikes", "haar", "meager", "commandant", "copernicus", "bourgeoisie", "plucked", "inflexible", "flowered", "bueno", "discord", "patrolling", "injurious", "voiture", "utilitarian", "compacted", "ende", "doughnuts", "reread", "stormed", "crucifix", "irreverent", "censure", "carbine", "credo", "heartless", "contented", "vultures", "forcible", "bushy", "thickening", "moins", "porches", "inoculation", "luxuries", "glorify", "abner", "maris", "admixture", "heredity", "nominally", "forza", "chloroform", "nettle", "mismanagement", "convincingly", "evangeline", "descends", "mischievous", "fateful", "complacency", "impregnated", "insular", "lagoons", "sensuality", "vere", "affix", "professed", "unrivalled", "sensuous", "owne", "sawing", "yelp", "herding", "mammalia", "hopped", "sceptical", "arma", "interfered", "halcyon", "bowing", "cogent", "parishioners", "traversing", "uninformed", "yorke", "aberration", "mollie", "nef", "conclusively", "calcareous", "tufted", "chieftain", "gestalt", "honeysuckle", "zeitschrift", "unspoken", "ishmael", "apprehended", "rhoda", "jammer", "forbidding", "sparring", "mindanao", "adonis", "domed", "distressing", "prettiest", "lif", "panes", "testifies", "filipinos", "chambre", "dainty", "crackle", "jes", "thwarted", "alban", "planks", "orville", "belcher", "spirals", "speculations", "sedentary", "extermination", "plumes", "outweighed", "transposition", "acheter", "beets", "repel", "pali", "coleridge", "anxieties", "poste", "onerous", "tenderly", "bonny", "haddock", "virginian", "pyjamas", "finns", "oftentimes", "entanglement", "miserably", "savoir", "rojas", "argosy", "elba", "stumps", "clouded", "diverting", "derogatory", "esteban", "xxiv", "sear", "rouen", "inaccuracy", "assimilate", "medea", "regenerated", "laine", "gottfried", "rapp", "credence", "welling", "patrolled", "georgette", "lovelace", "caen", "conferring", "incite", "divulge", "wardens", "scrubbing", "laughable", "momentous", "footpath", "entreprise", "harem", "fussy", "civility", "deluge", "squadrons", "ventricle", "fluted", "sweetened", "pry", "venison", "shoal", "basking", "pare", "blushing", "breathes", "lectured", "babylonian", "annonce", "morte", "bord", "skillfully", "heady", "confucius", "bombarded", "celts", "bathed", "cortes", "intractable", "corresponded", "speckled", "enumerate", "persuading", "onondaga", 
"diphtheria", "plaines", "hoard", "offre", "courting", "petrie", "lading", "woodcock", "churning", "chariots", "battalions", "unquestionably", "presque", "reproach", "viol", "vishnu", "cherub", "lieder", "trumpeter", "straws", "serrated", "puny", "emphatically", "reassured", "perceiving", "commendation", "leben", "contending", "patriarchal", "spelt", "barks", "dodging", "antiseptic", "browned", "oed", "hendrik", "highlanders", "ligaments", "wurde", "upheaval", "cringe", "crimea", "sugarcane", "mouthful", "gazelle", "gauche", "minion", "complicity", "unstrung", "tendons", "thrives", "penchant", "drab", "roared", "prospector", "unwise", "financier", "allegory", "harbours", "konstantin", "acropolis", "stifle", "tiberius", "paradoxical", "rousing", "sebastopol", "knelt", "radiating", "devour", "treachery", "petting", "inoculated", "princesses", "rossini", "portraiture", "incapacitated", "attested", "ope", "nuestra", "overcrowded", "warring", "arouse", "ticked", "purged", "repulsive", "sikkim", "seclusion", "elucidate", "fated", "frighten", "amputation", "halts", "subtlety", "creditable", "protruding", "appreciable", "delicacy", "paradis", "cinch", "futility", "dumplings", "diesen", "upholds", "enlistment", "inroads", "blissful", "boasted", "zealanders", "stirs", "platonic", "donkeys", "etna", "averse", "siempre", "afield", "endearing", "mishap", "lackey", "quod", "labors", "whooping", "sonnets", "musing", "masai", "barricade", "inquest", "snipe", "hapless", "cuenta", "polen", "ably", "montagne", "brun", "mirza", "beaux", "traversed", "sparsely", "shrinks", "channing", "fib", "ail", "innkeeper", "mistrust", "overcomes", "lordship", "egregious", "cubans", "transacted", "blaise", "chaplains", "conventionally", "nuestro", "perceptive", "haber", "lard", "destitute", "platz", "disbanded", "singly", "headless", "petrified", "emigrants", "thane", "salve", "hindustan", "marseilles", "beauchamp", "grates", "fissure", "curtail", "talker", "divorces", "vitesse", "winks", "harte", "loopholes", "soit", "novelists", "bestow", "homespun", "hulls", "complimented", "intonation", "proclaims", "dissecting", "clamped", "retracted", "friar", "hospitable", "melodrama", "creased", "preparer", "postures", "trapper", "makeshift", "tattered", "embarrass", "slanted", "plagues", "jota", "harvests", "surged", "blume", "natured", "clemency", "woolly", "blemish", "ajouter", "bushels", "tapers", "geniuses", "rind", "whiskers", "huntsman", "personne", "perpetually", "soundings", "evicted", "rara", "divisible", "accumulations", "lightness", "avoir", "quelle", "admirers", "marcello", "harbinger", "mustache", "revolutionize", "dwindling", "beaker", "arcades", "baggy", "jeweled", "rejoicing", "uomo", "ariadne", "dickie", "quiver", "sylvie", "frequented", "coronet", "agnew", "discredited", "taverns", "prodigal", "aden", "wield", "resolute", "adage", "wetter", "jeg", "conjure", "rote", "recitals", "adrift", "confiscation", "stings", "budge", "ilk", "ose", "silks", "sequins", "fringed", "goblins", "delineate", "organist", "kneel", "illuminations", "chuckled", "tacitus", "armenians", "excels", "furthest", "virulent", "masts", "garret", "commendable", "inadequacy", "barbaric", "deliciously", "ruse", "persephone", "lifelike", "culled", "muss", "presbytery", "tumblers", "gunshot", "desiree", "supposing", "sculptors", "charme", "calicut", "inde", "castilla", "zealous", "rattlesnake", "iridescent", "robberies", "elms", "excelled", "twine", "meteors", "judicious", "unaltered", "collation", "geist", "silvio", "parke", "diction", 
"unoccupied", "tigris", "pedestals", "tribulations", "colman", "sabina", "meilleurs", "buckwheat", "enshrined", "surpasses", "yearling", "agape", "wrenching", "damnation", "rapidity", "bajo", "tempus", "deleterious", "intersecting", "garibaldi", "alluvial", "xxv", "incisive", "concealing", "clutching", "drifts", "tenement", "discernment", "chalice", "hypocrite", "harrowing", "prefect", "sweetly", "cleave", "flimsy", "strada", "delilah", "bedded", "shivering", "formality", "produit", "mangroves", "suffices", "bingley", "whosoever", "comte", "tigre", "cham", "graced", "ultimo", "statuary", "moraine", "moravian", "intermittently", "armaments", "grins", "chewed", "accomplishes", "inapplicable", "bly", "pasha", "scour", "motionless", "notaries", "galant", "fallow", "indictments", "aileen", "leapt", "pelo", "widower", "quagmire", "taffy", "purging", "cleansed", "bem", "fainting", "theorist", "scaring", "serviceable", "obstructed", "indigestion", "jackal", "snowflakes", "massacres", "entailed", "curative", "bier", "traitors", "igneous", "cambio", "lull", "rinsed", "delectable", "proletariat", "lise", "fanciful", "bey", "mystics", "fresher", "consummate", "brows", "technic", "veda", "ephesus", "domesticated", "dismayed", "steered", "remitted", "shew", "miraculously", "lapses", "romagna", "freemasonry", "dwells", "penitentiary", "shrewd", "impatience", "italie", "crass", "spaulding", "jot", "gott", "benevolence", "lancelot", "suspiciously", "eugenia", "reprimand", "mangled", "staunch", "shaven", "fez", "feld", "molestation", "quarts", "yells", "lacs", "blindfolded", "premiers", "wraith", "nimble", "hyacinth", "yonge", "durst", "naturalists", "derelict", "gle", "shrouded", "clarissa", "brazen", "inundated", "joie", "brahma", "anni", "veracity", "pinocchio", "angers", "gustavus", "raps", "unwittingly", "counsels", "battlefields", "antecedent", "matty", "dorothea", "licht", "legislate", "voluptuous", "complacent", "germania", "grandmothers", "dalla", "objet", "unaccompanied", "schooled", "picts", "foresters", "hag", "guerre", "dorn", "ainsi", "orinoco", "loveless", "sharpened", "nostrils", "cambrian", "impure", "gridiron", "innermost", "wry", "pilate", "pinning", "alms", "stung", "koko", "phantoms", "retort", "congregate", "meditative", "smirking", "chestnuts", "expositions", "begotten", "gainsborough", "sparkles", "collared", "stringed", "barnabas", "weeding", "evasive", "smirk", "ancora", "pausing", "grands", "replete", "inconceivable", "antworten", "crutches", "apportioned", "pawnee", "accumulates", "failings", "otra", "bristle", "classe", "terrors", "uriah", "oblige", "visite", "panacea", "vibrate", "penetrates", "mayhew", "cathedrals", "toads", "liber", "perceives", "nubian", "stumped", "cramp", "sodom", "imitations", "mistletoe", "naam", "hallowed", "appease", "hawes", "furlong", "heralded", "linde", "clearest", "supersede", "shovels", "renaud", "phrasing", "quarries", "sensibly", "vio", "mouthed", "gills", "braids", "milder", "inexplicable", "counterfeiting", "expeditious", "intently", "chrysalis", "rechercher", "hoary", "corse", "crocodiles", "ronde", "eze", "zeno", "deceiving", "oedipus", "beamed", "scraped", "chagrin", "vill", "tickled", "hindrance", "discreetly", "sparing", "emeralds", "wanders", "disillusioned", "preoccupation", "stato", "restful", "aristocratic", "scouring", "profitably", "pinched", "purport", "plunging", "shambles", "juillet", "marten", "admittance", "stinking", "porridge", "symbolize", "standstill", "unattractive", "diffused", "firmer", "reproduces", "promulgation", 
"unshaven", "rakes", "sante", "incognito", "silliness", "burgh", "giggling", "coldest", "proviso", "quando", "barnyard", "dikes", "vento", "donal", "artifice", "dato", "glides", "allot", "witte", "vad", "progenitor", "abomination", "erste", "mote", "argumentation", "passively", "hurled", "vesta", "jacky", "wold", "habe", "straightened", "deranged", "contesting", "darwinian", "touchy", "rafters", "unintelligible", "whitworth", "hinten", "infantile", "unspeakable", "demolish", "comforted", "disgraceful", "worshippers", "servitude", "aqueduct", "framers", "streamers", "humbled", "marcella", "radiate", "stipulate", "proximate", "secretions", "attains", "gallus", "idem", "hark", "perturbed", "cemented", "dissolves", "crowning", "bettina", "smuggled", "punctuated", "blunder", "euston", "zucker", "belted", "baal", "felon", "deen", "thud", "hagar", "antlers", "doubting", "dunkirk", "libretto", "debatable", "reaping", "aborigines", "estranged", "merthyr", "ihn", "joh", "decisively", "swims", "undeniably", "spasm", "kom", "notables", "eminently", "snorting", "seguro", "mercilessly", "firs", "cobbler", "invigorating", "heinous", "dusky", "kultur", "esso", "linnaeus", "infallible", "loaves", "dieu", "heeled", "quibble", "meandering", "incessant", "baines", "blick", "namen", "cheery", "curbing", "harshly", "betterment", "rump", "oben", "sweethearts", "slush", "mutton", "coi", "blinked", "altri", "lenore", "townshend", "zigzag", "lesen", "dragoon", "sympathies", "leggings", "benefactor", "thales", "nacht", "merrily", "vouch", "pompey", "blackness", "transitory", "gales", "hypocrites", "larynx", "droughts", "ancona", "springing", "bethune", "nocturne", "perdue", "altruism", "ceasing", "dutchman", "capricious", "angelique", "harmonize", "crescendo", "gipsy", "frederik", "miserables", "amalgamated", "obeying", "gunners", "pent", "mishaps", "subsidence", "plastering", "promiscuous", "asturias", "basso", "dusted", "sago", "inlets", "fords", "pekka", "parentage", "mutter", "litters", "brothel", "rive", "shelled", "outlandish", "sneezing", "sancho", "variegated", "abysmal", "personnes", "bourse", "tenacity", "partir", "moslem", "fourths", "revolutionized", "permanence", "coincident", "inez", "minding", "permis", "enviable", "accessions", "carpeted", "zeke", "eloquently", "overtaken", "hock", "subheading", "renews", "extinguish", "oli", "lowing", "bullied", "accruing", "dirge", "actuated", "bluish", "tingle", "captivated", "parlors", "lamented", "bruise", "cesare", "perfumed", "dames", "unfettered", "imogen", "lewd", "thither", "rebuke", "collated", "occasioned", "swayed", "dupe", "bogs", "affording", "assuredly", "allusions", "shadowed", "seamen", "intelligible", "overlaid", "censors", "shakespearean", "edict", "octavia", "boyhood", "sustenance", "shrew", "freya", "disrespectful", "confounding", "dispensation", "arian", "depreciated", "diagonally", "cased", "laterally", "prays", "nonce", "lemme", "elevating", "augustin", "beresford", "loup", "likened", "bericht", "sketched", "plage", "firmness", "injustices", "longfellow", "unequivocally", "perspiration", "mirth", "serre", "pauper", "brooms", "horus", "casi", "fois", "ushered", "remedied", "vocations", "depuis", "scorched", "instep", "wilfrid", "machiavelli", "ivor", "mignon", "houseboat", "krieg", "clementine", "smokeless", "stanhope", "thorax", "recherches", "warship", "corinthian", "rattles", "esti", "garten", "dislocated", "marvels", "booby", "conceivably", "persians", "injunctions", "crunching", "exuberant", "dus", "composure", "contradicted", 
"birthright", "errant", "proofread", "rearranged", "heifer", "earthen", "uplands", "paget", "portcullis", "noose", "recur", "desirous", "exemplar", "shivers", "smitten", "rarest", "quiero", "averted", "publique", "dissipated", "gregorio", "masquerading", "discernible", "looser", "ptolemy", "lauded", "pais", "consonants", "demarcation", "miocene", "steeple", "concussion", "nailing", "deadliest", "sparingly", "penance", "priestly", "curtailed", "lovejoy", "rollo", "conspicuously", "risked", "bowled", "modernized", "blemishes", "eagerness", "pearly", "recklessly", "islets", "apothecary", "gagne", "looted", "padua", "jointed", "heyday", "voce", "pulsating", "beaming", "dore", "taint", "lounging", "predisposition", "outwardly", "tumultuous", "overseer", "chine", "crier", "decompose", "unimaginable", "briton", "glistening", "moonshine", "jurgen", "leurs", "scribble", "anselm", "fete", "puerta", "peculiarities", "lichtenstein", "favourably", "beset", "romain", "involuntarily", "swede", "discoverer", "livers", "plowing", "militarism", "glassy", "riddled", "wealthiest", "shrill", "swedes", "headland", "agitator", "utensil", "volk", "sheba", "glows", "heighten", "surpassing", "ladle", "pasa", "pinks", "rusted", "naturalistic", "dogmatic", "tristram", "ballon", "surly", "presente", "sonne", "fertilized", "admirer", "seco", "gibt", "motioned", "catastrophes", "thickened", "indra", "candor", "sabin", "wigwam", "animales", "beheaded", "postmark", "helga", "bereaved", "malin", "drugged", "motte", "volga", "rivalries", "gnomes", "denne", "affectionately", "uneducated", "necessitates", "blunders", "proportionately", "corea", "porque", "mocked", "holler", "fain", "hae", "sint", "darrin", "mois", "cruelly", "tapioca", "furrow", "fewest", "parables", "drowsy", "bushel", "beholder", "sedition", "lutherans", "examen", "ghastly", "vaudeville", "succumb", "criticise", "inquisitive", "doorways", "sirs", "overruled", "menagerie", "osgood", "teamsters", "seul", "forked", "apprehensive", "cowards", "cielo", "cowl", "captors", "fils", "laity", "prefixed", "arming", "amassed", "itinerant", "felons", "dormitories", "dearth", "palatable", "unmasked", "instinctive", "corpo", "sais", "restlessness", "baptised", "burlesque", "regaining", "perversion", "swells", "sujet", "acquaint", "tog", "altro", "havelock", "lengthening", "taut", "laa", "romulus", "sommers", "doings", "financiers", "foolishness", "unequivocal", "noire", "arriba", "silken", "stringing", "bazar", "thrusting", "pavilions", "maddy", "clung", "hie", "bist", "needlessly", "squatting", "cordially", "wilkie", "succumbed", "superstitions", "spangled", "rectory", "alli", "multum", "iliad", "graze", "looped", "unobtrusive", "judea", "currant", "underlies", "intricacies", "afoot", "oddity", "gerrit", "cornered", "auspicious", "splashing", "hotly", "puffed", "disapproved", "interlaced", "instalments", "presumptive", "comprehensible", "tempore", "fallacies", "theodor", "sawdust", "metaphorical", "leaped", "alertness", "embers", "assemblages", "searchlight", "heil", "swinton", "ize", "snob", "stave", "vertu", "snowing", "bleeds", "canaries", "semblance", "shins", "fickle", "outnumbered", "recht", "lukewarm", "quai", "rotunda", "observances", "faintly", "indiscriminate", "alphonse", "piu", "raison", "eyeballs", "barricades", "devoting", "idolatry", "decked", "introspective", "aggravation", "sedge", "nou", "pinching", "tine", "pretenders", "infidels", "dweller", "diabolic", "demonstrable", "letzte", "priestess", "nimrod", "irritate", "siguiente", "beards", "churchyard", 
"despicable", "canter", "reminiscences", "racy", "stoop", "intr", "rendu", "facile", "christiana", "coerced", "billets", "sneeze", "sian", "dignitaries", "somber", "overgrown", "statesmen", "vecchio", "advices", "coffers", "sikhs", "awry", "celt", "lode", "elia", "zora", "rages", "clumps", "tithe", "subordination", "fictions", "deposed", "trending", "disinterested", "forsake", "conspirators", "swinburne", "unresponsive", "baboon", "romani", "swamped", "ensues", "habla", "seit", "elated", "buttered", "sangre", "selfe", "stuffy", "depress", "eccentricity", "transgression", "idealized", "clings", "flamboyant", "memoria", "nachricht", "macht", "toma", "clergyman", "sociales", "scape", "francia", "pledging", "dependants", "rechte", "puddings", "partisans", "mausoleum", "idler", "dawned", "generale", "carelessly", "narcissus", "crusoe", "einfach", "skimming", "stomachs", "namesake", "slaps", "maximilian", "gratuity", "reorganize", "foothold", "reggio", "usted", "madge", "gleam", "rudyard", "supposition", "sprinkling", "besieged", "malaise", "draperies", "newby", "rococo", "brabant", "superlative", "presser", "chamois", "dwt", "voy", "seared", "tinged", "professorship", "diamant", "leeward", "fruitless", "tamer", "ticklish", "alienate", "displeasure", "connoisseurs", "mutilated", "usefully", "instituting", "balzac", "moyen", "threefold", "innocently", "deepened", "clef", "dak", "pura", "regarder", "trice", "pretense", "jungles", "imitating", "shreds", "petitioned", "thad", "archway", "danse", "loudest", "ultimatum", "shuffled", "moy", "shelling", "visita", "zeitung", "observant", "unhappiness", "cinder", "pelt", "ung", "laurels", "methodical", "engulfed", "bequests", "monotonous", "pythagoras", "operatic", "malevolent", "lessened", "stile", "reciting", "naught", "antagonism", "prisms", "debby", "coinage", "unproductive", "banqueting", "nefarious", "stoppage", "defray", "endangering", "zealots", "weighty", "oeuvre", "subsided", "sahib", "gasping", "idiocy", "frenzied", "postulate", "senor", "trespassing", "pendent", "edifice", "vermin", "loosening", "dialectic", "tantalizing", "rhinoceros", "adjutant", "otro", "sickening", "pondered", "teil", "snows", "steeper", "rangoon", "depriving", "stalwart", "verandah", "schreiben", "buttery", "deformity", "cronies", "undervalued", "invalidity", "soundly", "dank", "pinkerton", "canvases", "weakens", "paulus", "ebcdic", "politik", "lariat", "pursuance", "scapegoat", "anathema", "comptes", "trifle", "forefathers", "piraeus", "xxvi", "eradicated", "toga", "fram", "inadmissible", "strasburg", "berths", "innocuous", "heroines", "retake", "unpacked", "gonzalo", "clenched", "groupes", "evaporate", "midwinter", "compagnie", "bellini", "undoing", "communes", "cassava", "disappointments", "glace", "puns", "hilt", "devoured", "inwardly", "adeline", "smothered", "eulogy", "siva", "lond", "forsythe", "pernicious", "fenster", "continua", "babbitt", "reims", "scrimmage", "privates", "whims", "hew", "skirmish", "roan", "nonsensical", "gallows", "rheumatism", "devotee", "nieuw", "cowardice", "fabled", "fangs", "animosity", "wily", "wiles", "ensue", "jaffa", "sagging", "chemin", "crumbled", "sybil", "pekin", "defied", "hopelessness", "errand", "yeoman", "slimy", "unser", "coerce", "overhang", "ihren", "jeunes", "sobbing", "muslin", "deliberative", "gute", "tattooing", "shekels", "emigrant", "dodo", "jahr", "thorny", "epistles", "trampled", "anthracite", "meditating", "merciless", "clump", "transcribe", "atrocity", "elinor", "proportionally", "untrained", "beene", "thrusts", 
"tiresome", "splashed", "antonyms", "lune", "moccasins", "parthenon", "abounds", "salutes", "collided", "tilde", "potash", "boarders", "lapping", "chivalry", "corazon", "frustrate", "sideboard", "poaching", "montmartre", "foiled", "flocked", "connaught", "tether", "hyperbole", "borghese", "schrieb", "brahman", "charlemagne", "pulsing", "heralds", "sterility", "dynasties", "prowl", "amiable", "akt", "sittings", "undulating", "thatched", "felice", "esto", "irrevocably", "bunyan", "hinders", "tubers", "unrelenting", "expeditiously", "antiquated", "jerked", "sputtering", "opulent", "mots", "dimly", "coconuts", "confuses", "executors", "squall", "nothingness", "hebrides", "demeter", "antagonistic", "bowery", "immovable", "caterpillars", "consigned", "rhein", "fervor", "pret", "scooped", "exerts", "idling", "cursory", "dissipate", "hymen", "refuted", "ionian", "americanism", "pessimism", "vehemently", "velvety", "vedere", "wheezing", "teeming", "paradoxes", "lampe", "foolishly", "ordre", "eer", "inanimate", "panting", "comers", "romaine", "wulf", "peckham", "tacks", "veille", "effusion", "lunacy", "loathe", "notoriety", "showered", "brats", "huddle", "taxicab", "confounded", "coughs", "pretends", "faery", "eloise", "widens", "omnipotent", "gautier", "poise", "zeeland", "ringed", "cima", "huddled", "unsteady", "zwischen", "duchy", "malacca", "wol", "magda", "carrion", "summarily", "heine", "voi", "ejaculations", "leopards", "dette", "sanctified", "tradesmen", "excitedly", "pentru", "braced", "gaunt", "nourished", "cornstarch", "doch", "effie", "daffodils", "lettre", "boden", "pollute", "bara", "kamen", "neuer", "pomp", "noms", "stora", "sprouting", "summoning", "annabel", "tartar", "brownish", "rejoin", "rosettes", "etats", "volition", "crawls", "suave", "riddance", "gulp", "lottie", "hac", "lurk", "smudge", "tulle", "helplessness", "circumstantial", "dermot", "naturalism", "haga", "colle", "galloping", "indestructible", "principality", "indulging", "allusion", "bosh", "samaria", "smeared", "gouvernement", "liqueurs", "winifred", "parasol", "coloration", "stingy", "succinctly", "devotes", "manet", "anos", "vigour", "snares", "schnell", "illegible", "mortars", "didst", "curiosities", "wither", "schloss", "seamed", "calmed", "flattered", "babbling", "roch", "admirably", "vipers", "nightfall", "nul", "manos", "hurl", "loyalists", "dory", "sheltering", "forego", "castile", "klasse", "blockquote", "tyrol", "irreparable", "immunities", "broiled", "superstitious", "evangelists", "insides", "sedative", "defraud", "toothed", "bygone", "wilds", "intercession", "complet", "lettered", "mirada", "paa", "apricots", "darkening", "depressions", "mache", "toasting", "exhale", "markt", "altars", "abolishing", "chauncey", "recesses", "kinsman", "payed", "overworked", "cecile", "orbs", "aime", "mutable", "delicacies", "toujours", "scorching", "coffins", "jove", "cashed", "ushers", "jewry", "copperfield", "chapelle", "whoop", "cacao", "andra", "annoys", "heiress", "godhead", "canvassing", "portia", "shyness", "angelus", "subjecting", "momento", "escorte", "unsightly", "frayed", "criminality", "woolen", "repos", "levelling", "shrapnel", "arthurian", "burgos", "litany", "fairest", "nutter", "bristles", "larder", "ganges", "machen", "truthfulness", "atrocious", "obelisk", "valeria", "claret", "fru", "samos", "consecration", "forbearance", "acerca", "plastered", "apostrophe", "stepmother", "ruf", "lapland", "publius", "ihnen", "jesuits", "voluminous", "mottled", "plu", "tosses", "manifesting", "estella", "publics", 
"rien", "normandie", "scrip", "rocher", "inadequately", "arabella", "matti", "throng", "flemming", "shunned", "abandons", "appetites", "turnip", "juxtaposition", "crushes", "carnivorous", "berber", "mince", "banish", "flapping", "fino", "frets", "schism", "sculptured", "suivant", "jemima", "heretics", "dogged", "apparition", "barristers", "scrutinized", "earthworks", "thrashing", "salome", "thumping", "vara", "quenching", "hunch", "amaryllis", "messes", "perdition", "wintering", "topple", "chickasaw", "pungent", "discontinuance", "unbridled", "astrologer", "dut", "canvass", "manifestly", "emphatic", "susy", "outgrowth", "homeward", "withered", "baiting", "surrendering", "fortification", "mingo", "spurt", "elation", "wail", "artistically", "elma", "epileptic", "crag", "hace", "feller", "enmity", "sanctum", "mazes", "jenks", "schutz", "materialistic", "boaz", "jahre", "gud", "oncoming", "racked", "cloister", "provincia", "fancied", "spoilt", "predisposed", "hydrochloric", "filippo", "strode", "agen", "marchand", "disorganized", "shaftesbury", "littoral", "denn", "aggressor", "giggled", "consummation", "fronting", "zola", "heute", "unfaithful", "executioner", "titular", "swears", "diminutive", "paring", "damning", "matrimony", "armas", "humbug", "signalled", "granulated", "ailment", "homely", "perpetuity", "stepfather", "disprove", "dinero", "bernhardt", "incurable", "dixit", "shoving", "furnishes", "anointing", "corinna", "strictest", "domiciled", "minx", "eclipses", "prise", "misdemeanors", "hadrian", "supremely", "mensch", "hastened", "perpetuating", "prostrate", "provisionally", "cocked", "raged", "boyne", "singularly", "elam", "gobble", "preposterous", "symbolized", "breech", "ripening", "pyramidal", "shee", "choruses", "obstructing", "phosphoric", "parquet", "vint", "pasquale", "reparation", "amply", "damask", "rejoined", "impotent", "spits", "papacy", "thimble", "lacquered", "ablaze", "simmering", "nettie", "grasshoppers", "senatorial", "thawed", "unexplored", "transpired", "toulon", "fortifications", "dens", "loafer", "quin", "insurmountable", "prettier", "peu", "haystack", "komen", "chaque", "confining", "louvain", "etchings", "impenetrable", "gymnastic", "tink", "purr", "duped", "stifling", "realises", "vindicated", "bund", "invades", "oust", "suo", "dipper", "signified", "talkers", "exemplify", "inane", "byways", "ibsen", "justus", "bluntly", "bask", "mermaids", "contemplates", "inglis", "defensible", "spinster", "goblets", "interrogated", "yolks", "famille", "dello", "magdeburg", "tarnished", "deducting", "fie", "brimming", "ridiculed", "baie", "ionia", "olden", "herne", "unending", "abominable", "rattled", "basse", "farmhouses", "tambourine", "venomous", "impressively", "inextricably", "etexts", "tapering", "prinz", "unjustly", "rehearse", "apertures", "seducing", "screeching", "reedy", "ceded", "sido", "imbued", "fearsome", "bureaux", "sleds", "christendom", "biographer", "wreak", "planta", "bridegroom", "swarming", "hava", "accomplice", "vivre", "moni", "mui", "ili", "servi", "irregularity", "gash", "impeded", "gravestone", "pompous", "sunt", "subvert", "hanno", "instrumentality", "barnaby", "antwort", "impassioned", "mous", "esau", "desperado", "flavoring", "mouton", "bau", "contagion", "archimedes", "desecration", "pocketbook", "anselmo", "misinterpreted", "garlands", "varma", "mongol", "audacious", "midshipmen", "degrades", "maggiore", "protestantism", "soreness", "boldness", "schip", "inhalt", "otras", "cassius", "powdery", "exportation", "diverge", "loosened", 
"misunderstand", "virility", "inalienable", "norden", "untamed", "eben", "viel", "xxviii", "meddling", "objecting", "gib", "shoddy", "salutation", "altercation", "octagonal", "mended", "navigators", "notches", "odysseus", "unfavourable", "abject", "heretical", "riveted", "quiescent", "strangeness", "rideau", "tincture", "erecting", "tenderer", "wirtschaft", "lucian", "jaar", "persevere", "fittest", "tarnish", "isthmus", "giuliano", "wordt", "hildebrand", "feu", "treads", "lengthen", "bahn", "prodigious", "spoonful", "sociable", "requisitions", "deftly", "raucous", "toasts", "exaggerate", "odes", "blushed", "saddest", "grinds", "immorality", "addington", "marcellus", "ciencia", "wench", "celle", "spontaneity", "illusory", "sympathize", "faggot", "barrows", "tantamount", "slaughtering", "dissected", "borrows", "frigid", "hemispheres", "woollen", "musick", "speculating", "pawns", "outermost", "selwyn", "westphalia", "augmenting", "winded", "poder", "methinks", "rambles", "namur", "tyme", "dawning", "lait", "klang", "congratulating", "sempre", "flagrant", "wane", "loins", "uneventful", "quis", "scoundrels", "distraught", "assassinate", "unwavering", "confidentially", "piecemeal", "soll", "inferiority", "burnished", "clothe", "swelled", "vides", "breda", "gentleness", "staked", "rigidly", "simile", "phalanx", "hindering", "sloped", "sifting", "fixe", "isobel", "loudness", "guillotine", "reverting", "dionysus", "leanings", "groans", "herbst", "canker", "keener", "embellishment", "confesses", "mistresses", "breakwater", "smuggler", "busily", "poached", "aram", "shopkeeper", "hailing", "imparted", "traduction", "contradicting", "headlong", "captor", "indelible", "tethered", "whiteness", "grazed", "unfulfilled", "acquittal", "meilleur", "fluently", "ascribe", "stalked", "deluded", "trembled", "gens", "doon", "unobserved", "labored", "tete", "twitching", "smacks", "silber", "troughs", "unbelievers", "hungerford", "brothels", "skilful", "werk", "basta", "bolder", "omits", "endures", "heeft", "silencio", "laski", "selle", "pueden", "impersonation", "hote", "lavinia", "intents", "unconnected", "ovum", "pruned", "wedded", "lashed", "valladolid", "contentions", "bickering", "whaler", "unobstructed", "menschen", "fondling", "cref", "laissez", "ricks", "spenser", "astounded", "permanency", "smacked", "personen", "pallas", "anatole", "sleet", "disgraced", "philippa", "royaume", "grooved", "resigning", "appareil", "alcove", "termine", "ungodly", "felling", "landes", "hout", "ois", "disclaimed", "aucun", "upp", "appartement", "couleur", "montagu", "steamship", "condescending", "recounting", "breeches", "appellation", "mitglied", "abbe", "montes", "exemple", "handsomely", "fille", "segovia", "untenable", "messer", "deformities", "necktie", "huis", "xxvii", "tardy", "disregarding", "matron", "seaward", "uppermost", "adolphus", "ciphers", "nibble", "heim", "volver", "exerting", "fenn", "fleeces", "industrious", "foie", "decayed", "proprietorship", "essere", "allgemeine", "umsonst", "harps", "hedged", "cleanest", "selon", "teutonic", "viceroy", "maintenant", "ingrained", "caspar", "swordsman", "commissary", "yellows", "habitually", "naman", "maxime", "majorities", "rendus", "mummies", "conquests", "brimstone", "quand", "trowel", "tyndall", "profiting", "beseech", "hitched", "mucha", "mair", "smelt", "fatale", "margery", "yearn", "mismo", "culprits", "trinkets", "whig", "enchant", "austere", "earths", "selbst", "storehouse", "cowhide", "plumage", "antecedents", "diabolical", "tugs", "rapier", "unspoiled", 
"haughty", "relinquished", "assaulting", "admirals", "cosi", "meisjes", "esmeralda", "captivate", "terug", "deterred", "agostino", "apathetic", "uninteresting", "lyre", "yawning", "centralization", "prunes", "buller", "cossacks", "attuned", "herons", "raiding", "deft", "seething", "carne", "jardins", "alligators", "instigated", "superstructure", "husk", "grandiose", "clerkship", "concisely", "sah", "scepticism", "quatre", "constancy", "plats", "countryman", "insufficiently", "reappear", "boudoir", "affinities", "glades", "crutch", "rioting", "espoused", "mamie", "frisch", "discursive", "disputing", "unpaved", "lieber", "repudiation", "clarice", "dimples", "inhabitant", "flourishes", "colonized", "hessian", "feder", "ardour", "hing", "erat", "arbeit", "levant", "imitators", "talkative", "phonograph", "speculators", "sty", "quelques", "smelting", "cuss", "slats", "transcribing", "manoeuvre", "offends", "lumpy", "landlocked", "embattled", "wisest", "giulio", "zin", "diminution", "ging", "rencontres", "southernmost", "freckles", "civilised", "airship", "galls", "ammon", "imitated", "inflicting", "inducement", "heave", "cud", "gegen", "proclamations", "rarer", "slowness", "wrongfully", "lessening", "aurelius", "pout", "cognate", "mire", "sufferer", "mores", "raindrops", "elegy", "sanctification", "sanded", "indignant", "godless", "sloop", "politeness", "baffling", "hurriedly", "characterise", "purporting", "passo", "taunt", "ick", "hinting", "schoolboy", "bailiff", "outpouring", "deflected", "inflection", "lettres", "myrrh", "infuse", "chaff", "defaced", "mimicking", "counseled", "showy", "altruistic", "aldermen", "commends", "moorish", "etre", "bobbing", "defiantly", "colonels", "posible", "bli", "cualquier", "pathos", "battleships", "smartly", "laments", "spied", "playthings", "argumentative", "roused", "aloof", "snore", "charred", "industria", "hij", "ihrer", "dunstan", "bolshevik", "unsound", "hatter", "creepers", "recreations", "profusely", "intelligences", "sorrel", "reverie", "colloquial", "callous", "oom", "perplexing", "splashes", "homesick", "gainer", "ochre", "dois", "bystander", "quell", "repulsion", "capitan", "balk", "imagines", "softens", "harnessed", "exuberance", "flocking", "unnumbered", "outbursts", "undying", "stubble", "bande", "amie", "envie", "tle", "quivering", "ete", "euery", "wein", "sark", "commending", "sofort", "flattery", "soothes", "millstone", "mortgaged", "impossibly", "giorno", "compels", "succes", "drunkenness", "indulged", "habitable", "spn", "subtleties", "ministre", "trappings", "afterthought", "damsel", "euphrates", "schoen", "decorum", "hommes", "spoiling", "yellowing", "robs", "giselle", "earthenware", "incendiary", "selina", "lenient", "dined", "idly", "freda", "devilish", "aristocrat", "scathing", "twinkling", "nichts", "pantomime", "familie", "wanderings", "decimated", "overthrown", "moored", "peered", "bores", "regrettable", "strangled", "maxims", "cama", "engrossing", "fere", "jezebel", "lethargy", "komm", "frolic", "painstaking", "goths", "finality", "toppled", "ewes", "mending", "wrestled", "hurtful", "alternation", "receding", "gast", "laban", "neuen", "paix", "candelabra", "outposts", "treading", "hedwig", "downy", "conformed", "characteristically", "canadien", "goldsmiths", "swarms", "geographers", "somos", "evolutions", "escorting", "irregularly", "oratory", "sharpest", "palisade", "moccasin", "circumcised", "growled", "auxiliaries", "benefactors", "terse", "insistent", "peppered", "sterne", "avez", "utile", "frightful", "trite", "gentler", 
"vex", "dilapidated", "mien", "avance", "wollen", "dela", "stubby", "sixpence", "hoch", "visto", "impaled", "forays", "charon", "flanks", "pavia", "curbed", "efficacious", "philanthropist", "thaddeus", "convinces", "rede", "minder", "orator", "abet", "dien", "ropa", "sence", "steppe", "plowed", "sires", "transgressions", "lingers", "smothering", "encampment", "roque", "prophesy", "recast", "misrepresentations", "bards", "bestial", "neuf", "buddhas", "oozing", "vicenza", "richelieu", "curd", "bookish", "subdue", "raking", "denouncing", "ascertaining", "stags", "vittoria", "soldered", "privateer", "milly", "vicarious", "traverses", "seedy", "imbedded", "elysium", "quenched", "antithesis", "envoyer", "awakens", "accentuate", "squandered", "sortie", "withal", "eyelashes", "colliers", "minuten", "tilden", "asti", "blindfold", "rampart", "possessive", "feldspar", "facades", "idealist", "constables", "mourns", "solidified", "cura", "conceit", "needful", "locusts", "thatch", "cappadocia", "weathers", "grunts", "thicket", "zou", "depraved", "continence", "treatises", "renseignements", "sauvage", "prying", "rascals", "voyageurs", "rudely", "weeps", "deplorable", "smacking", "aggravate", "quoth", "snowstorm", "lacuna", "chambres", "rawson", "levelled", "incessantly", "toit", "apres", "flaring", "neues", "langton", "testa", "lye", "ditty", "pestilence", "rapide", "thoroughfare", "skiff", "belligerent", "impeached", "hight", "eclipsed", "conspired", "catacombs", "agonizing", "bottomless", "sows", "attributing", "londoners", "faut", "sardis", "excruciating", "punctual", "runaways", "boniface", "grafted", "watercourse", "propped", "beaton", "telegrams", "staking", "conversing", "acetylene", "calamities", "viennese", "fancies", "accuser", "bystanders", "minos", "ganymede", "enjoined", "animating", "mercurial", "bargained", "repugnant", "citron", "clave", "pageants", "grosses", "tacked", "zeigen", "supplant", "slates", "prue", "corroborated", "andros", "tipsy", "tabac", "recognisable", "neuralgia", "timbre", "clasped", "pecking", "womanhood", "crimean", "exorbitant", "tish", "grieved", "experimenter", "tallies", "serpents", "tampered", "severally", "bedstead", "acquis", "bostonian", "whirlpools", "sotto", "caressing", "reliefs", "tassels", "culpa", "whiter", "froth", "obliterated", "regalia", "peerage", "deceitful", "storied", "unprofitable", "doublet", "astonishingly", "dein", "cannibalism", "menos", "mera", "pretender", "mosses", "subside", "burney", "conspiring", "nostra", "retaliate", "deafening", "beleaguered", "jarring", "baptismal", "magdalen", "brackish", "direkt", "vse", "tinsel", "edel", "scrutinize", "adverb", "mumbled", "commis", "yams", "breve", "mut", "worthiness", "lazily", "disarming", "ween", "woefully", "kaj", "promontory", "eres", "paye", "smote", "taunting", "etruscan", "outwards", "rend", "hezekiah", "depravity", "wealthier", "onda", "scientifique", "disagreeable", "drei", "castes", "corrupting", "massif", "murat", "kine", "lus", "overtures", "pharaohs", "fraudulently", "plunges", "gibberish", "cela", "tammany", "boulevards", "redistributing", "darken", "dowry", "chateaux", "quam", "skirting", "adieu", "kindling", "affluence", "passable", "shouldered", "hilarity", "fulfils", "predominance", "mitten", "conquerors", "thar", "admonition", "ferdinando", "perchance", "rots", "demetrius", "precocious", "rood", "sachsen", "luzon", "moravia", "byzantium", "gaf", "altre", "repress", "domini", "loro", "moiety", "steeply", "darned", "locum", "denser", "moorland", "coincidences", "divinely", 
"skimmed", "lassie", "congratulation", "seminaries", "hotchkiss", "trotting", "ambushed", "combing", "travesty", "bewildering", "hunchback", "aback", "deepens", "griff", "enactments", "scaly", "heaped", "fantastically", "cobham", "oracles", "untied", "quince", "lage", "profusion", "conjectures", "glint", "incitement", "hansel", "figuratively", "sorceress", "stoic", "fatigued", "unconsciousness", "quarto", "improvise", "incipient", "avalanches", "cheval", "crackling", "creeds", "thro", "outrun", "extenuating", "blackberries", "amiss", "cavernous", "snodgrass", "darlings", "reprieve", "shanty", "rapping", "proffered", "rowena", "livid", "distasteful", "distinctively", "luft", "hares", "overturning", "attestation", "bravado", "overpowering", "ravings", "childless", "voix", "grecian", "proportioned", "lavishly", "smite", "forthright", "kritik", "foretold", "dado", "engraver", "saddled", "tortures", "crusts", "vamos", "loge", "presupposes", "trickery", "adherent", "fragen", "populi", "astrologers", "wuz", "vindication", "opined", "falter", "chatty", "auvergne", "philistines", "retainers", "tener", "cherbourg", "imperfection", "sorrowful", "unchanging", "predominate", "wodehouse", "molested", "titres", "hyena", "wedlock", "erstwhile", "vist", "obtuse", "caudal", "sternly", "chanted", "jonson", "klug", "savour", "stabs", "indecency", "lingered", "elke", "feasting", "suffocation", "softest", "sniffed", "lurks", "tenses", "lawlessness", "recollect", "alors", "projectiles", "heures", "larch", "interrogatories", "dess", "whet", "impatiently", "suspecting", "dessous", "aline", "disjointed", "seizes", "reine", "triomphe", "thebes", "doer", "pandemonium", "lege", "ravished", "discerned", "seulement", "icicles", "fanaticism", "flamed", "godsend", "rubbers", "eder", "anderen", "rehearsed", "alix", "outrageously", "bagdad", "petticoat", "inhabiting", "unrestrained", "injures", "botha", "pigtail", "appraising", "enthralled", "strays", "embroiled", "toussaint", "armistice", "ellery", "damped", "southerners", "fissures", "clinched", "forlorn", "apologetic", "absolution", "inordinate", "burdett", "clank", "individualistic", "conseils", "marts", "obra", "artemisia", "evermore", "engendered", "manchu", "disconcerting", "priestley", "appropriating", "shinto", "attentions", "regno", "gawd", "inhaling", "calmer", "passers", "fluttering", "irishman", "brier", "phoenician", "hundredth", "firstborn", "coves", "armes", "betraying", "screech", "fetches", "paltry", "carelessness", "threes", "broadside", "importante", "doers", "sods", "technicalities", "thais", "groaning", "beckons", "rejoiced", "quickness", "jeunesse", "onze", "entertains", "turban", "freie", "ruffles", "infatuation", "gaiters", "meisje", "geben", "nulla", "plutarch", "curving", "misrepresent", "tankard", "xxxix", "amorous", "kurz", "overflowed", "jesu", "weaned", "armchairs", "appartements", "vagueness", "grumble", "wronged", "politiques", "fireflies", "hoisting", "falsified", "dialectical", "schatz", "labours", "espagne", "flatly", "harsher", "inciting", "malleable", "indecision", "unselfish", "shem", "starke", "alight", "epochs", "nosotros", "genial", "langues", "revolved", "ifad", "snowed", "cachet", "fortify", "cherubs", "armature", "implicate", "tolling", "provisioned", "sista", "syriac", "dived", "baffles", "infamy", "dapper", "belfry", "elysian", "odious", "rehearsing", "ellipsis", "outhouse", "romanesque", "gobierno", "vanquish", "imparts", "sobs", "laudable", "thawing", "tienen", "writs", "omnipresent", "gesundheit", "hovered", "devouring", 
"renunciation", "stunted", "munching", "fumbling", "purl", "lasse", "banal", "rears", "portico", "excites", "placard", "quartermaster", "peculiarly", "placards", "transposed", "ganga", "thrace", "waistcoat", "vier", "perusal", "petrus", "childlike", "shamelessly", "saison", "tomo", "cloaked", "lichens", "brotherly", "uninhabited", "sawn", "unbelief", "overtaking", "transference", "arjuna", "pliable", "mantua", "sardines", "dictating", "studien", "crystallized", "reprisal", "blighted", "kunz", "dissect", "rumbling", "perceptible", "blazes", "encircled", "odette", "saxons", "transcending", "snout", "goodly", "philosophically", "directeur", "bigot", "bramble", "persisting", "bouillon", "scribbled", "celibacy", "beaucoup", "tooting", "gruppe", "displeased", "portant", "lather", "falstaff", "unchallenged", "strayed", "commutation", "spiritualism", "gracia", "omnia", "engender", "fini", "jurists", "cloaks", "streaked", "downe", "chieftains", "garrick", "perches", "scrapes", "silhouetted", "crouched", "juana", "gradation", "tole", "unanimity", "radnor", "tycho", "impeding", "reino", "grisly", "fornication", "contro", "sassafras", "heure", "tramps", "assis", "blossoming", "barbary", "irate", "partisanship", "wean", "omelet", "suh", "sheaf", "folios", "iban", "dictum", "refutation", "posthumous", "inclinations", "ledges", "wenig", "muchas", "enlisting", "roars", "swindle", "revolting", "candied", "plaine", "macedon", "dingy", "bons", "frieze", "staircases", "horas", "multiplies", "impressing", "twirling", "lachlan", "entwicklung", "sergeants", "overcoat", "shak", "tyrannical", "infinitesimal", "scharf", "spouting", "origine", "humbling", "truer", "limes", "katharina", "martians", "sullen", "machin", "prolonging", "battering", "superficially", "upstart", "ihm", "imps", "divulged", "shrunken", "quays", "reprehensible", "provokes", "distancia", "dedicating", "confessing", "forbade", "incursions", "viele", "pieced", "arching", "bett", "gloriously", "gourds", "worsted", "nevermore", "sanguine", "acorns", "slung", "rowers", "shockingly", "viaje", "vagrant", "empties", "bight", "entra", "fells", "morgen", "lors", "dormer", "geht", "ahab", "prolongation", "uprooted", "talons", "germaine", "dualism", "intrigues", "cannibals", "pounce", "marchant", "vedas", "panier", "mouthfuls", "instilled", "calyx", "valour", "litle", "mightily", "cuzco", "unwieldy", "perpetuated", "steht", "exaggerating", "smoldering", "peuvent", "snub", "coarsely", "voz", "withstanding", "thickens", "hissing", "crumpled", "topmost", "intrude", "behest", "pitkin", "snatching", "resto", "charmer", "escapades", "haphazard", "infirm", "pontiff", "menage", "preaches", "varios", "growling", "indescribable", "arraignment", "eugen", "kentish", "napping", "sabatini", "toppling", "sten", "astley", "bouton", "excellently", "ier", "pails", "burly", "derecho", "formule", "hillsides", "segunda", "xxix", "contenu", "divest", "mange", "unfairness", "abated", "sohn", "tiniest", "mowed", "sano", "overhauled", "caskets", "lecteur", "congenial", "lut", "fervently", "sprained", "harlot", "ravages", "choix", "superhuman", "conclave", "humanly", "altura", "livia", "causa", "dentro", "magnificence", "sacramental", "peddler", "eterna", "mystere", "fayre", "glared", "adverbs", "donc", "ugliness", "constantia", "shavings", "lusts", "nunca", "helplessly", "quintessence", "throes", "malabar", "crowbar", "blots", "nettles", "scud", "raked", "cruised", "stupidly", "lashing", "gaudy", "merriman", "swoon", "buckskin", "kommt", "recluse", "displacing", "neapolitan", 
"blacker", "haarlem", "quel", "aspires", "telegraphic", "quali", "frescoes", "patted", "puritans", "gentlewoman", "somme", "meinen", "nouveaux", "victors", "revels", "droves", "slur", "laetitia", "eisen", "phrased", "puddles", "nobleman", "kort", "assailant", "luxuriously", "flatness", "pardons", "debauchery", "wij", "extravagance", "buttress", "entrada", "junge", "rigors", "foregone", "stellung", "overjoyed", "bourgogne", "newhaven", "apologists", "fut", "allemagne", "vind", "waddington", "refilled", "whiff", "burrowing", "strolled", "estos", "regen", "encrusted", "clashed", "harpoon", "sombre", "machinations", "hearse", "libertad", "roamed", "approbation", "nen", "wut", "calmness", "confound", "lengthwise", "fatter", "abstained", "chasse", "christen", "comparaison", "valeur", "senile", "cobwebs", "tusk", "hellish", "conquers", "iglesia", "preceptor", "claro", "ugliest", "ungrateful", "renounced", "clashing", "decomposing", "sauter", "sain", "postponing", "israelite", "graver", "flees", "torrid", "absalom", "preconceived", "zug", "engrave", "dishonor", "hoarding", "bauxite", "barrack", "compatriots", "stereotyped", "conscription", "maken", "philosophie", "minna", "tradesman", "embodying", "unscathed", "moslems", "courageously", "snugly", "tarry", "fevers", "interrogate", "eocene", "muddled", "sklaven", "leonora", "militaire", "subjection", "punctuality", "hoarse", "misfortunes", "vexed", "delos", "vanquished", "ibi", "inquisitor", "floored", "inheriting", "historique", "plied", "beaters", "twang", "ombre", "conceiving", "syrians", "mij", "indivisible", "poetical", "stagger", "crusted", "heraldic", "belli", "maladies", "adjudged", "adolphe", "fou", "wissen", "turrets", "pression", "efter", "calms", "misgivings", "presumes", "juggler", "obeys", "stifled", "preposition", "vestibule", "heer", "mournful", "ameliorate", "scheming", "disarmed", "baseless", "voile", "picturing", "dismemberment", "quartered", "agrippa", "lioness", "appendages", "feverish", "pavillon", "couleurs", "neglects", "suckling", "scythe", "heaving", "homily", "pensive", "lado", "fum", "upshot", "sifted", "felder", "fuerte", "boisterous", "sate", "alleviated", "outbuildings", "icj", "decanters", "elevates", "poitiers", "goed", "ferment", "bounties", "incursion", "aurelia", "thinned", "consternation", "hoisted", "aeroplanes", "auteurs", "antigone", "chirp", "dimmed", "yore", "scurry", "growths", "thoth", "halve", "conversant", "torpedoes", "sovereigns", "unencumbered", "eliciting", "tamed", "fiends", "farmyard", "condense", "garbled", "tallow", "unforgiving", "immobile", "indisputable", "unkind", "prismatic", "aunty", "paucity", "expediency", "frisian", "lieutenants", "philology", "prophesied", "backwoods", "pheasants", "slouch", "amulets", "cargoes", "accentuated", "eddies", "kategorien", "disobey", "literatur", "bandy", "watercourses", "amicable", "prospered", "savoury", "colloquy", "retorted", "fiftieth", "joyfully", "onder", "offensively", "plausibility", "magnate", "pillage", "vengeful", "lunatics", "satis", "nol", "edom", "impracticable", "misdirected", "weer", "surrenders", "manchuria", "playfully", "barony", "leyden", "gruff", "snatches", "buxom", "deciphering", "botanist", "deine", "timidity", "musty", "silences", "guineas", "hebben", "ministering", "strangle", "swerve", "proscribed", "chattering", "esser", "franconia", "dominions", "plateaus", "berthold", "spaniard", "plummet", "transplanting", "onlookers", "wissenschaft", "phebe", "easiness", "trepidation", "squatters", "plantain", "pepys", "frailty", 
"neutralized", "tangier", "ismael", "guten", "bateau", "mourners", "twos", "passageway", "reestablish", "fondo", "parsonage", "quien", "sulphide", "outcasts", "mortally", "oot", "agni", "carbonic", "unassuming", "disillusionment", "nouvel", "knead", "wilful", "gaol", "erudite", "appreciably", "equalize", "prepositions", "petits", "tarn", "endeavoured", "enl", "attentively", "interred", "indiscriminately", "encumbered", "herodotus", "favouring", "neutrals", "conspire", "recompense", "colonnade", "unde", "eustace", "abides", "yuh", "damen", "seus", "strove", "ogni", "dissenters", "imparting", "apologizing", "coups", "verdant", "secrete", "libris", "twirl", "noo", "beadle", "denizens", "cockney", "guppy", "leeches", "convoys", "manoeuvres", "shapely", "rooks", "shuddered", "stelle", "ornamentation", "lynching", "sommes", "perdido", "dictatorial", "uncomfortably", "defenseless", "glean", "amory", "ander", "edad", "gratified", "participle", "schlegel", "watchmen", "galleon", "travaux", "eten", "enim", "chafing", "betrays", "assyria", "inwards", "corsican", "libertine", "immeasurable", "esthetic", "testator", "distaste", "offshoot", "smithson", "resolutely", "friendliest", "uttering", "jacobus", "construe", "algemeen", "mourned", "despotism", "flotilla", "fragmentary", "anjou", "omniscient", "gladness", "frisky", "generalities", "condolence", "siddhartha", "brightening", "inimitable", "ineffectual", "armorial", "poppa", "thickly", "blossomed", "cistern", "tableaux", "latins", "phaeton", "fecundity", "malle", "caliph", "dysentery", "soir", "grenier", "funnels", "pasty", "cuffed", "peau", "tumult", "defoe", "curate", "donned", "wilks", "allegorical", "monotony", "reve", "ohr", "lucile", "amazons", "manon", "unabated", "plante", "curzon", "wohl", "marksman", "philosophic", "denna", "troubadour", "volgende", "truest", "hypnotized", "voitures", "rudeness", "felled", "alleen", "tinned", "concoction", "flay", "patter", "seinen", "tortoises", "roxana", "pli", "crone", "possessor", "wintry", "gode", "admonished", "wickedly", "laver", "shamed", "eluded", "incriminating", "unsealed", "misinformed", "tambien", "journeyed", "presenta", "sett", "magnificently", "unpunished", "albatros", "apostasy", "bereft", "lucretia", "hibernian", "vitriol", "vicarage", "vestry", "gleefully", "mercies", "paralleled", "entwined", "fosse", "taille", "resplendent", "thrall", "barked", "cormac", "sju", "unum", "scorned", "relapsed", "thicken", "sanaa", "ceci", "selene", "artfully", "pilgrimages", "fides", "blazed", "edda", "wheelbarrow", "maimed", "chor", "dernier", "duda", "pater", "meno", "mused", "jamais", "puffing", "besten", "wielded", "futurity", "quicksand", "trestle", "souffle", "rebus", "proces", "sentinels", "pardoned", "wormwood", "sighing", "harz", "awed", "shrank", "conceals", "glycerine", "staub", "abolitionist", "foamy", "aventure", "meunier", "unpainted", "knolls", "unwell", "unconscionable", "wedged", "outgrown", "evading", "commemorated", "lurid", "annunciation", "rumoured", "idee", "coalesce", "brougham", "windings", "strongholds", "burglars", "shrimps", "stirrup", "seria", "creo", "dictionnaire", "finde", "flopped", "elbe", "whitewash", "subservient", "suivante", "stubbornly", "benediction", "disobedient", "seamstress", "immortals", "euripides", "uninitiated", "mikko", "mond", "zwart", "briskly", "afflictions", "buon", "zon", "weariness", "ascendancy", "affront", "telephoned", "treasuries", "energetically", "tinge", "fingal", "defection", "murmurs", "slog", "gav", "dispersing", "tractable", "lapped", "syl", 
"petitioning", "clawed", "einmal", "winsome", "presuming", "englishmen", "equaled", "flog", "notte", "deferring", "quills", "oud", "practises", "unattainable", "lengthened", "dramatist", "grayish", "hallucination", "exhortation", "arousing", "hippopotamus", "wile", "forgeries", "chartres", "recline", "maitre", "remembrances", "disturbs", "chums", "determinate", "heeded", "telephoning", "sophocles", "humiliate", "erfurt", "wasser", "tomes", "ingen", "accompaniments", "clairvoyant", "shriek", "ferocity", "quoi", "withering", "procreation", "xxxi", "exasperated", "eerste", "groping", "soule", "pinnacles", "miser", "scaffolds", "reprisals", "culpable", "unserer", "asunder", "qualms", "unharmed", "sheaves", "tritt", "godmother", "impresses", "lidia", "plusieurs", "buttoned", "sprouted", "armoury", "marshalling", "longue", "omelette", "disintegrated", "forgetfulness", "muerte", "stilts", "samaritans", "knocker", "underfoot", "roofed", "jinn", "nunc", "primeval", "sakes", "horsemanship", "aviators", "destinies", "jure", "sherbet", "nutritive", "hurrying", "helden", "tepid", "opportune", "intuitions", "dissuade", "hemmed", "personified", "cornice", "smock", "musket", "beautify", "tannery", "sooty", "buckled", "purveyor", "kindled", "provencal", "schein", "stairways", "methodists", "bourg", "pretence", "questioner", "repute", "nakedness", "scabbard", "covet", "debe", "rippling", "mony", "nelle", "rationalism", "wistful", "admires", "hissed", "overpowered", "pervades", "mele", "tirade", "elucidation", "prongs", "fumbled", "acte", "confided", "mumbling", "abstaining", "giotto", "punkte", "lancers", "heimlich", "waren", "confederates", "stretchers", "demosthenes", "warum", "avait", "devonian", "infinitum", "justo", "antti", "ointments", "tugging", "opulence", "appomattox", "bentham", "coursing", "beschreibung", "patrician", "zacharias", "melodramatic", "effet", "inexperience", "palabras", "aantal", "rime", "casement", "kalle", "serially", "gefunden", "apprised", "thoughtless", "comparer", "goad", "parle", "muddle", "levites", "christus", "blasphemous", "unaided", "candidature", "clapped", "fatherland", "evergreens", "recede", "dears", "willkommen", "spry", "objets", "toki", "maggots", "calor", "hominem", "tints", "waver", "handkerchiefs", "punishes", "salut", "acquiescence", "disaffected", "manors", "chronicled", "laure", "inundation", "earshot", "omens", "brule", "transfiguration", "punctured", "coughed", "repaying", "filial", "mocks", "niggers", "refrained", "shallower", "durer", "patriarchs", "respectability", "commode", "overbearing", "townspeople", "adoring", "trodden", "reaped", "bequeathed", "grumbling", "elude", "decently", "metaphorically", "tripe", "glitters", "ahmet", "austerity", "mitte", "informe", "enjoin", "dazu", "boyish", "egotistical", "neared", "claes", "rostov", "diverging", "estoy", "uninvited", "irkutsk", "trappers", "aniline", "tuk", "spilt", "forgetful", "conceding", "brightened", "inconveniences", "maun", "rigour", "evinced", "uneasiness", "afresh", "taal", "bunks", "ducked", "situate", "sowie", "escapade", "loomed", "egbert", "hungarians", "clamor", "abdallah", "hond", "pews", "workhouse", "handbuch", "unorganized", "whalers", "smuggle", "laboring", "nooks", "wud", "autocratic", "titania", "broder", "shyly", "stewed", "disguises", "stowed", "unmanageable", "denunciation", "squeal", "ducking", "throb", "scorch", "perusing", "duels", "villainous", "caius", "pythagorean", "steadfastly", "abstention", "genealogies", "ruthlessly", "falsify", "swagger", "flicked", "emigrate", 
"arbour", "accomplices", "nonproprietary", "gebraucht", "toothless", "frankincense", "commendations", "comprehended", "bravest", "crevice", "papel", "telltale", "typewritten", "progenitors", "forges", "loosed", "madcap", "neigh", "evie", "casimir", "persecute", "voracious", "foret", "rescuer", "massacred", "signification", "quarrels", "remoteness", "dominus", "botticelli", "balmy", "hele", "splinters", "kleiner", "epithet", "blonds", "ravenous", "mongols", "camphor", "savagery", "ober", "navigated", "dieppe", "mies", "pretensions", "thunders", "prins", "diogenes", "comings", "danke", "farthing", "crevices", "wringing", "tearful", "betwixt", "florent", "unmistakably", "unu", "massed", "plucking", "slavonic", "reprimanded", "rebelled", "thunderous", "rolle", "encloses", "sorties", "revives", "toleration", "suitors", "minutiae", "deviated", "sleight", "burman", "skirted", "coachman", "bigots", "reappeared", "comprehending", "reckons", "inexhaustible", "canny", "fainted", "pianoforte", "rifts", "winking", "firmament", "hovers", "thoroughness", "confessor", "gooseberry", "aimlessly", "pronouncing", "agassiz", "dazzled", "inborn", "manera", "ould", "consuls", "eure", "doria", "newness", "ascetic", "bearable", "russet", "specie", "hothouse", "incas", "skein", "virginie", "mettle", "ojo", "endeavored", "matin", "demonstrative", "seis", "detta", "bigoted", "discordant", "lilacs", "levying", "elles", "oriel", "buoyed", "malady", "brahmin", "grandsons", "tempers", "quinine", "thirtieth", "sige", "grog", "fester", "permeated", "retards", "resentful", "headlands", "saintly", "oude", "aught", "cornelis", "adjuncts", "jeweller", "wooing", "conjunctions", "embellish", "cordes", "moonlit", "intercepting", "denounces", "besser", "wegen", "dienst", "corks", "obscuring", "tages", "nullify", "corroborate", "envied", "chins", "runt", "nursed", "loathsome", "cosas", "althea", "dando", "icebergs", "sacking", "settee", "driest", "scipio", "stealthy", "flaunt", "mistaking", "saxe", "dyspepsia", "tryst", "cede", "annihilate", "candidly", "honorably", "shifty", "ello", "deceptions", "snorted", "signe", "shivered", "teem", "replenished", "assailants", "degeneracy", "giovanna", "consummated", "cosimo", "cotes", "obstinate", "farquhar", "retrace", "revolvers", "lurch", "gregarious", "allee", "oor", "nightgown", "bombard", "missus", "mystified", "drooping", "diable", "inconsiderate", "swirled", "darted", "warlike", "colons", "supplication", "fretted", "gauged", "suet", "overhanging", "impropriety", "maligned", "thackeray", "nought", "barbarous", "grandi", "olly", "diu", "scepter", "writhing", "enticed", "schmuck", "gasps", "exclaim", "greve", "vestiges", "rustling", "recaptured", "marauders", "spars", "howls", "answerable", "inky", "ock", "sneer", "allay", "derision", "zog", "dutifully", "octavo", "jerrold", "maddening", "plundered", "damit", "henriette", "decry", "buen", "devant", "conspirator", "luring", "gallantry", "hewn", "whisked", "pericles", "desertion", "rumania", "yow", "wherewith", "siliceous", "mund", "circulates", "signore", "coldly", "envoys", "restorer", "staves", "coldness", "existe", "friesland", "orden", "riviere", "gusty", "brazier", "bayreuth", "sonntag", "semaine", "godliness", "docile", "maliciously", "vole", "cantons", "siglo", "enveloping", "piedra", "subito", "tangles", "meanest", "hollows", "luckiest", "officiate", "mumble", "espacio", "oppress", "grandfathers", "usury", "russes", "greedily", "vizier", "ojos", "nostril", "tombstones", "wavering", "barbarism", "vienne", "alway", "surmise", 
"blanch", "inscrutable", "campagne", "syne", "xxxii", "saluted", "protectorate", "hieroglyphics", "materialist", "landlady", "blameless", "amalia", "absurdly", "garnished", "fernand", "corporeal", "passivity", "partiality", "circumscribed", "steno", "disposes", "berta", "emanate", "rummage", "headstrong", "plies", "scantily", "waar", "befriended", "professing", "nestling", "piedras", "immortalized", "leper", "animus", "dimple", "noblest", "supine", "bloodthirsty", "squint", "vitals", "lamenting", "benedetto", "vindictive", "overtook", "goe", "palast", "triumphed", "scanty", "difficile", "vagaries", "undaunted", "lucan", "hemming", "nuevas", "defiled", "faltering", "saracens", "tisch", "eke", "conceited", "denys", "naissance", "laymen", "shopkeepers", "mortification", "combats", "indulgences", "tard", "fattening", "drench", "digesting", "cupola", "hund", "kommer", "canst", "idleness", "lunge", "mahmud", "minuet", "entombed", "fers", "diverged", "spouts", "pontifical", "glided", "sleeplessness", "iago", "axed", "overdone", "socratic", "revulsion", "rosamond", "schwarze", "criticising", "porpoise", "nowe", "oligarchy", "psychical", "rives", "houten", "fanned", "berge", "wagging", "germinate", "chrysanthemums", "misdeeds", "acto", "earnestness", "wetted", "undercurrent", "steerage", "granary", "befitting", "whitish", "irreconcilable", "giveth", "concocted", "essayist", "epicurean", "blacked", "refit", "boite", "unwashed", "detaining", "shod", "oratorio", "befall", "appurtenances", "wearily", "northernmost", "trollope", "enchanter", "unscientific", "withstood", "sandhills", "heaviness", "knapsack", "animaux", "calcul", "consciences", "inflected", "linseed", "caisse", "staccato", "dels", "agamemnon", "dodged", "refusals", "outrages", "cuneiform", "footstool", "dopo", "uncircumcised", "emblazoned", "mettre", "wrangling", "dorcas", "confiscate", "bloods", "odours", "mongrel", "forewarned", "degenerated", "eventide", "impairing", "dispossessed", "meagre", "mopping", "iver", "fantastical", "dorf", "yama", "laatste", "chintz", "nebulous", "slink", "lineal", "droll", "honouring", "grenadier", "anachronism", "methodically", "stiffened", "athenians", "hautes", "aleppo", "whimper", "whomsoever", "viciously", "fiddlers", "endow", "raum", "indistinct", "counterbalance", "razed", "anzahl", "invents", "loungers", "wilberforce", "manus", "tenfold", "scoured", "schule", "carley", "knotty", "stewardess", "furthered", "chancel", "inexorably", "mitglieder", "worships", "ironed", "inhabits", "domestication", "olof", "japon", "appendage", "geographer", "omnis", "naphtha", "clairvoyance", "frente", "aeneas", "narrates", "girdles", "heartbroken", "parola", "lameness", "offal", "smithy", "dawns", "frais", "couverture", "staid", "encircling", "verte", "wove", "pithy", "caressed", "infinitive", "hysterically", "incantation", "blissfully", "shirk", "pangs", "monsignor", "fulness", "commande", "domestics", "unpretentious", "poachers", "galvanic", "narr", "joven", "parlance", "lethargic", "drunkard", "conveyances", "steinmetz", "cowper", "bronzes", "essa", "knell", "profited", "flavia", "startle", "algernon", "exterminate", "heikki", "exalt", "nein", "zal", "interludes", "jahren", "bide", "suitor", "russe", "bevy", "gravelly", "inconspicuous", "juste", "wisps", "urbane", "hoek", "nebuchadnezzar", "diffusing", "stupor", "gratuitously", "aimless", "parfait", "flit", "quietness", "accede", "sicher", "overshadow", "xli", "principale", "turnips", "statuette", "theobald", "dwindled", "dispenses", "fertilizing", "ower", 
"narcissist", "sextant", "falsehoods", "swampy", "euch", "wast", "obtenir", "donning", "cecily", "sappho", "estancia", "wurden", "fama", "lustful", "guano", "presbyterians", "worshiped", "duque", "autem", "rebuked", "cloisters", "luella", "presumptuous", "toothache", "presage", "boars", "afore", "dour", "moistened", "kegs", "unadulterated", "reciprocate", "rade", "quia", "begat", "propelling", "ripen", "suffocating", "athos", "grasse", "cinq", "xxxiii", "brawn", "frowning", "gaius", "matchless", "boatman", "unconcerned", "dood", "orthography", "conjured", "assyrians", "selv", "vaulting", "fonte", "gossiping", "freshen", "tugged", "gog", "outdone", "detest", "paraded", "trifling", "undergrowth", "enamored", "carlotta", "ceux", "cuatro", "methode", "ulterior", "puro", "heracles", "whirled", "passim", "thei", "gebruik", "vraag", "jovial", "scoundrel", "romany", "xxxviii", "duplicity", "meddle", "exaltation", "handiwork", "andras", "joyously", "heaping", "strident", "oration", "grunted", "riche", "pilote", "wampum", "dreading", "humorist", "nourishes", "vite", "cun", "combative", "winked", "unhappily", "rube", "chronometer", "squaring", "wring", "apparitions", "shrieking", "graaf", "erst", "scurvy", "peacocks", "ophir", "wouldst", "pocketed", "enormity", "coarser", "hypnotism", "oeil", "dissociated", "exclaims", "ceaseless", "emblematic", "lerwick", "fertilize", "disengage", "commonest", "daj", "unreserved", "lessens", "judicially", "vend", "smattering", "taunts", "stealthily", "ripened", "cleverness", "roped", "sorcerers", "clang", "sardinian", "waltzes", "sunlit", "attests", "parched", "peaceable", "achtung", "stanzas", "infuriated", "dismounted", "incongruous", "kindest", "stam", "intervenes", "vieles", "bonnets", "bared", "frenchmen", "callow", "edicts", "lemuel", "inattentive", "transmutation", "sweeten", "confide", "voiceless", "sombrero", "isidore", "headdress", "nuestros", "tannin", "limite", "boughs", "naturel", "overseers", "presentment", "sprigs", "amiens", "diez", "prudently", "foresees", "patronizing", "presentable", "pales", "dais", "adornment", "precipitating", "hearken", "insolence", "blockhead", "einige", "patting", "hippocrates", "elaborately", "lundi", "gaslight", "presides", "divested", "pith", "eaux", "transvaal", "gaff", "disintegrating", "folie", "frock", "bleue", "flambeau", "fuming", "veel", "chattel", "wrest", "forgives", "waterless", "effectual", "unimproved", "paddled", "inkling", "vigils", "schoenen", "garcons", "gauntlets", "patria", "blacksmiths", "menor", "ploughing", "timon", "parsimony", "typified", "darting", "ashen", "blunted", "snarl", "comptoir", "echt", "pained", "inexcusable", "laud", "mutterings", "precipice", "geschrieben", "recalcitrant", "wos", "thoughtfulness", "harshness", "ailes", "neuve", "limping", "darum", "utters", "processions", "gluttony", "kneading", "etwas", "sait", "templars", "nineveh", "mesures", "enquired", "aphorisms", "compleat", "consumptive", "dalmatia", "noisily", "readjustment", "unaccountable", "weise", "trickling", "commoner", "reminiscence", "pouvoir", "yeux", "fui", "waned", "assented", "overcharged", "pucker", "sanctify", "messrs", "insolent", "octavio", "portes", "finis", "beastly", "fortresses", "matrons", "thun", "gawain", "guinevere", "heresies", "annihilated", "tardiness", "mangan", "mose", "specks", "futur", "incredulous", "dere", "calvinist", "suas", "buckler", "peal", "asunto", "adroit", "dilettante", "georgiana", "ecstacy", "peasantry", "oppressors", "boeken", "corns", "faring", "dama", "unos", "pinkish", "blurted", 
"tutelage", "merited", "hacia", "peculiarity", "decrepit", "encroaching", "solemnity", "equivocal", "lieb", "amass", "maketh", "ihrem", "disengaged", "distilling", "effigy", "saloons", "assailed", "incensed", "zachariah", "veneration", "broach", "miseries", "personification", "partes", "scuttle", "rougher", "supplanted", "sardonic", "aghast", "raiment", "disused", "vetter", "stooped", "dower", "andalusian", "wordy", "feudalism", "achille", "magister", "bolting", "lumbering", "fourfold", "forgave", "antonius", "indien", "replenishing", "immemorial", "indwelling", "seh", "jaunt", "genere", "ipso", "quartier", "wallow", "unabashed", "haf", "homeric", "overpower", "expounded", "downpour", "dumbfounded", "cubits", "outlast", "frothy", "macedonians", "labouring", "pouvez", "nothings", "kommen", "allgemein", "colonist", "sorbonne", "rares", "colla", "philippi", "adduced", "agli", "unrequited", "mangle", "alludes", "theseus", "commuted", "medan", "saracen", "annulled", "covertly", "dalle", "rapped", "foreboding", "fortuitous", "autumnal", "xxxv", "sepulchre", "kunt", "despotic", "dicky", "beholden", "celui", "apostate", "enda", "faltered", "queda", "entrar", "sicherheit", "gorse", "louse", "wilfully", "paralysed", "tillie", "distanced", "vespers", "scylla", "vats", "urchins", "implore", "kindle", "pricks", "tenements", "tithes", "thinnest", "sipped", "mando", "pulsation", "hitching", "xxxiv", "obediently", "calvinism", "milked", "vesuvius", "disembodied", "aylmer", "scoff", "confidant", "nape", "disparaging", "impolite", "bataille", "oia", "domine", "sluice", "darke", "whistled", "furor", "austrians", "craves", "soiree", "trouver", "enslave", "dimanche", "grimly", "espouse", "casks", "conjoined", "cabled", "muchos", "lightened", "spongy", "verner", "specious", "threshing", "infliction", "frederica", "entranced", "deprives", "onde", "scimitar", "holz", "uninterested", "cavalcade", "adulation", "loitering", "dastardly", "ludovic", "avarice", "sangen", "butchered", "pointedly", "ouverture", "rustle", "excitable", "hermanos", "alluding", "frere", "insipid", "unfathomable", "ingmar", "holiest", "arbre", "effeminate", "vainly", "straying", "venereal", "mercifully", "blatt", "pansies", "acceded", "dregs", "obscures", "millicent", "foresaw", "befriend", "anker", "malign", "abortive", "embarkation", "varnished", "zarathustra", "valent", "knoweth", "anemones", "sacre", "hunched", "buzzed", "pickets", "astringent", "soothed", "vins", "premeditated", "cherche", "aucune", "pueblos", "sentimentality", "tenable", "jumbled", "triumphantly", "leva", "vergessen", "scolded", "fetters", "vulgarity", "magasin", "perpetuation", "tafel", "pliny", "sewed", "jubilant", "sangamon", "continuo", "welche", "silesia", "staat", "amputated", "reappears", "enquiring", "masha", "redden", "kreis", "faccia", "gae", "sobbed", "omnium", "copie", "snuggled", "surest", "bribed", "alarmingly", "kosten", "bloodless", "basle", "sigurd", "tute", "obliterate", "dort", "perron", "pestle", "falsity", "sapling", "elapse", "myne", "enamelled", "torments", "tortuous", "oiseaux", "seafaring", "mooted", "repented", "infirmity", "corydon", "selfishly", "drudgery", "pacha", "shrubbery", "navies", "impartially", "imperfectly", "slanderous", "interminable", "ancien", "soins", "indomitable", "unseemly", "vix", "godlike", "scrambles", "arbeiten", "merriment", "rotted", "thetis", "repulsed", "garni", "brickwork", "soulless", "abbots", "frontispiece", "vivacious", "bloodshot", "salutations", "pela", "dogmas", "forsooth", "geordie", "orestes", "deathbed", 
"indefensible", "brutish", "trill", "venetia", "melchior", "xerxes", "poudre", "ramparts", "disband", "symmetrically", "reek", "hearers", "frigates", "availed", "externals", "principales", "damsels", "spielen", "monotheism", "menelaus", "morsels", "hatte", "skirmishes", "congratulatory", "zuletzt", "melodious", "baited", "veined", "kens", "norwegians", "imitates", "conjugal", "boldest", "hafen", "flaubert", "enunciated", "strictures", "flinging", "ferme", "discouragement", "werke", "vesper", "parapet", "filles", "usurp", "gerade", "traduire", "peremptory", "unrecorded", "seiner", "gallia", "hayne", "lorsque", "fronds", "interposed", "jugglers", "veri", "dessin", "weet", "naively", "nominative", "cleaves", "doivent", "avenging", "ploughed", "severing", "ety", "hev", "cremona", "martyred", "afflict", "crags", "mimicry", "intersected", "tomkins", "winced", "literati", "trotted", "hungrily", "scold", "chirping", "utan", "tress", "vaunted", "astride", "nostro", "ruy", "emancipated", "ordain", "rapt", "wirt", "sawed", "receded", "emboldened", "pessimist", "sedate", "stammered", "supposes", "genteel", "engulf", "huguenot", "epicurus", "gouverneur", "upu", "hankering", "normans", "enumerating", "toiling", "spiteful", "governess", "alternated", "colander", "croak", "abhor", "boek", "inexorable", "chercher", "harmoniously", "bijoux", "worshiping", "gewicht", "coolly", "accompli", "wann", "vieille", "ellos", "hecho", "verry", "rowed", "elfin", "ingots", "ridding", "tegen", "troppo", "meads", "exhaled", "demolishing", "pratique", "calabash", "brigantine", "zeb", "fitzhugh", "rioters", "persecutions", "arriva", "cramming", "chuckling", "disfigured", "mers", "chios", "muro", "oreille", "transcended", "xxxvi", "cuerpo", "tiel", "faintest", "bleek", "adela", "genitive", "civile", "haupt", "testy", "physiologist", "imprison", "repelled", "abend", "eran", "quem", "plundering", "abhorrent", "rebellions", "sympathizers", "scribbling", "phineas", "emissary", "inhumanity", "wem", "belittle", "repudiated", "divina", "leonie", "sympathetically", "permet", "elis", "liddy", "dabei", "rollicking", "offhand", "geraniums", "bashful", "doze", "currants", "absolve", "conjectured", "grandest", "kinsmen", "lier", "welk", "shipwrecked", "doen", "tacitly", "dint", "reverberation", "quickening", "waal", "mistook", "apprehensions", "aunque", "celestine", "schoolmaster", "impressionable", "gingerly", "apologised", "riven", "taketh", "cornfield", "fretting", "fetter", "jeers", "manufactory", "jarred", "theorie", "armen", "bewilderment", "loveliness", "ministered", "idiomatic", "scalping", "slav", "attics", "wilhelmina", "hermits", "gullies", "prerogatives", "banishment", "tempering", "kampf", "fallacious", "vestments", "morsel", "leniency", "scrupulous", "woodsman", "bocca", "dicta", "meisten", "aubert", "richtig", "clumsily", "catholique", "turpentine", "ells", "cussed", "evaded", "thickets", "clink", "personage", "cavallo", "vender", "daar", "bouche", "delinquents", "furlough", "angleterre", "snarling", "samedi", "creaking", "bequeath", "subjugation", "gape", "clase", "unquestionable", "prendre", "irritates", "whigs", "despatches", "titian", "arras", "fathoms", "printemps", "physic", "nuptial", "thickest", "bulbous", "whist", "mieux", "darauf", "expound", "eget", "exhilaration", "ziel", "lordships", "chanced", "fastenings", "ketch", "treeless", "adores", "aground", "splendidly", "feuille", "inattention", "discolored", "traf", "sinning", "jouer", "forestall", "vater", "moselle", "gnawing", "crudely", "saplings", "profuse", 
"dispelling", "attainments", "gane", "couched", "bestows", "sone", "particularity", "knighthood", "blesses", "dure", "sickened", "tali", "canteens", "thoroughfares", "donatello", "penniless", "abrogated", "druck", "kingship", "puis", "manes", "relapsing", "arcadian", "claud", "swart", "eschew", "vastness", "precipitous", "detachments", "arsenals", "hoofd", "tramping", "vieja", "thereabouts", "bloed", "resultat", "betrothed", "pourquoi", "dispelled", "pierrot", "duca", "sameness", "scruples", "gloved", "bete", "dowdy", "clamoring", "aguas", "visitations", "recklessness", "stirrups", "intimated", "allspice", "squirming", "thunderstruck", "pleiades", "surreptitiously", "finery", "langen", "eugenie", "sequestered", "hesitating", "stoops", "stiffening", "scrutinizing", "allude", "sprawled", "interesse", "tomar", "courted", "condoned", "unsavory", "deservedly", "blackbirds", "vowing", "plying", "gangrene", "purplish", "stille", "enliven", "hollowed", "graven", "lengua", "craved", "fracas", "envelop", "dismount", "grudgingly", "quae", "bole", "believeth", "unafraid", "stamens", "omnipotence", "irresponsibility", "zelf", "seaports", "conscientiously", "boomed", "jussi", "joust", "grander", "shackled", "weedy", "sacra", "ipsa", "grope", "suomen", "echte", "brightens", "muertos", "jailer", "gleich", "gladden", "sarcastically", "tuft", "quickened", "reverent", "braved", "jaune", "joli", "beckoned", "unquestioned", "scrawled", "savagely", "usurped", "monstrosity", "certains", "ravishing", "grumbled", "disheartening", "nobis", "stolz", "unavoidably", "blest", "govinda", "menial", "clayey", "delighting", "vielen", "conjuring", "dutiful", "absurdities", "cabeza", "ony", "gordian", "edification", "flinch", "xxxvii", "despot", "affaire", "insincere", "inger", "vuelta", "beckoning", "vivant", "vendre", "ignis", "begone", "lucidity", "feuds", "toque", "wille", "primi", "hiver", "lateness", "dier", "nunnery", "forefinger", "rudiments", "erwartet", "heathens", "celibate", "simul", "clatter", "werd", "faultless", "awkwardness", "praiseworthy", "mosca", "seigneur", "ails", "frage", "vapours", "jij", "delphine", "bruder", "remiss", "languishing", "entrails", "erreur", "cossack", "thrashed", "topsail", "modicum", "malte", "solange", "ethiopians", "rajah", "persuasions", "steppes", "sheathed", "derided", "encroach", "correlative", "maire", "diametrically", "fasted", "eunuch", "algunos", "gazes", "virginians", "negligently", "sistine", "higginson", "hadden", "unmoved", "glum", "perplexity", "particulier", "sabe", "sulky", "guarda", "skyward", "woeful", "grund", "droop", "neque", "dislodge", "voyageur", "waded", "flore", "unacknowledged", "quietest", "carven", "aptitudes", "bonnes", "confusions", "fara", "alimentary", "wus", "republik", "encroachments", "ineffable", "hearer", "awakes", "republique", "generis", "zit", "probity", "formas", "grubs", "unflinching", "murmuring", "gaan", "jungen", "kop", "triumphal", "affable", "hijo", "worshipers", "avons", "flail", "adulterated", "nicodemus", "ardor", "wissenschaften", "veo", "missive", "ascends", "splintered", "transacting", "vus", "nomine", "busen", "loafing", "talus", "republicanism", "foibles", "cose", "choses", "squatter", "waldemar", "colourless", "unyielding", "flabby", "enlarges", "apace", "doktor", "harbored", "bulwark", "stringy", "seront", "sonorous", "breastplate", "draughts", "heaved", "lazare", "uel", "fashioning", "churned", "correspondance", "dappled", "gallic", "tacking", "feigned", "dross", "solidity", "doge", "indecisive", "recurs", "dripped", 
"epicure", "levity", "journeying", "dito", "oppressor", "metrical", "kopf", "immeasurably", "tussle", "fiendish", "glorification", "wayfarer", "arabians", "expanses", "nuits", "dervish", "irrepressible", "leider", "joppa", "wilted", "emoluments", "egal", "conned", "mutes", "outwit", "magnesia", "patronize", "impassable", "serf", "koning", "buries", "vobis", "signor", "phlegm", "reste", "freedmen", "obliging", "hermetically", "gravestones", "uncommonly", "nudged", "inhospitable", "dissension", "intermingled", "dwarfed", "langs", "asters", "surmounted", "elspeth", "salutary", "bringt", "frosts", "ached", "defile", "odio", "ansehen", "effectually", "unprovoked", "apocryphal", "pallid", "sulphuric", "antipathy", "atone", "douce", "storeroom", "theodora", "paler", "lhe", "wereld", "offing", "infest", "dampier", "hardens", "frisk", "alister", "expelling", "obliges", "pertained", "beneficent", "luxuriant", "mulatto", "plausibly", "concubine", "complimenting", "courtly", "dampness", "zusammen", "platitudes", "pois", "porphyry", "deviating", "taunted", "ernestine", "bubbled", "tienes", "korte", "mortified", "upturned", "cordage", "hobbled", "loath", "gagner", "nibbling", "unsophisticated", "vexing", "longa", "digression", "astonish", "dynastic", "cognizance", "piquet", "loveliest", "nearness", "vif", "procurator", "plaintive", "exult", "claps", "disreputable", "seraph", "dressmaker", "fehler", "publican", "hoar", "movimiento", "kreuz", "rebuffs", "reichstag", "woche", "handmaid", "oir", "chemises", "consuelo", "impostor", "nomen", "ponderous", "maisons", "scrupulously", "plaisir", "intruding", "baptize", "fatigues", "asaph", "princesse", "franche", "plucky", "dessins", "eusebius", "untidy", "loggia", "tribesmen", "subsist", "tuin", "augen", "beholding", "scarfs", "leve", "shallows", "ersten", "unjustifiable", "growls", "sported", "quaking", "refraining", "commingled", "coasting", "logement", "kindern", "conciliatory", "stiffen", "showman", "officiated", "distemper", "subterfuge", "jede", "aspired", "mathilde", "pues", "lazaro", "mouvement", "beispiel", "penitent", "toyed", "anglaise", "lamentation", "tunc", "extol", "patrimony", "belgians", "knave", "functionaries", "croup", "broadcloth", "disuse", "reeled", "quire", "goeth", "fascinate", "garish", "baronet", "bombastic", "francie", "scoffed", "thieving", "minde", "thinke", "snarled", "unearthly", "predestination", "verbindung", "regulus", "vidi", "trouve", "rapides", "reviled", "coverlet", "lustig", "bringen", "fearfully", "musketeer", "fiddles", "furlongs", "fens", "ancienne", "arraigned", "liquide", "tanz", "whitewashed", "gilding", "twining", "explication", "violette", "humanely", "jungfrau", "verdad", "perrine", "gaiety", "alten", "uttermost", "aristophanes", "letitia", "overthrew", "lave", "frowns", "fabricius", "sheepish", "diferentes", "antic", "abed", "edifying", "dreadfully", "aun", "sadder", "ravage", "contemptible", "unfailing", "fowls", "untoward", "gloster", "venu", "clergymen", "fiel", "endeavouring", "dislodged", "casse", "obviate", "juster", "genom", "ueber", "primero", "saluting", "beguiling", "bayonets", "trompe", "flavius", "gie", "playfulness", "confluent", "orde", "deel", "lernen", "husks", "beckon", "raved", "herren", "anfang", "jewelled", "reaps", "fatto", "traum", "premonition", "recut", "sureties", "montre", "grunting", "baubles", "personages", "actes", "exigencies", "marveled", "peloponnesian", "gotha", "tasso", "waffen", "cultivator", "nihil", "quintus", "crucify", "unsaid", "fonctions", "untie", "instigator", "girt", 
"annul", "lanky", "illa", "blushes", "shewed", "outdo", "sycamores", "truant", "shrieked", "ermine", "corroboration", "juge", "circe", "capitulation", "aspirant", "germinal", "vindicate", "repelling", "gesucht", "fallible", "pantheism", "strutting", "incalculable", "tijd", "soliloquy", "mammy", "beaks", "caresses", "quello", "indolent", "ursus", "banns", "thistles", "idiosyncrasies", "inducements", "ennui", "abetted", "expending", "ista", "sweltering", "purer", "hedgerows", "narrowest", "disapproving", "meses", "interrogative", "squealing", "feverishly", "sneaked", "obras", "drowns", "nostri", "persuasively", "walloon", "squalor", "panelled", "ossian", "chaplet", "narrate", "peleus", "ebon", "hesiod", "maman", "bleat", "glorifying", "gleamed", "valiantly", "steeds", "elli", "infallibility", "voll", "altes", "franciscans", "comport", "malheur", "overdo", "ragusa", "sette", "radishes", "deeming", "flaccid", "eum", "putrid", "unguarded", "prodded", "fasts", "sterner", "tras", "womanly", "surmised", "northwards", "tiu", "mayest", "judiciously", "worshipper", "diderot", "ruts", "regretting", "scolding", "bosphorus", "dimpled", "massing", "offen", "leathery", "hjem", "caballos", "grimace", "bribing", "unbecoming", "bridles", "rinaldo", "dejected", "vosges", "comely", "prow", "sprig", "apulia", "squander", "swarmed", "wields", "dragoons", "brune", "landholders", "cradled", "dreads", "spurring", "sollte", "plaything", "pander", "stamm", "abominations", "viene", "reestablished", "strangling", "cultivators", "insignificance", "deceiver", "helle", "sputtered", "faites", "merrier", "simples", "ruggles", "miel", "subsides", "nobler", "michaelmas", "bildung", "howled", "blanched", "allemand", "unequalled", "cicely", "temperamental", "dally", "malays", "nauseous", "brandishing", "wags", "chronicler", "allem", "fais", "disproved", "justinian", "lutte", "dobbin", "riz", "coquette", "menge", "remarking", "cobweb", "punctually", "unwillingly", "cadeau", "undoubted", "formless", "shipmates", "englische", "plaats", "shorn", "doubtfully", "typhus", "reticent", "welter", "lande", "exertions", "insel", "sprachen", "eins", "retentive", "gerda", "plodding", "deserter", "rending", "gaillard", "consign", "mantles", "neatness", "adornments", "britannic", "becher", "unbeliever", "parading", "gamin", "confederated", "lume", "overwhelms", "embankments", "quanto", "speculator", "madmen", "listless", "wheaten", "deprecating", "faggots", "ducal", "downcast", "tedium", "seamanship", "gascoigne", "pomegranates", "sooth", "knie", "sportive", "hewson", "aout", "turan", "undeserved", "principalities", "aider", "excelling", "misadventure", "meiner", "rond", "dramatists", "servile", "rickety", "enchantments", "fuori", "secondo", "figura", "prosaic", "diadem", "pani", "outa", "bedeutung", "sincerest", "sagen", "tittle", "imprudent", "keer", "trou", "nannie", "laat", "deliberated", "snubbed", "suffocate", "applauding", "epithets", "toch", "floundering", "preserver", "revolts", "espy", "deren", "hallow", "wharves", "kunde", "canvassed", "chastisement", "unmitigated", "whined", "sashes", "assail", "flirtation", "unterhaltung", "courtiers", "carboniferous", "brillant", "equanimity", "agitators", "venerated", "curs", "neer", "assimilating", "proudest", "subjunctive", "harun", "perishing", "inaugurate", "slavs", "libres", "noiseless", "cayley", "worshipful", "geh", "spurned", "selim", "chastised", "zich", "forethought", "viscera", "excitability", "madder", "exterminated", "mette", "bronzed", "grimy", "lascivious", "ille", 
"dispassionate", "bonheur", "charmingly", "glimpsed", "partaking", "firebrand", "deprecation", "intimation", "chequered", "glimmering", "alphonso", "falla", "disbelieve", "brevet", "darf", "troyes", "exterminating", "revolted", "bunched", "besoin", "scrutinised", "allez", "herded", "athanasius", "gemacht", "deliberating", "humaines", "londoner", "aeschylus", "plantagenet", "episcopalian", "zwar", "soldat", "nisi", "thucydides", "tapa", "repudiate", "advisability", "lope", "festering", "relinquishing", "dessa", "mercia", "furies", "piqued", "jinks", "biddy", "compris", "theophilus", "crony", "sambo", "stellen", "professes", "wherewithal", "shrieks", "taas", "ominously", "caer", "ablution", "demure", "athene", "jist", "ipse", "parasols", "munition", "veered", "jonge", "serfdom", "gossips", "rawlinson", "scuffle", "uncritical", "infatuated", "rhythmically", "gaat", "riotous", "tenga", "embittered", "unleavened", "veces", "stockade", "parece", "bushmen", "babylonia", "tempts", "tempel", "uur", "devolve", "satyr", "fearlessly", "ajar", "pampas", "altra", "suppers", "fluttered", "untrustworthy", "exhorted", "ravines", "yokes", "howitzer", "interjection", "stocky", "bazaars", "himmel", "greate", "strenuously", "wildness", "compensations", "laxity", "deathly", "unloved", "balked", "fairyland", "balaam", "hamar", "rekindled", "drams", "entreat", "brainless", "souci", "cessing", "cocking", "railed", "abounding", "fount", "poacher", "invisibly", "lithe", "intercede", "tusks", "hatten", "ayrton", "courtier", "blotted", "impetuous", "grammes", "shrouds", "ambergris", "hellen", "clearness", "embroider", "hubbub", "robed", "unchangeable", "wunsch", "haya", "magisterial", "boor", "recites", "anguished", "ailleurs", "meteoric", "jacopo", "equalled", "palabra", "arithmetical", "royally", "molle", "plantes", "dishonorable", "thwarting", "venise", "scurrying", "subverted", "urbino", "effets", "broadsword", "blankly", "auras", "bonfires", "allt", "cloudless", "conflagration", "xenophon", "bevis", "dethroned", "chapitre", "vestige", "courrier", "cheerfulness", "egoism", "cataclysm", "harried", "transshipment", "cuore", "fatherless", "puedo", "groen", "seers", "cretan", "roumania", "blubber", "appeased", "coaxed", "pageantry", "disparage", "triste", "chimed", "phraseology", "verdienen", "memoire", "morass", "intimes", "righting", "moder", "tasse", "dessus", "striding", "panelling", "braving", "prayerful", "raad", "transfixed", "balle", "leaven", "lout", "tucking", "unwary", "herrings", "cubit", "begets", "groundless", "prancing", "amelioration", "wark", "beeld", "bezahlen", "mightier", "enthroned", "overburdened", "dwindle", "lindau", "beter", "sujets", "acquiesce", "alacrity", "drawbridge", "gude", "overhauling", "girle", "pulverized", "holier", "mauer", "everard", "uncivil", "nondescript", "employes", "temperaments", "consulter", "simpleton", "brutes", "howsoever", "unsympathetic", "jermyn", "dico", "rejoinder", "condescension", "dilate", "rasch", "tiber", "bekanntschaft", "feuer", "secours", "skilfully", "abolitionists", "flustered", "compactly", "lasses", "fus", "corsage", "hym", "laboured", "enumerates", "decir", "relinquishment", "ohg", "sall", "cession", "liken", "forfeits", "heeding", "fata", "revenu", "helder", "verder", "caesarea", "naturelle", "wordless", "sleepily", "prowling", "harmonie", "eludes", "revelry", "deface", "propensities", "mimicked", "mete", "algunas", "uninjured", "rivage", "populaire", "lief", "toddy", "disheartened", "ruinous", "spoor", "upanishads", "eigene", "bewitching", "mihi", 
"individu", "accusers", "sunshade", "cuir", "hals", "furrows", "throngs", "sarcophagus", "dozing", "siete", "chink", "likenesses", "pervading", "caxton", "soames", "fermenting", "beiden", "blithe", "paralyze", "kazi", "tilling", "hereunto", "daad", "languish", "feathery", "reasoner", "adorning", "gaily", "weib", "samt", "jubilation", "tels", "storks", "accoutrements", "abeyance", "ciudades", "enfin", "suivi", "iniquities", "nadie", "purring", "squinting", "strolls", "encuentra", "gradations", "conocer", "vsed", "molest", "appetizing", "encamped", "trifles", "sammlung", "langage", "importantes", "suiting", "hesitates", "paralytic", "eastwards", "parsimonious", "pinafore", "alwyn", "albertine", "disposer", "politische", "foreknowledge", "galleys", "sunning", "farcical", "weel", "toiled", "incited", "rhythmical", "rippled", "tresses", "agitating", "oriana", "frankness", "castilian", "bunsen", "buenas", "susa", "sulle", "fuera", "outlived", "anny", "repulse", "basaltic", "hinter", "middling", "minstrels", "personae", "wain", "englander", "gascoyne", "knighted", "torchlight", "teniendo", "emanated", "southerner", "persevered", "hounded", "butted", "longings", "galilean", "ayant", "dominicans", "helmsman", "meditated", "shuddering", "homesteads", "abrogation", "justicia", "jutting", "deliverer", "knecht", "aeneid", "vehemence", "befell", "ette", "klar", "neige", "sneered", "chattels", "brambles", "disembark", "secede", "unmixed", "grieves", "prises", "tumbles", "sogenannten", "parnassus", "debarred", "dandelions", "abyssinian", "maler", "bulgarians", "coaxing", "marshy", "terres", "inne", "preying", "grasps", "subsisting", "freunde", "bladders", "avions", "junto", "bloemen", "latium", "shuttered", "alchemists", "morose", "poore", "regretfully", "abbeys", "dutchmen", "agitate", "vien", "abdication", "discontents", "botanists", "bohemians", "blir", "foreheads", "narrating", "gering", "pedant", "stubbornness", "distantly", "humaine", "averting", "pyre", "faubourg", "wooed", "chalky", "teamster", "beached", "fringing", "glans", "thousandth", "sacrilege", "demagogue", "demean", "changement", "stipulating", "propping", "straighter", "weirdly", "broods", "rejoices", "limber", "hablar", "mahomet", "telegraphy", "lehre", "doeth", "verschiedenen", "chrysostom", "blackfeet", "waistcoats", "chalked", "mightiest", "marvelously", "apse", "bailiffs", "infirmities", "illum", "aboot", "jolted", "manne", "jacobite", "viendo", "freckled", "plenipotentiary", "philistine", "gambled", "chaleur", "unimaginative", "joyeux", "gratify", "meuse", "certainties", "zie", "fittingly", "gelatine", "undid", "quelque", "publick", "electioneering", "nette", "ressource", "betel", "moisten", "demoralized", "peopled", "suffi", "swooped", "doctored", "soured", "quieted", "albumen", "encircle", "carmelite", "anges", "exhort", "voyagers", "tendrils", "thal", "nullification", "ostensible", "malarial", "exasperation", "stumpy", "jeden", "whereon", "entente", "nala", "mainsail", "inom", "promptness", "retraite", "excommunicated", "scalding", "storekeeper", "muskets", "uglier", "witchery", "predilection", "wavered", "climes", "firelight", "contrivance", "anoint", "scatters", "wallowing", "hindrances", "braver", "repartee", "boggy", "vragen", "termes", "chiming", "modulations", "philanthropists", "urteil", "retaliated", "founds", "poplars", "knightly", "debater", "tarde", "millinery", "appian", "irresistibly", "endeavoring", "comically", "substratum", "porpoises", "snel", "persuades", "rapports", "foreshadowed", "meekness", "audibly", 
"dewy", "obliquely", "uneasily", "meted", "liveth", "outre", "agin", "phoenicia", "boven", "jaunty", "balthazar", "squeamish", "tono", "parmi", "eccentricities", "pasar", "potentialities", "anthea", "letzten", "airships", "presuppose", "hetty", "affectation", "abdicate", "creak", "archdeacon", "haciendo", "pretension", "descents", "vicissitudes", "dupes", "larks", "tormentor", "tagen", "postilion", "weal", "grudges", "perversity", "convulsive", "inflame", "zien", "eclat", "doric", "pathetically", "bluster", "witching", "depreciate", "bellum", "gendarme", "dionysius", "imperceptible", "fattest", "atolls", "tibi", "parley", "jessamine", "palatial", "prelate", "flippant", "libations", "convivial", "trat", "adorns", "kamer", "grubbing", "commoners", "cultivates", "thankfulness", "nich", "unturned", "workroom", "zukunft", "phoebus", "censured", "sache", "relished", "boers", "toils", "salles", "enorme", "instigation", "veuve", "indefatigable", "overthrowing", "maudlin", "excusable", "craggy", "gushed", "extricate", "provocations", "deplore", "defrauded", "laut", "aplomb", "centum", "cabbages", "epee", "truism", "employe", "fervour", "babylonians", "fabius", "despondent", "ostia", "cunningly", "bathers", "turbid", "sceptics", "pollyanna", "bort", "privateers", "knowe", "preoccupations", "ludovico", "besonders", "villainy", "feuilles", "diverses", "maladie", "hurtling", "squabble", "ravin", "seest", "omnes", "methodism", "mente", "luego", "overtakes", "predominates", "phillis", "startlingly", "couplet", "falta", "inimical", "imperious", "townsmen", "sondern", "revoir", "handfuls", "gratia", "formant", "gongs", "eigenen", "larga", "pentateuch", "immobility", "purifies", "sparkled", "interchanged", "lulled", "disrepute", "rechten", "implacable", "sert", "employments", "carinthia", "attired", "uncalled", "repels", "zat", "aika", "pliant", "reappearance", "urbain", "avocat", "emaciated", "gern", "vassal", "cantos", "manse", "pining", "unknowing", "blithely", "moderns", "fashionably", "virginal", "augur", "colonizing", "bodleian", "bicameral", "chapeau", "dramatized", "bringeth", "paquet", "regle", "broomstick", "suffocated", "voulez", "marauding", "cynically", "assuage", "estrangement", "versicherung", "limped", "yearned", "fondest", "parce", "frightens", "incontinent", "amante", "perpetrate", "nombres", "mientras", "fiercest", "coining", "invective", "sueur", "depose", "pacify", "sunder", "excommunication", "grizzled", "lade", "caballo", "loathed", "florid", "fatalism", "despises", "chanter", "quacks", "arme", "wend", "blackest", "reihe", "roubles", "relented", "meinung", "tarred", "beget", "mooi", "stenographer", "nipped", "disguising", "invulnerable", "flickered", "quiere", "kummer", "hideously", "motherly", "modele", "vexatious", "coachmen", "girlish", "reddening", "foremen", "shamefully", "herculean", "tormenting", "pleura", "bragged", "pester", "deputation", "oppressing", "domineering", "obtrusive", "wrinkling", "wiry", "labyrinths", "jealously", "beare", "welches", "footman", "pense", "chafe", "tapis", "schoolboys", "alexandrian", "sinless", "manche", "nobly", "absolutism", "hause", "grosser", "gudrun", "sharer", "confidences", "wakefulness", "monopolize", "gehen", "consoled", "mayores", "contrition", "diener", "resound", "unsuspected", "archbishops", "tarpaulin", "abajo", "mustapha", "cherokees", "peaceably", "exacted", "oddest", "purposed", "evince", "hyenas", "schoolmates", "luogo", "breathlessly", "hoarded", "naturalness", "flings", "irritably", "gorgeously", "helt", "noonday", 
"courteously", "sinuous", "availing", "meekly", "briefer", "serfs", "vives", "homburg", "wailed", "ippolito", "thunderbolts", "tule", "hustling", "milanese", "foran", "bloomed", "hortense", "scrawl", "manana", "sprechen", "foamed", "refectory", "yearns", "unaccustomed", "platoons", "unbelieving", "luminary", "quitter", "purser", "pratiques", "furtive", "renouncing", "accosted", "conning", "tiempos", "incantations", "enchantress", "parallelogram", "wonderment", "pasado", "groped", "warder", "morbidly", "palfrey", "persecuting", "feign", "swooping", "jackals", "niceties", "outlive", "dereliction", "exactness", "barbarossa", "dray", "silurian", "detaching", "sunburned", "spasmodic", "interlacing", "elegante", "corne", "quietude", "roundly", "monarchies", "trost", "rhododendrons", "flirted", "vraiment", "royalist", "untroubled", "aspirants", "sheepishly", "denk", "haft", "parisienne", "russie", "warily", "cadmus", "telle", "aflame", "gits", "aright", "windlass", "studious", "fineness", "estan", "setzen", "pharisee", "devenir", "cercle", "urania", "amicably", "tureen", "nuptials", "greif", "flints", "satirist", "visiter", "pone", "camillo", "hade", "extort", "staaten", "gleeful", "sprightly", "grindstone", "speaketh", "sacredness", "menton", "petticoats", "proffer", "haply", "pronounces", "fussing", "stragglers", "scowl", "tinder", "omniscience", "vot", "leaden", "advantageously", "kinderen", "pounced", "statt", "wollte", "bayeux", "tertullian", "pompe", "fastidious", "ensconced", "cyprian", "sagacity", "nipping", "fogs", "ausbildung", "protestations", "trickled", "lungo", "erde", "fondled", "poids", "wistfully", "abounded", "heureux", "disloyal", "paralyzing", "staggers", "contorted", "polemical", "neighborly", "dabbled", "villes", "piteous", "olen", "perfunctory", "pervaded", "doorsteps", "falsetto", "tatters", "whan", "puissance", "tunics", "lepers", "gloating", "dismembered", "hierro", "perfidy", "minne", "meaner", "propounded", "valois", "insubordination", "impious", "absolved", "dishonored", "vivir", "bathsheba", "klara", "stilted", "hastening", "dines", "capon", "stiffly", "folgenden", "cacher", "festivity", "grk", "thessaly", "folgende", "ayre", "afire", "sowed", "proprio", "brahmins", "gloat", "entanglements", "clawing", "wrangle", "autour", "immensity", "squabbling", "acquiesced", "rosamund", "deinen", "consecrate", "pursuers", "predestined", "gneiss", "gevonden", "rhin", "disobeyed", "firme", "dishonour", "lavished", "courtesan", "unkempt", "bassin", "zeichen", "jeder", "interjected", "humorously", "victoriously", "ascents", "hingegen", "retarding", "indiscretion", "undertone", "adot", "decease", "stigmatized", "tactful", "friable", "palatinate", "liegen", "fawning", "decoction", "resents", "orientals", "squeaking", "tinkling", "drie", "nostrum", "masterly", "dunce", "fera", "butchery", "wresting", "treacle", "frankrijk", "foolhardy", "bristling", "boreas", "cherubim", "nightcap", "massy", "consoling", "nues", "characterises", "antiochus", "cutlets", "hoofs", "drawl", "veux", "manoeuvring", "lances", "helfen", "rivier", "imogene", "impute", "dainties", "leghorn", "directness", "glutton", "laquelle", "unnaturally", "disquiet", "deerskin", "meest", "sufficed", "extolling", "wearied", "barbe", "pitied", "hame", "sibyl", "lignes", "victoire", "erring", "geschiedenis", "acclamation", "ypres", "gigante", "solamente", "berenice", "cisterns", "kist", "panoply", "credulity", "coiling", "capuchin", "verkehr", "sympathise", "piti", "sist", "noirs", "pitying", "twitched", "clefs", "actuel", 
"vem", "panted", "midshipman", "juda", "gondolas", "swiftness", "necessaries", "nullity", "tuli", "tenemos", "relishing", "unsuited", "gurgling", "imaginings", "hvis", "boatswain", "hearthstone", "fondle", "cuddled", "superintendence", "regeln", "betters", "joab", "corruptions", "persevering", "transversely", "abelard", "illusive", "octavius", "disquieting", "ripeness", "veering", "alguna", "tiere", "junker", "vapid", "hohe", "pieds", "unremitting", "rechnung", "clenching", "cordials", "bandaged", "evanescent", "fevered", "indignity", "pinches", "aglow", "midden", "sieg", "notamment", "bullocks", "peinture", "moyenne", "valerius", "chucked", "ransacked", "bugbear", "wreaked", "hogshead", "masques", "halfpenny", "fetes", "kneels", "reticence", "iambic", "lisbeth", "deplored", "icke", "unfashionable", "jacobean", "loveth", "sceptic", "vociferous", "eunuchs", "comed", "salz", "languished", "sneering", "coitus", "churchman", "lisette", "cocoons", "deserters", "ainda", "verre", "smallness", "esas", "remotest", "retorts", "housekeepers", "farewells", "conscript", "redder", "cria", "troupes", "tiptoe", "sociability", "idealists", "xlv", "crowing", "celles", "thankless", "avers", "hochzeit", "schuld", "quale", "sublimity", "birches", "crunched", "ratifications", "ringleader", "thundered", "fumed", "feste", "thereunto", "compatriot", "discontented", "droning", "yawned", "scuttled", "wochen", "inoffensive", "erudition", "bedsteads", "perrot", "strictness", "welke", "entretien", "frivolity", "gulped", "subtler", "vestidos", "inviolable", "toten", "riflemen", "insufferable", "clasping", "landen", "interjections", "usurpation", "brimmed", "subjugated", "unlearned", "prostrated", "kaffee", "excusing", "rejoining", "subir", "etiam", "slanting", "maisie", "detested", "overal", "dauntless", "pulsations", "frugality", "apprenticed", "reflexion", "vomited", "loth", "undisciplined", "signalized", "lunged", "alii", "vergil", "wiens", "verts", "opere", "pouting", "watling", "daher", "vrij", "creer", "cranny", "springy", "perplex", "lamentable", "signes", "besuchen", "rebelling", "destitution", "rummaging", "broached", "puckered", "squalid", "shunning", "erhalten", "cinders", "interrogatory", "syndic", "cleaving", "semicircular", "montant", "trow", "overwork", "kirche", "farben", "roches", "pommel", "intermixed", "logik", "rerum", "freemen", "mellan", "linnet", "heightening", "goede", "laddie", "bellowed", "tante", "sair", "questi", "entier", "timbered", "sxi", "unrighteousness", "shrilly", "catullus", "dulled", "nuestras", "interlocutor", "kingly", "chided", "turbans", "acquit", "tota", "choisir", "hvor", "singe", "stunden", "harping", "etwa", "akimbo", "beeches", "seule", "augmenter", "hieroglyphic", "aryans", "banishing", "unicameral", "clamour", "sopra", "alvar", "punkt", "dunkel", "erle", "unadorned", "prefaced", "wijn", "gleichen", "verband", "majesties", "endearment", "fealty", "disputation", "leicht", "whoso", "thracian", "forerunners", "exhalation", "investiture", "animates", "ruffian", "turkestan", "balthasar", "ourself", "invariable", "inclines", "southey", "patronising", "deciphered", "shudders", "voie", "gerne", "ardently", "granitic", "untried", "luise", "narada", "intruded", "marmaduke", "coppice", "autocracy", "backwardness", "undiminished", "caput", "connaissance", "discomforts", "clammy", "indisputably", "rifled", "meglio", "pomerania", "fane", "latterly", "flogged", "disadvantageous", "philological", "enamoured", "unpalatable", "shrugging", "disse", "persistency", "conscripts", "chimeras", 
"befits", "instants", "denunciations", "pervade", "entrapped", "suerte", "apaches", "archduke", "myriads", "physiologists", "egotism", "motherless", "cien", "tiberias", "chaldean", "comedie", "reciprocated", "squabbles", "buffoon", "tilled", "rumbled", "mittel", "ambos", "disobeying", "drusilla", "sidon", "acrid", "dijo", "trespasses", "conversed", "ingeniously", "howitt", "counterbalanced", "undertakers", "pricked", "coppers", "reddened", "exhortations", "wohnung", "againe", "hijos", "poulet", "degenerates", "demeanour", "broadsides", "closeted", "unceremoniously", "genuineness", "bungay", "poissons", "volte", "suoi", "wirklich", "iho", "crannies", "prospering", "dearer", "familles", "minutely", "seditious", "trotz", "inarticulate", "turba", "brust", "rameau", "silvered", "youse", "seno", "poche", "neuem", "fromage", "gunboat", "drippings", "voici", "alida", "messager", "asceticism", "reconciles", "disentangle", "bestowing", "belie", "ostend", "divining", "balustrade", "fortieth", "adulterous", "slyly", "shied", "plantains", "eveline", "deferential", "enlivened", "coterie", "magnanimous", "plait", "guttural", "prided", "anciens", "capsized", "breslau", "unreality", "weiteren", "murs", "lath", "encampments", "hindenburg", "whiten", "derniers", "entendre", "cuidado", "reynard", "remarque", "katrine", "perused", "refrains", "furrowed", "tabernacles", "virile", "poignancy", "detestable", "pouce", "certaines", "sombra", "narbonne", "voisin", "jilted", "centurions", "poring", "quivers", "flaunting", "peeped", "kiu", "ellas", "quer", "wails", "gild", "debonair", "indignantly", "invigorated", "bucolic", "disaffection", "grappled", "executioners", "belial", "harde", "blessedness", "courtesies", "misericordia", "apotheosis", "jette", "bettering", "tigress", "geworden", "occhi", "chante", "bleating", "stratagem", "squatted", "dagon", "hugues", "atalanta", "partage", "authoritatively", "unpleasantness", "bettered", "imbecile", "gravest", "defilement", "butting", "gobbled", "hispaniola", "conceives", "townsfolk", "afflicts", "thinness", "counteracting", "marilla", "ramshackle", "dullness", "syllogism", "wrenched", "giovane", "usurping", "arouses", "augustinian", "scald", "rois", "rodolphe", "heliotrope", "aquiline", "reapers", "uncouth", "allein", "whimpering", "eleazar", "portent", "fatten", "crossly", "hadst", "fier", "admonish", "battlements", "transgress", "leant", "lank", "governorship", "tolled", "zealously", "aen", "dowager", "werken", "squealed", "convents", "romane", "vertrag", "usurper", "recitations", "inculcate", "olla", "encumber", "blut", "golfe", "wier", "unimpaired", "liue", "heedless", "rancor", "trots", "providential", "freiheit", "daresay", "kapitel", "liberality", "principes", "semaines", "stort", "indulges", "unthinking", "tutta", "marcelle", "flossie", "inestimable", "whiles", "henne", "distrusted", "prie", "mohawks", "ignoble", "frankish", "jeroboam", "timidly", "lurked", "greyish", "imitative", "igual", "pagodas", "ganze", "hobble", "maan", "roten", "kannst", "tills", "repentant", "comite", "meanness", "wege", "biding", "unassailable", "sidan", "mutters", "singhalese", "mammon", "cavour", "discoverable", "letty", "tombe", "beltane", "whir", "afflicting", "posto", "biographers", "escrito", "hyacinths", "demandes", "freeholders", "ventre", "facetious", "tinkle", "wormed", "histoires", "weiber", "approche", "civilly", "unhurt", "incredulity", "yawns", "croker", "liisa", "proscription", "foretell", "hoards", "boccaccio", "whimpered", "businesslike", "egypte", "juba", "frill", 
"landward", "cripples", "amusingly", "cornices", "ostentatious", "vrai", "pocketing", "bereits", "shylock", "deseo", "paymaster", "canaanites", "carnac", "gnarled", "doce", "gnashing", "preuve", "plod", "damals", "covetousness", "dammed", "piebald", "unawares", "scornful", "crosswise", "tuneful", "hache", "girolamo", "quienes", "humdrum", "distended", "faun", "parler", "folgen", "fatness", "summe", "lente", "dangled", "fixedly", "feebly", "objekt", "vexation", "bastions", "bailly", "threadbare", "emissaries", "weh", "vertue", "subsiding", "hebe", "purred", "lieve", "contingents", "squirmed", "haren", "sangue", "cringing", "saal", "kleinen", "hys", "outstrip", "demerits", "highwayman", "contes", "hussars", "fatherly", "jehu", "southwards", "swerved", "unas", "recurred", "roams", "fuhr", "hemos", "terrify", "licentiate", "periode", "innerhalb", "inflammable", "freundin", "disowned", "parlement", "surmount", "hellenes", "unheeded", "siecle", "nicholl", "magis", "wolle", "apprendre", "habitations", "warf", "cowering", "overhear", "tawdry", "doublets", "saintes", "buona", "gaspard", "skall", "canonized", "solicitous", "findet", "vorbei", "hulking", "realidad", "seconde", "carcase", "caballeros", "unwound", "whiche", "progres", "reveille", "garrisons", "professeur", "shames", "schicken", "predominated", "wilden", "pittance", "gironde", "gosse", "escutcheon", "winging", "alcibiades", "schatten", "curds", "sinfulness", "recapitulation", "trudged", "junger", "hummed", "convalescence", "verite", "spada", "priam", "unceasing", "disdainful", "cackling", "blancs", "freres", "aimer", "parsnips", "trembles", "davon", "dryly", "ingratitude", "postes", "godt", "largesse", "humped", "mooie", "rowboat", "perfections", "restive", "hackneyed", "canticle", "peine", "naivete", "circuitous", "frieden", "imploring", "erebus", "abridge", "picardy", "glisten", "clubbed", "turnings", "unblemished", "trenchant", "lilla", "volleys", "hommage", "girlhood", "freshening", "rill", "andar", "lodgment", "clumsiness", "witless", "regale", "crus", "siya", "amuses", "pallor", "unwholesome", "parsifal", "copra", "journeymen", "filipinas", "hippolyte", "marsa", "galling", "vei", "quitted", "tomba", "musta", "brawny", "quella", "fueron", "prattle", "partakers", "climat", "ilium", "livy", "incorruptible", "puritanism", "carthaginian", "assiduously", "nibbled", "appeasing", "piquant", "grond", "magno", "leute", "unreservedly", "tattle", "baste", "manier", "willst", "inseparably", "anthers", "buttonhole", "uncivilized", "insensible", "seasick", "redouble", "theodosius", "liberte", "rostrum", "ejaculated", "eux", "sables", "pian", "admonitions", "shewing", "suelo", "cower", "erfahren", "inferiors", "singed", "gird", "territoire", "pierces", "jugend", "kleidung", "erfahrungen", "solicitude", "pawnbroker", "reverently", "deign", "eher", "hominy", "doting", "fuerza", "blistered", "glittered", "hanseatic", "pestered", "preeminence", "billows", "biens", "etten", "carted", "despots", "gnaw", "bandied", "liegt", "vinden", "rijk", "perversely", "bors", "transfigured", "dauer", "quizzical", "couper", "informers", "resentments", "bartered", "sugared", "spittle", "circumspect", "demerit", "shouldst", "roundness", "acrimonious", "pulpits", "warding", "unbuttoned", "brot", "feit", "frolics", "groat", "matins", "formes", "bellowing", "platon", "abhorrence", "verbo", "osten", "blackish", "emme", "aphorism", "emanation", "miscreants", "unction", "redan", "seguir", "noblemen", "barque", "deride", "kirke", "houseman", "sedges", "pitiless", "zwarte", 
"portly", "jangle", "jarl", "beauteous", "veld", "contrive", "huguenots", "estimable", "scowled", "ministration", "willet", "wriggle", "impudent", "xlii", "petted", "meist", "prude", "heroically", "phoenicians", "enjoining", "willen", "hustled", "jinny", "surreptitious", "petulant", "unfurled", "sauf", "lits", "chinaman", "nonchalant", "disloyalty", "laconic", "westwards", "nase", "paha", "askance", "misma", "binnen", "baronial", "charrette", "denouement", "belied", "obliquity", "satiric", "quivered", "sche", "sanctimonious", "natt", "ebbs", "obed", "ezek", "heet", "stammering", "waked", "logis", "foolscap", "sorte", "oases", "brach", "limites", "calma", "unmeasured", "statuettes", "nubes", "unga", "gegeben", "satz", "twinge", "cultus", "trudging", "narcisse", "feasted", "rebukes", "colquhoun", "quadrille", "inconnu", "lucretius", "sprach", "ihres", "docteur", "meubles", "whome", "repressing", "embroideries", "booke", "ingenio", "intellects", "brawling", "veut", "tient", "gelatinous", "meilleures", "figur", "gentlemanly", "underbrush", "bemoan", "norsemen", "forsaking", "souvent", "bobbed", "diversities", "gouden", "pontus", "unintelligent", "holies", "annexing", "vriend", "amas", "asylums", "satires", "coffer", "costliest", "ravaging", "rarefied", "nebel", "gleichzeitig", "leyes", "deprecate", "lvi", "serait", "esos", "chivalrous", "overruling", "gendarmerie", "konnte", "groene", "obstinacy", "caked", "delude", "similes", "seeme", "puertas", "recedes", "wroth", "emetic", "gestellt", "holde", "capitale", "steamboats", "naturelles", "towered", "fastness", "gautama", "alsatian", "unrighteous", "torpor", "leser", "desecrated", "transgressed", "publiques", "rawdon", "endeared", "arsene", "pecked", "colonne", "dozed", "outstripped", "chaldeans", "perdu", "repast", "annee", "majestically", "shapeless", "heen", "contrite", "pursed", "principio", "entreated", "heliopolis", "chel", "righteously", "marvelled", "seductions", "taga", "propitious", "domesticity", "dashwood", "veta", "chastise", "inveterate", "peacefulness", "extolled", "absently", "promis", "breit", "copse", "espada", "highwaymen", "orators", "incorrigible", "abating", "sonore", "feigning", "passant", "liveliest", "sixtieth", "reproof", "filets", "baiser", "credulous", "inflections", "lintel", "allora", "stak", "hereupon", "clod", "alaric", "beneficence", "impregnable", "poca", "dessen", "penmanship", "dese", "girded", "bessy", "inscribe", "adelante", "serenely", "nosing", "crowed", "vnto", "cooped", "overwrought", "vivacity", "incontrovertible", "forenoon", "clotted", "jolyon", "certitude", "marshalled", "approvingly", "waif", "ruder", "suffused", "fanden", "altijd", "artless", "morne", "cowed", "longueur", "deeps", "forger", "busied", "venir", "kith", "vrouwen", "valenciennes", "komt", "noblesse", "jostling", "satiety", "tolerably", "consanguinity", "wint", "convulsion", "slumbering", "heraclitus", "semicircle", "vient", "squinted", "exaggerations", "editorship", "rapturous", "unobtrusively", "sabes", "choicest", "tempestuous", "vaillant", "bamboos", "noticia", "signora", "flitting", "laboriously", "inmost", "jehan", "vorhanden", "poesie", "snuffed", "cannot", "vache", "sere", "slighted", "keinen", "maner", "stammer", "inordinately", "fidget", "borst", "comprehends", "gleams", "sieges", "magnifique", "pollux", "sieben", "muzzles", "peleg", "punic", "oser", "saman", "epirus", "fantastique", "tilbage", "astern", "pelted", "stoutly", "insinuating", "auge", "leib", "unequally", "profligate", "sated", "acht", "apprise", "bothe", "goda", 
"beady", "oberst", "abdicated", "reveries", "hauteur", "unerring", "arter", "euer", "denizen", "elegiac", "bivouac", "owain", "doggedly", "hermano", "ladyship", "kneeled", "longe", "rire", "marcha", "problematical", "tanden", "drapeau", "crackled", "defenceless", "pricking", "invalids", "eiland", "harbouring", "droite", "fastens", "igen", "paysage", "fleshly", "striven", "lurched", "blotches", "persoon", "herre", "pistil", "legen", "northumbrian", "apprehending", "werde", "insinuate", "deadening", "froid", "angele", "dolt", "propria", "schreef", "agreeably", "scouted", "intime", "splendors", "capstan", "feint", "muscovite", "pursuer", "letto", "wrappings", "daunted", "candido", "ske", "aurore", "couplets", "socialistic", "narrowness", "dwelleth", "mogelijk", "moustaches", "manzoni", "brushwood", "arrogantly", "traurig", "lieux", "barricaded", "pillaging", "vingt", "tief", "perles", "bungling", "impel", "schlecht", "expectantly", "perching", "solum", "broiling", "gangway", "tantalus", "rapacious", "uniquement", "debased", "concubines", "jogged", "sentido", "entangle", "steepness", "franchi", "puritanical", "capacious", "prefects", "clew", "biscay", "unrolled", "tambour", "watchword", "drummed", "verging", "interdict", "geplaatst", "scamper", "devoutly", "transmigration", "deshalb", "redoubt", "meus", "kerk", "revenant", "instil", "boastful", "bilious", "orsini", "despondency", "disheveled", "exclamations", "allegories", "entonces", "trudge", "mincing", "scurried", "setzt", "homesickness", "metamorphosed", "hussy", "stoicism", "congregated", "covetous", "ewer", "grootste", "doux", "directe", "hysterics", "procures", "stimme", "aceite", "concerne", "devours", "waists", "judaea", "leden", "quidam", "potentate", "barbarity", "extirpated", "charlatan", "slouching", "susceptibilities", "plaited", "floe", "surtout", "agonies", "misjudged", "writhed", "beine", "housemaid", "eurydice", "undeserving", "untruth", "directement", "preyed", "relent", "zillah", "verba", "horsehair", "seinem", "handelt", "gien", "mandarins", "sforza", "indifferently", "nevil", "shuns", "teile", "retinue", "hulda", "impostors", "stehen", "brawls", "derangement", "mesmo", "hinaus", "epictetus", "impertinent", "ouvrir", "buffeted", "physiognomy", "hecuba", "oiseau", "behooves", "misshapen", "scrubby", "jedoch", "unpolished", "vales", "steadiness", "ceaselessly", "irishmen", "charmes", "succor", "branche", "efecto", "ague", "sodden", "helpe", "changements", "unavailing", "vagabonds", "irreverence", "ditt", "chaises", "statesmanship", "papst", "popolo", "saner", "tendre", "halla", "demoralizing", "prest", "disillusion", "frocks", "poner", "thronged", "iets", "beseeching", "irksome", "burgesses", "abbess", "minuit", "uncounted", "schoolroom", "varus", "terrasse", "teufel", "teaspoonful", "rambled", "bertin", "monta", "kneaded", "fertilised", "rosse", "emanations", "veiling", "squandering", "wahrheit", "quiescence", "gilet", "widowhood", "eut", "swarthy", "abyssinia", "populaires", "poetically", "durance", "farnese", "chid", "menaces", "desir", "ambling", "perilously", "numbed", "acteurs", "regel", "bathes", "drover", "wees", "dogmatism", "chasseur", "grudging", "reciprocally", "effusions", "snared", "brogue", "passeth", "gret", "namn", "squeaked", "seance", "stilled", "bygones", "assez", "mentre", "contentedly", "roughest", "entreaties", "ridiculing", "alternations", "penitence", "discours", "avails", "velvets", "completer", "streit", "recevoir", "tactfully", "speake", "gericht", "borde", "drunkards", "danton", "hurries", 
"smolensk", "terreno", "tweede", "ouvert", "duchesse", "mingles", "strafe", "corrals", "rectitude", "semble", "engen", "erreichen", "encircles", "garratt", "jorden", "uncleanness", "viens", "pried", "supplications", "onely", "deportment", "marchandises", "invidious", "weten", "seraphic", "gedanken", "malevolence", "wetten", "alcalde", "judicature", "vigueur", "einzelne", "exhorting", "libation", "facit", "soient", "duas", "rechts", "bagatelle", "chaine", "nonchalantly", "drenching", "verhaal", "subi", "chiens", "prance", "lapsing", "suivre", "edifices", "gruel", "fing", "exasperating", "grievously", "hauts", "partout", "hesitancy", "courte", "chafed", "kennen", "interposition", "callings", "satisfactions", "distrustful", "incredulously", "zij", "obsequious", "moyens", "dissolute", "briefest", "lamplight", "sharpshooters", "druggist", "absolu", "unprincipled", "sweated", "lieth", "flinched", "zeer", "pacification", "nitrogenous", "sackcloth", "enraptured", "indique", "boeuf", "fidgety", "disown", "sophistry", "illumined", "thir", "agonized", "pickpocket", "warbling", "shriveled", "conformable", "imprisoning", "incongruity", "uselessly", "gallantly", "bended", "drang", "poignantly", "untiring", "hostelry", "slumbers", "forfeiting", "fertig", "humphry", "numberless", "intemperance", "definiteness", "reproved", "privation", "westen", "peevish", "tapio", "pedagogue", "soothsayer", "facings", "multiform", "peuple", "herculaneum", "carthaginians", "micheline", "indelibly", "ashy", "cependant", "cruelties", "unseren", "cadences", "slavish", "bawling", "awestruck", "bluer", "felicitous", "caravel", "calles", "plaudits", "schooners", "mycket", "chacun", "demander", "weniger", "eltern", "adepts", "clefts", "kapital", "underhand", "sophist", "heimat", "idolatrous", "secundum", "smouldering", "tradespeople", "untersuchung", "polytheism", "varias", "revellers", "rebuff", "appellations", "draughtsman", "boulet", "verandas", "pwh", "pindar", "iscariot", "bombast", "soyez", "bateaux", "impulsively", "cuarto", "seeth", "milch", "depredations", "dews", "kalt", "temerity", "mlle", "eluding", "adventitious", "interdit", "corked", "deluged", "fleecy", "antelopes", "daub", "unanswerable", "darkens", "excellencies", "strahl", "isak", "gedicht", "atque", "untainted", "eigenschaften", "slays", "crees", "whirring", "miserly", "troth", "contemptuously", "frequenting", "mannes", "celerity", "grottoes", "marthe", "milliner", "komma", "blase", "hoose", "exonerate", "righted", "sayd", "travailler", "imperishable", "degen", "spurn", "famished", "romping", "oozed", "cuanto", "contient", "devrait", "bidden", "tuileries", "samen", "contraire", "vasili", "monopolized", "abstruse", "stripling", "overshadowing", "succour", "whizzing", "headman", "saat", "mellowed", "ebenso", "contiguity", "morts", "retracing", "similitude", "servent", "verdure", "sward", "exclusiveness", "anwendung", "forse", "deines", "tira", "reclined", "throbbed", "divines", "prostration", "wretchedness", "admis", "festooned", "barest", "steadfastness", "boog", "digressions", "diocletian", "fellers", "begrudge", "xliii", "coxswain", "schriften", "counselled", "sentries", "reproaches", "pediment", "hayti", "geef", "cassio", "meinem", "wanneer", "baleful", "swifter", "timotheus", "hulp", "gelten", "miroir", "promesse", "apenas", "hillock", "fearlessness", "neben", "waggon", "unalterable", "beelzebub", "inexpressible", "indios", "cherishing", "crooning", "bref", "wist", "eius", "disavow", "peals", "mariette", "backsliding", "ziehen", "whisking", "wantonly", 
"samovar", "zweifel", "oppresses", "footstep", "stewing", "schnee", "acrimony", "bristly", "soever", "ruefully", "unfavorably", "slothful", "sitt", "diep", "exhorts", "moloch", "epigram", "wafted", "keepe", "expends", "golde", "reassuringly", "thwarts", "sitz", "staats", "jedenfalls", "abhorred", "zeigt", "sollten", "mene", "worketh", "phosphorescent", "sauntered", "foundling", "illiberal", "deserting", "onlooker", "deathless", "assurer", "scandinavians", "legate", "dissuaded", "paled", "ascribes", "hearths", "duller", "discoverers", "furled", "denken", "caminos", "esdras", "typify", "ganzen", "commissariat", "seele", "abydos", "cornfields", "ebbing", "evelina", "resta", "portents", "venetians", "unnerved", "demain", "participles", "harmlessly", "purty", "possessors", "mephistopheles", "pologne", "seene", "fortes", "liveliness", "godson", "passa", "peur", "conserver", "paling", "deur", "bisher", "schwester", "autocrat", "shouldering", "hovel", "gauls", "conforme", "honneur", "stirrings", "decider", "lusitania", "rustled", "unquenchable", "foreseeing", "indolence", "profundity", "lawe", "paru", "vostro", "turgid", "exigency", "exige", "necesario", "reined", "prend", "unenviable", "genau", "unfeeling", "cooing", "haine", "bishopric", "espoir", "severest", "lesse", "beautifying", "glistened", "encroached", "corriente", "suppleness", "irascible", "eigenes", "canute", "vibrated", "denuded", "rendre", "subjugate", "commissaire", "gulden", "naturaleza", "niobe", "incorporeal", "orderlies", "thrushes", "dient", "ferried", "wriggling", "crape", "mouldy", "amant", "merest", "wordes", "perpendicularly", "expounding", "nutzen", "gestern", "swaddling", "benighted", "hysteric", "robespierre", "tillbaka", "exultation", "fand", "blanke", "selfsame", "overcoats", "calvinists", "grovel", "soberly", "therfore", "mellem", "gayest", "vais", "fetid", "boatmen", "vespasian", "singleness", "kette", "yearnings", "remise", "unquiet", "einzige", "herbage", "adduce", "twaddle", "unitarians", "unutterable", "outshine", "parisians", "stellt", "patronized", "aldus", "pommes", "inelegant", "clambered", "histrionic", "subsists", "degenerating", "recommande", "sergius", "taciturn", "sways", "bristled", "flecked", "mustering", "allemande", "sophy", "paramaribo", "betrothal", "boorish", "posa", "queste", "sinon", "devoir", "hunde", "adjoined", "soumis", "pire", "vilest", "niin", "vassals", "throttled", "fonder", "entrancing", "elope", "seid", "nehmen", "welshman", "beguiled", "besoins", "violetta", "stillen", "sinew", "mordant", "clotilde", "ascribing", "zahl", "compter", "germanicus", "declension", "fawns", "damaris", "anodyne", "dearie", "verum", "voller", "lequel", "enigmas", "kinde", "bezoek", "humored", "befalls", "endlich", "yli", "primeros", "chere", "fussed", "anabaptists", "xliv", "disembarked", "burgundian", "telles", "pente", "thumped", "superbe", "conjectural", "tendance", "idlers", "eigentlich", "hoog", "contortions", "effusive", "heilig", "cloistered", "redoubled", "choristers", "bosoms", "flapped", "supernumerary", "aqueducts", "ngon", "reprobate", "despues", "indiscretions", "riper", "forsook", "hittites", "tatler", "prelates", "unserem", "ensigns", "sauve", "miei", "spendthrift", "antipodes", "chers", "grossest", "shanties", "ploughs", "lashings", "noemi", "loue", "persecutors", "averred", "valueless", "imperceptibly", "jaren", "uden", "dise", "crevasse", "hastens", "huizen", "davantage", "brilliancy", "gushes", "marechal", "surer", "frae", "traitorous", "hacen", "levite", "quieting", "candour", "pacified", 
"drin", "gored", "remunerative", "intricacy", "coralie", "pendulous", "eare", "mourner", "enfold", "wirst", "troubadours", "amours", "reentered", "paupers", "bludgeon", "welled", "naturae", "inconsiderable", "cotyledons", "cackle", "sallow", "gemaakt", "montagnes", "reformatory", "demeure", "ostentation", "ninguna", "cherishes", "souper", "wrathful", "thuis", "partook", "ehe", "familiars", "blacken", "zorg", "possibles", "vannes", "schemer", "lika", "actuellement", "deiner", "writhe", "friendless", "proboscis", "fitful", "sicut", "genii", "intrust", "illi", "dishonoured", "unquestioning", "desultory", "fabrique", "pitifully", "egen", "menacingly", "emmeline", "linken", "disinclined", "lackeys", "codicil", "puerile", "kleber", "journaux", "worthlessness", "oblation", "franziska", "caracalla", "civilizing", "conseiller", "corneille", "merken", "dorp", "palaver", "gorgias", "tribu", "unvarnished", "overran", "folies", "wretches", "hoarsely", "bonhomme", "hellenism", "statecraft", "familien", "propia", "flout", "studiously", "reveled", "confounds", "pitiable", "countrie", "reiteration", "corsairs", "indiscreet", "duelling", "pedantry", "lugged", "debilitated", "blazon", "gars", "looseness", "neglectful", "gamla", "pillaged", "voces", "reasonings", "vestido", "agathe", "niemand", "tost", "worthily", "passy", "verfahren", "insomuch", "anneke", "scruple", "steadied", "coolie", "honeyed", "recoiled", "comprendre", "disliking", "chinks", "unripe", "shipmate", "convulsed", "noce", "cleanness", "unmolested", "insistently", "fording", "linie", "telegraphs", "coverts", "transgressors", "redolent", "impudence", "ananias", "vied", "eulogies", "weakling", "griefs", "yoked", "steeples", "tares", "detto", "tottering", "grossen", "scalps", "despaired", "quails", "satiated", "plupart", "principaux", "lightnings", "repenting", "souldiers", "manliness", "churchmen", "parthian", "knowen", "chirped", "facta", "himselfe", "derisive", "imbibed", "hanoverian", "samma", "warton", "equipage", "prophesying", "abodes", "kring", "spouted", "clanging", "windpipe", "veronese", "guiltless", "burnings", "caractere", "estaba", "distresses", "retaken", "heere", "intermingling", "foundered", "mandat", "blinde", "dispensations", "irretrievably", "thralls", "crise", "connivance", "miscreant", "bitterest", "uncertainly", "resenting", "kingdome", "familiarly", "reviens", "scowling", "swaggering", "grandly", "publicans", "graciousness", "footlights", "smarting", "pueda", "hatreds", "imperil", "salamis", "supplie", "zweite", "censer", "surfeit", "schneller", "obeisance", "whelp", "fantaisie", "monnaie", "ignominious", "entschieden", "sulking", "keenest", "ungainly", "darstellung", "bauble", "circlet", "rouses", "dormir", "consolations", "enslaving", "medes", "deale", "odorous", "indefinable", "faits", "kenne", "ironical", "sympathized", "uncultivated", "functionary", "suppositions", "jehoshaphat", "chevaux", "elegies", "carbines", "richt", "kaffir", "livelier", "gervase", "grenadiers", "bruit", "acacias", "magnanimity", "aleck", "propio", "fiesole", "gallops", "dexterous", "connaissances", "hebt", "beaute", "hoor", "modernes", "undignified", "stesso", "conocimiento", "mord", "endear", "effigies", "folge", "counteracted", "planking", "blockhouse", "confiance", "urbanity", "lawgiver", "totter", "rumpled", "scalded", "importations", "laughingly", "prefaces", "tenue", "idolaters", "seducer", "haire", "tenaciously", "moonbeams", "inculcated", "monate", "verschiedene", "wohin", "generall", "reposed", "cicerone", "mustaches", "hasard", 
"leddy", "mildest", "restlessly", "uselessness", "lezen", "doet", "oaken", "endroit", "harlots", "conduite", "rouges", "humours", "humain", "voltaic", "derriere", "xlviii", "flot", "cudgel", "aurait", "multifarious", "runneth", "tenu", "llegar", "abhors", "minarets", "wrack", "bleiben", "vividness", "beatitude", "husbandman", "procureur", "stuk", "douleur", "heaves", "xlvii", "sagt", "passi", "subaltern", "appui", "bharata", "longingly", "apud", "bandes", "roseate", "ruffians", "servir", "contralto", "tenter", "rues", "dote", "valdemar", "curtly", "resuscitated", "exemples", "confidante", "rashly", "athen", "leering", "soudan", "clearings", "pleasantries", "louer", "uomini", "atoning", "insinuated", "xlvi", "warble", "prodigies", "herbes", "phrygia", "overige", "dardanelles", "familiarized", "fakir", "rato", "divinities", "ostracism", "magasins", "buttresses", "drovers", "obelisks", "vierge", "doggerel", "existences", "farre", "extravagantly", "hauptmann", "builded", "volle", "slandered", "demagogues", "cephas", "flighty", "opposer", "ejus", "gabled", "convient", "ofta", "enrage", "sinews", "flemings", "glanz", "serjeant", "shadrach", "shallowness", "ensnared", "loyally", "sneezed", "darkling", "subservience", "nightingales", "gaped", "subduing", "apoplexy", "poorhouse", "sunbeams", "kaan", "brigand", "jahrhundert", "chasms", "jealousies", "ditties", "dignitary", "wenches", "dite", "gesicht", "improbability", "shrewdly", "sneers", "bloodhounds", "meed", "impish", "menaced", "seneschal", "deafened", "hooting", "cyrene", "dejection", "economize", "prophetess", "hatchets", "witz", "spoonfuls", "unten", "ebene", "funereal", "wrested", "deceives", "plaint", "imperio", "demesne", "briny", "nimbly", "supped", "calumny", "sigismund", "herrn", "verger", "ludicrously", "portend", "reves", "spattered", "couloir", "straggling", "cochon", "berthe", "acadians", "comtesse", "jailers", "chaud", "disastrously", "intimations", "arzt", "xlix", "heterodox", "manque", "codfish", "debility", "shirking", "rustlers", "demas", "zaken", "aloes", "obliterating", "victuals", "certo", "dully", "leonore", "exalting", "chide", "entrap", "indignities", "nombreux", "rhymed", "whirls", "compassionately", "hussar", "scow", "voorbeeld", "beide", "honora", "remorseful", "obstinately", "zei", "peste", "aggrandizement", "jotted", "unpopularity", "deluding", "boileau", "naast", "charta", "royalists", "lachen", "hennes", "nej", "achaeans", "cravat", "genug", "pinions", "mindre", "praetor", "peche", "sunburnt", "superficie", "grotesquely", "mown", "soms", "vagrants", "transept", "patois", "atlee", "seuil", "petrograd", "aveva", "bulged", "bated", "seines", "thereat", "aise", "recours", "cloven", "apollyon", "intemperate", "confiding", "fleisch", "eares", "compunction", "bonum", "unceasingly", "herdsman", "haat", "frightfully", "reprises", "fierceness", "remodelled", "unpleasantly", "szene", "bouches", "aggressions", "spectacled", "telegraphed", "resounded", "mickle", "sagacious", "moralists", "abimelech", "gehe", "valise", "prompter", "provincials", "distaff", "imbibe", "hisses", "garcon", "doel", "freude", "gnawed", "sieht", "oog", "clattering", "traite", "bleus", "tente", "reverberating", "incomparably", "bearskin", "ripens", "darunter", "benares", "recitative", "factotum", "zoon", "screeched", "quare", "anticipations", "determinedly", "calamitous", "pria", "hughie", "egli", "mopped", "sacrilegious", "fatuous", "elocution", "cilicia", "retraced", "palliation", "kunne", "misanthropy", "protruded", "hanse", "incompetency", 
"mebbe", "plainer", "chambermaid", "sapping", "perfidious", "voyaging", "humiliations", "umbrage", "fatiguing", "awaking", "presencia", "portmanteau", "moralist", "farbe", "legere", "tormentors", "distinctness", "expiation", "insinuation", "indem", "alehouse", "practicability", "swindler", "standen", "inquisitors", "dreamily", "frobisher", "digo", "motivo", "gibbet", "exactitude", "promenades", "grise", "epitaphs", "jostled", "mannen", "globules", "herdsmen", "conmigo", "reprove", "heareth", "ipsi", "inviolate", "zoroaster", "orations", "vistula", "laten", "examina", "erster", "autant", "schrift", "resemblances", "termina", "cuales", "lordly", "complexions", "despising", "assiduous", "verstehen", "epigrams", "dagny", "thenceforth", "girths", "swerving", "surpris", "frappe", "pobre", "lebens", "muerto", "enfance", "gesetz", "portentous", "conjurer", "dramatis", "receiued", "sergent", "hurls", "habt", "couronne", "dullest", "erschienen", "venal", "gebe", "grete", "lauter", "gourmand", "wearisome", "sortir", "exaggerates", "gurgle", "antislavery", "laertes", "apologetically", "clime", "poultice", "ministrations", "gendarmes", "telemachus", "sommet", "remonstrance", "capitulated", "karna", "prettily", "reeking", "cheapside", "citie", "zuerst", "persuader", "epistolary", "flutters", "elemente", "maitresse", "reappearing", "dudgeon", "pilasters", "theban", "kennis", "unwisely", "grammarian", "figlio", "peruvians", "lateran", "sente", "reverberated", "plenitude", "faim", "unpardonable", "robarts", "volgens", "bowmen", "blundering", "dishevelled", "exorcise", "scurrilous", "squalls", "parla", "vaste", "jedes", "shewn", "hiki", "vasudeva", "objetos", "briefe", "valets", "corruptible", "pedlar", "impassive", "abasement", "faints", "vicomte", "pillory", "dieux", "inquirers", "orte", "brahmana", "toren", "prostituted", "quartering", "amorites", "disavowed", "undulations", "redressed", "waifs", "cuyo", "siegmund", "steg", "harangue", "liefde", "yeomanry", "lepanto", "matilde", "passepartout", "gentil", "ablest", "faveur", "dicho", "whitest", "bastante", "handmaiden", "humors", "sollen", "cooed", "knabe", "gunboats", "comradeship", "inopportune", "exhaling", "lurching", "plumed", "poesy", "cheapness", "scythian", "proche", "backe", "sapped", "starched", "tasche", "insieme", "undistinguished", "unes", "gayer", "seceded", "belligerents", "baser", "ribald", "coursed", "habitants", "brusque", "officious", "hert", "gorka", "flannels", "contrivances", "capitulate", "wayfaring", "kammer", "dejar", "disfavor", "staden", "umgebung", "liveries", "sieur", "devez", "anatomist", "laundress", "bugles", "manie", "swindlers", "clandestinely", "sitte", "avere", "fichte", "coolies", "edra", "briars", "tarentum", "chaude", "unfitness", "annihilating", "swathed", "extorted", "tanta", "avaricious", "entfernt", "waft", "popish", "darning", "pasos", "crois", "fidgeting", "resinous", "granit", "flayed", "paramour", "enunciation", "josue", "frailties", "haunches", "morea", "chastened", "dropsy", "impositions", "wriggled", "displease", "agit", "moneyed", "halten", "peligro", "armee", "langsam", "toutefois", "cloche", "neatest", "howitzers", "mantelpiece", "proclivities", "rache", "falkenberg", "imitator", "agonising", "maximilien", "tuer", "meerschaum", "impiety", "loiter", "actuelle", "schwer", "begot", "suddenness", "baneful", "templo", "wenden", "twirled", "furtively", "betrayer", "jingling", "arrowroot", "welcher", "readjusted", "assails", "priestesses", "jostle", "admonishing", "avocations", "allons", "humblest", "haec", 
"mohammedan", "solitudes", "insurrections", "lodgers", "kunna", "cacique", "exalts", "grec", "cajole", "mhw", "swooning", "wincing", "unswerving", "enjoyments", "thirsting", "savants", "kentuckians", "monarchical", "celebes", "divans", "immodest", "perquisites", "flatters", "gedichte", "herzen", "beurre", "meni", "sayest", "lutter", "heissen", "voeux", "juges", "papists", "jeer", "premeditation", "waken", "tearfully", "sagged", "pugnacious", "companie", "bedecked", "finalmente", "soin", "oftener", "motioning", "saunter", "universelle", "firmin", "llamado", "versant", "flaxen", "pseud", "soie", "tempter", "miscarried", "rivulets", "corde", "appertaining", "nostre", "prochaine", "lohn", "partridges", "qualche", "nooit", "swum", "dunkle", "staan", "brakeman", "regretful", "coasted", "democritus", "yawl", "endast", "permettre", "drooped", "mehrere", "exacts", "licentious", "antiguo", "fermer", "deadlier", "doest", "romanus", "agog", "ponts", "liii", "yeomen", "lothario", "maal", "charybdis", "wazir", "habituated", "doff", "fede", "jests", "brandished", "jeremias", "raisons", "gouty", "twined", "comprend", "resister", "stoics", "soldiering", "viso", "tyrannies", "natuur", "greenbacks", "puesto", "sullied", "calvinistic", "abridgment", "frequents", "faite", "hoffnung", "leipsic", "bekommen", "fiercer", "entreaty", "creaked", "disconcerted", "roule", "interpose", "saan", "neveu", "hearkened", "mournfully", "surprize", "tenanted", "kerchief", "marvellously", "allerdings", "unenforceability", "moralizing", "phantasmagoria", "glutinous", "pretexts", "recollecting", "omdat", "jemand", "hundredweight", "hags", "severities", "sobered", "fournir", "coiffure", "forasmuch", "lige", "aliment", "moeten", "salir", "caprices", "laufen", "blockaded", "ignominy", "tempests", "scythia", "recriminations", "olim", "geeft", "dismally", "insinuations", "smiting", "hapsburg", "bevor", "zeiten", "lulls", "pompeius", "peux", "misrule", "unasked", "illo", "kuka", "copiously", "freien", "wildernesses", "perpetration", "transmuted", "abideth", "blaspheme", "blacking", "quelled", "threescore", "sitteth", "keenness", "quickens", "scornfully", "puerperal", "multis", "worldliness", "croaking", "ignoramus", "howbeit", "sisterly", "briers", "ouvrage", "faible", "avidity", "gascon", "bergs", "accustom", "consiste", "venez", "prouder", "pleaseth", "cottonwoods", "dienste", "superintending", "spectres", "poetess", "moluccas", "leguminous", "brigands", "quarrelsome", "moine", "damnable", "etruscans", "poeta", "tottered", "theil", "disdained", "shrivel", "ouvrages", "avaient", "firstfruits", "sinne", "daran", "untying", "slights", "throbs", "whitened", "genoese", "inclosed", "couche", "dismounting", "procede", "fattened", "planche", "vasari", "freier", "enkel", "jupe", "heaths", "enjoins", "terrestre", "insuperable", "recapitulate", "vois", "drays", "rester", "enceinte", "starlit", "wohnen", "inauspicious", "prescience", "capitaine", "magnates", "predilections", "picketed", "knaves", "sware", "scampered", "imposible", "academical", "krank", "ploughman", "heilige", "mettez", "conscientiousness", "basilio", "morceau", "splendide", "arabes", "cire", "acceptation", "schlug", "novitiate", "humoured", "idolized", "rivulet", "seethed", "geest", "etruria", "geboren", "senti", "allayed", "pored", "perceval", "wagen", "antiquary", "muscovy", "shoemakers", "zullen", "diggings", "legte", "emancipate", "achter", "burghers", "ignorantly", "ancor", "erlaubt", "diviner", "laisser", "bleibt", "discoloured", "gooseberries", "jahres", "wolde", 
"quarreling", "enterprize", "augustan", "fruitfulness", "slanders", "quelli", "embalmed", "uprightness", "stephanus", "apposite", "milles", "slaveholders", "kansan", "parlez", "nimi", "arbres", "kloster", "zulus", "limpid", "bridled", "forecastle", "statuesque", "polyphemus", "knowed", "encouragingly", "harboured", "foole", "misschien", "dolorous", "benefice", "unenlightened", "sagte", "croaked", "symbolical", "magistracy", "alighting", "schritte", "foretaste", "porthos", "incoherently", "ladylike", "iphigenia", "pleine", "allured", "jahrhunderts", "lucilla", "constitue", "sogar", "palpably", "weder", "improbably", "expressionless", "bowstring", "sickens", "jolting", "soundless", "hadde", "freest", "unspeakably", "gestalten", "unconquerable", "contemplations", "foretells", "empor", "pasteboard", "mangy", "artaxerxes", "misapprehension", "perche", "reverential", "sledges", "schoolmate", "utiles", "denke", "befinden", "infallibly", "unbidden", "callousness", "bloss", "tooke", "prefatory", "herakles", "extirpation", "pantaloons", "noiselessly", "adventuress", "fluch", "commodious", "pincers", "freshened", "artificer", "animo", "entangling", "quarrelling", "blackening", "appeareth", "partakes", "regaled", "disputants", "freundlich", "junks", "ingenuous", "floundered", "entrer", "jeered", "strabo", "assignation", "kleider", "mismos", "sheeted", "beefsteak", "undervalue", "pensar", "reden", "particuliers", "oratorical", "sacerdotal", "baying", "dikke", "dieren", "fief", "poate", "repents", "cleverer", "scheiden", "recommandation", "nimmer", "goaded", "ecke", "mislaid", "rotund", "zenobia", "pickaxe", "babbled", "gentlest", "sibi", "besiege", "blandly", "hobbling", "myn", "miletus", "scythians", "mainspring", "dinge", "slake", "drame", "dirent", "jedem", "speared", "attaque", "galleons", "sensorial", "legation", "strutted", "leafless", "deigned", "slaver", "iseult", "recommence", "giue", "aventures", "hellespont", "anciennes", "dalliance", "youthfulness", "privations", "trouvez", "monstrosities", "assai", "goest", "bonbons", "chroniclers", "vitam", "erregt", "dignities", "livings", "ferryman", "mockingly", "caisses", "devolves", "perder", "chemins", "hoeing", "debauched", "doute", "parlons", "loquacious", "vore", "saada", "annat", "displeasing", "intrusted", "prudish", "pelting", "drizzling", "soothingly", "wayfarers", "englanders", "flouted", "worthies", "courtesans", "heavenward", "theodoric", "meget", "charmian", "bezit", "ustedes", "exhilarated", "ansicht", "clanking", "repugnance", "joyless", "execrable", "lucrezia", "loftier", "stolid", "unacquainted", "simonides", "pawing", "balcon", "visigoths", "titter", "otranto", "defraying", "mondes", "charlot", "deified", "grecians", "princeps", "sumptuously", "unemotional", "coarseness", "universel", "enormes", "piedi", "flamme", "selber", "flitted", "toen", "gants", "disproportion", "counterpane", "gulfs", "gewalt", "surnamed", "logique", "deare", "venerate", "tomahawks", "scoffs", "unsavoury", "zephyrs", "exemplification", "waarom", "pleader", "lieben", "bawl", "casque", "cleverest", "convolutions", "siendo", "verloren", "foretelling", "munched", "vrienden", "receiveth", "jene", "ostler", "waddling", "pencilled", "escalier", "drachm", "colline", "plebeian", "eintritt", "ionians", "bekannt", "grammarians", "pflanzen", "undefiled", "furred", "segun", "overhearing", "puissant", "donnez", "blundered", "meines", "congealed", "pierres", "pouvoirs", "maister", "yit", "blasphemies", "covenanted", "disparagement", "anstatt", "minut", "teint", "sachen", 
"pretences", "unimpeachable", "meditates", "cheerily", "faintness", "effaced", "meself", "beguile", "revenus", "dagar", "rearguard", "saide", "inextricable", "rameses", "popery", "trustful", "lewdness", "sanat", "satiate", "sorge", "stupefied", "treu", "caire", "brasses", "lethe", "secondes", "tepee", "euphemia", "joue", "measureless", "scandalized", "jerkin", "stunde", "aforetime", "reflectively", "trackless", "patroness", "impossibilities", "inconsolable", "shouldest", "explicable", "plucks", "wreathed", "criminel", "alexius", "marksmen", "enthusiasms", "slaven", "standeth", "geven", "lesbia", "quellen", "worte", "drave", "blowed", "vare", "canting", "propitiation", "sinewy", "gamekeeper", "dulcie", "agir", "maakt", "uproarious", "gebruikt", "penitential", "glinting", "seeketh", "condescend", "terrifies", "humbler", "expence", "cavaliere", "pettiness", "slackened", "heur", "hija", "predominating", "auftrag", "endureth", "unapproachable", "boons", "vouchsafed", "lunga", "gamle", "philibert", "cordiality", "billow", "relativement", "inconstant", "effete", "storehouses", "carcases", "crestfallen", "iemand", "gloomily", "pouted", "lunching", "wakened", "eerst", "sidled", "tartars", "ebbed", "steckte", "issachar", "astir", "reasserted", "trente", "hardi", "reeked", "dispirited", "insidiously", "divined", "revelling", "mazzini", "befahl", "lovelier", "odium", "fettered", "hustings", "rasping", "besotted", "charioteer", "papered", "primum", "clamber", "adroitly", "ferne", "descente", "holte", "alders", "tache", "unformed", "ducats", "watchfulness", "gottes", "kleines", "steamships", "hvad", "cime", "sundered", "irretrievable", "roguish", "tenir", "maand", "ovat", "rapacity", "sicken", "elopement", "ardente", "worke", "folles", "besuch", "rummaged", "peons", "incontestable", "languor", "israels", "frivolities", "mantilla", "instante", "slovenly", "ambled", "celebre", "clementina", "necesidad", "hesitations", "protagoras", "curtained", "purloined", "lounged", "rustics", "purposeless", "visites", "skirmishers", "flinching", "certaine", "trumpeters", "disbelieved", "anderes", "tableland", "plaatsen", "infini", "revile", "unselfishness", "burrowed", "prussians", "buttercups", "footfall", "cocoanut", "cajoled", "sublimely", "tribunes", "kraal", "meilen", "whizzed", "dritte", "multitudinous", "javelins", "grenzen", "beatific", "bigness", "artificiality", "jeering", "maltreated", "chaperon", "consorts", "stimmen", "priester", "muckle", "vergeten", "causer", "respecter", "bornes", "propter", "churlish", "treasonable", "stowing", "twinkled", "schal", "existenz", "swindled", "vasta", "ridicules", "deres", "wechsel", "gracchus", "undine", "timorous", "soeur", "rende", "ensnare", "spurted", "quarrelled", "beggarly", "mutineers", "schwert", "inseln", "monter", "keiner", "fascinations", "suum", "unhesitatingly", "vivere", "prieur", "treacherously", "repas", "fyra", "disengaging", "propres", "moping", "obviated", "roue", "kracht", "merveilles", "fuerzas", "lunettes", "pirandello", "blare", "historiques", "comest", "sullenly", "kurze", "oppressions", "steadier", "miedo", "trebled", "demurred", "conciliate", "contenant", "ransomed", "donnant", "bedchamber", "chevaliers", "aufs", "calme", "roughs", "drawled", "niets", "ruhe", "florins", "einheit", "sechs", "tagus", "lydian", "pointes", "ehren", "remis", "vele", "imputing", "endowing", "spangles", "peterkin", "armer", "simplement", "brillante", "servia", "disunion", "shepherdess", "sextus", "linge", "lucht", "rueful", "sterk", "unbending", "ideen", "anderer", 
"beispiele", "equinoctial", "constante", "varuna", "jugement", "inheritor", "ginevra", "tarried", "remorseless", "disputations", "querido", "apennines", "gesehen", "wirkung", "redoubtable", "interessant", "antechamber", "seasonable", "clarisse", "moche", "platina", "anden", "viande", "ravish", "dubiously", "battlement", "gamester", "byword", "warded", "stygian", "referable", "rigueur", "jangling", "parfois", "doleful", "baize", "debasement", "besieging", "shrewdness", "interstices", "mayst", "parried", "demanda", "principios", "elbowed", "zahlung", "landschaft", "furze", "neighbourly", "nahe", "haast", "sensitiveness", "gelesen", "gascony", "pawned", "outen", "mendicant", "exigences", "keepeth", "beginnen", "vindt", "giddiness", "gebruiken", "warders", "senat", "retributive", "pyrrhus", "vont", "flagon", "traduit", "innere", "geste", "barefooted", "chattered", "overhung", "demoralization", "pebbly", "stellan", "abashed", "samme", "aurelian", "sacristy", "charitably", "joka", "boutons", "folle", "brooded", "sylvanus", "guter", "dandies", "oracular", "undefended", "lecteurs", "kleid", "hizo", "humorists", "unities", "papiers", "rakish", "effervescence", "enthalten", "unworthiness", "isaias", "moraines", "dorrit", "unflagging", "wur", "corroborative", "komme", "ruffling", "voet", "hardihood", "bougie", "calleth", "greenness", "recrimination", "basked", "embarrassments", "aureole", "disgusts", "nombreuses", "tiden", "sledging", "igitur", "footmen", "recoils", "quadrupeds", "tahi", "bewailed", "morceaux", "roughened", "gewoon", "thinketh", "thoughtlessly", "depute", "besteht", "returne", "savours", "edes", "bulwarks", "clods", "maoris", "mantled", "encouragements", "unfaithfulness", "fenian", "boten", "eateth", "bedraggled", "chiffres", "readier", "ineradicable", "floes", "steadying", "cowered", "monseigneur", "grotte", "verschillende", "pluie", "dispassionately", "mirar", "holen", "slacken", "disgorge", "warre", "avantages", "clamouring", "attainder", "followeth", "communing", "mischievously", "communistic", "jongens", "thys", "zweiten", "chastising", "mouvements", "derisively", "lopped", "spoliation", "pleasantness", "meilleure", "montrer", "phosphorescence", "daba", "lustily", "avantage", "antediluvian", "irreligious", "vindicating", "objeto", "ascetics", "creuse", "scorns", "laggard", "vues", "jadis", "blockheads", "saddening", "llena", "malcontents", "gentes", "nane", "satins", "danser", "unmindful", "indescribably", "unruffled", "inclining", "aquellos", "drapeaux", "animosities", "inured", "pardoning", "weshalb", "somit", "conoce", "giorgione", "enfranchisement", "rebuking", "perceptibly", "cierto", "vitiated", "wizened", "wintered", "comique", "sympathizing", "beziehungen", "townsman", "continuer", "gorged", "mildness", "luckless", "maecenas", "caracteres", "gunwale", "indigestible", "jowl", "prinzessin", "unclosed", "warten", "causas", "inclosure", "voluptuousness", "solide", "paroxysm", "merchandize", "construire", "meester", "whetted", "seraglio", "scourges", "corroding", "lejos", "leadeth", "soupe", "jongen", "guiltily", "teaspoonfuls", "acquainting", "parapets", "twittering", "augurs", "admiringly", "illumine", "selten", "awfulness", "encamp", "henceforward", "scalped", "huddling", "erfolg", "combated", "evinces", "gewinnen", "deputed", "clambering", "surplice", "factitious", "fitfully", "vrede", "ascanio", "perishes", "oncle", "laisse", "blanches", "vieilles", "skulking", "demur", "monstrously", "imposts", "diaphanous", "theodosia", "wagged", "aske", "vilka", "peradventure", 
"surmounting", "satyrs", "grandsire", "evasions", "lumbered", "cortege", "rapidement", "countenances", "beholds", "contradistinction", "scampering", "easie", "tourna", "sainted", "inglorious", "contrario", "whereat", "discuter", "defrayed", "kirchen", "kaum", "trouverez", "repudiating", "insupportable", "undisguised", "discerns", "tantum", "juden", "deaden", "victime", "unalloyed", "venial", "widger", "griselda", "hansom", "nonchalance", "frapper", "regarde", "amoureux", "cypresses", "phrygian", "lamed", "workingman", "scoffing", "hulks", "sauvages", "breede", "ruminating", "honorius", "abjured", "jacobin", "communiquer", "nere", "insincerity", "persecutor", "dichter", "cloches", "crevasses", "singen", "burgher", "ferner", "unstained", "unflinchingly", "subsisted", "notaire", "tamen", "entro", "songer", "surprized", "rehoboam", "fromme", "deputations", "ringlets", "retourne", "scourged", "survivals", "mollify", "commonwealths", "blockading", "shakspeare", "triumphing", "ecstasies", "rends", "nahm", "bilden", "bedclothes", "impertinence", "commissaries", "languidly", "sedulously", "venne", "grimaces", "neger", "loftiest", "decembre", "recommenced", "stuhl", "pochi", "depopulated", "upraised", "formen", "whereunto", "fuit", "vorst", "unfruitful", "conceits", "shrivelled", "geschenk", "jesting", "begriff", "erfahrung", "tendril", "quoque", "dayes", "entendu", "ercole", "indes", "beareth", "sleighs", "pensiero", "licentiousness", "uren", "unshaken", "englishwoman", "limply", "hereward", "ahasuerus", "pythian", "compassed", "hablando", "unsettle", "proconsul", "coarsest", "jenseits", "woord", "gentility", "assizes", "devons", "serue", "quadruped", "honourably", "insbesondere", "chivalric", "helgi", "womankind", "streng", "penknife", "copyist", "eadem", "entwickelt", "solemnized", "palpitation", "haughtily", "valentinian", "kindreds", "counterfeited", "sweetmeats", "tousled", "unfastened", "venire", "courser", "flaunted", "canopied", "dethrone", "vouchsafe", "hereabouts", "blackguard", "unitarianism", "gegenwart", "garrulous", "eftersom", "controverted", "serviette", "venga", "amiably", "schreibt", "sowohl", "nappe", "fulsome", "terribles", "gauzy", "verie", "cornes", "noires", "echter", "mangel", "marcher", "beetje", "vostra", "patrie", "lvii", "dilatory", "unco", "jagd", "debase", "hoher", "alltid", "wollten", "distil", "cinna", "splendours", "fronte", "abreve", "clinking", "apposition", "maddened", "vaster", "florentin", "slouched", "remonter", "aguinaldo", "sorrowing", "revenir", "hohenzollern", "neere", "devient", "moeder", "exultant", "pilfering", "trousseau", "frisson", "kaikki", "unconquered", "farces", "connu", "perjured", "seeke", "eloped", "corpuscles", "obscurely", "dreamless", "dadurch", "lamely", "curdled", "haie", "schoon", "wonted", "gallants", "dasein", "respectably", "fixity", "zehn", "yelping", "vaine", "croesus", "obdurate", "ofte", "tuuli", "absolue", "christabel", "ransack", "belisarius", "schlag", "taler", "piously", "quaintly", "rationalistic", "usque", "partis", "seras", "schritt", "disinclination", "eingang", "aloofness", "arminius", "dilating", "parthia", "felucca", "premisses", "glibly", "putrefaction", "unfortunates", "pottage", "ligger", "tubercles", "herzlich", "manservant", "unluckily", "plumped", "disinherited", "resounds", "crut", "anciently", "tiens", "remaineth", "ratione", "begetting", "gurgled", "scheint", "hopefulness", "poil", "voiles", "hez", "citer", "dehors", "vindictiveness", "potest", "lolling", "aboue", "extorting", "adventured", "elkaar", 
"clattered", "pouvant", "oure", "unsteadily", "sufferance", "muu", "charmant", "mede", "raptures", "dinna", "barrenness", "placidly", "bawled", "enkele", "protoplasm", "dyspeptic", "gaue", "diffident", "affianced", "communs", "zeker", "guileless", "ebbe", "wery", "opprobrium", "geheime", "imputations", "marchioness", "pferd", "capriciously", "ganske", "superintend", "bantering", "indorsement", "perspiring", "dissensions", "baseness", "blotched", "implores", "gewesen", "digne", "hillocks", "jalousie", "straat", "nogle", "solche", "fretful", "geheimnis", "dresse", "inquisitorial", "circumspection", "unsullied", "spirituous", "garrisoned", "supercilious", "soldiery", "skirmishing", "profaned", "ordinaire", "prochain", "ebullition", "avowedly", "notwendig", "remoter", "reflexions", "clamorous", "sois", "scullery", "seemeth", "etait", "blasphemed", "disconsolate", "einde", "antiquaries", "quibus", "whimsically", "spinsters", "hohen", "fahren", "exactions", "cupful", "lugger", "bestimmt", "patricians", "atoned", "tourbillon", "causeth", "unpromising", "geluid", "caissons", "surcharged", "stoff", "quarreled", "suckled", "soort", "pulpy", "militaires", "partaker", "pigmy", "censures", "morir", "digged", "fust", "confessors", "kleur", "braut", "lacerated", "promptings", "vouched", "obligingly", "puo", "yerself", "jael", "tragen", "spinifex", "unexpressed", "lunched", "scourging", "haroun", "manfully", "vidare", "revolutionist", "kennt", "tracery", "ebers", "surmises", "torno", "bedingungen", "falle", "seemly", "catched", "saura", "habet", "preso", "naughtiness", "derecha", "fastidiousness", "demoniac", "penury", "wainscot", "supernal", "impelling", "cellule", "einzelnen", "modeste", "flits", "vacillating", "jocular", "galop", "jacobins", "forsyte", "fathomless", "chiding", "savoured", "algun", "marvelling", "plentifully", "wakeful", "conter", "dicen", "homelike", "swooned", "unsociable", "puisque", "allgemeinen", "fatta", "drear", "erreurs", "buffoonery", "rashness", "pensamiento", "impels", "dissembling", "consistence", "intimating", "dieth", "missis", "appeler", "possa", "aemilius", "slunk", "deswegen", "coadjutor", "footfalls", "lombards", "jego", "jewess", "endued", "sorrowfully", "iniquitous", "tramped", "ecclesiastic", "agriculturist", "hanc", "hildegarde", "waylaid", "blustering", "blauwe", "uniforme", "granaries", "ombres", "dolch", "estaban", "deras", "dishonourable", "bespeaks", "smilingly", "avow", "whar", "certa", "assize", "ducat", "suuri", "schrijven", "nachdem", "hundredfold", "poing", "knickerbockers", "hechos", "fiers", "betook", "caressingly", "hooted", "gjort", "instanced", "shet", "corpulent", "jacobites", "stumm", "veldt", "springen", "moros", "tierras", "mystification", "eorum", "recoiling", "pshaw", "erscheint", "ruban", "apoplectic", "lingvo", "basest", "fitly", "marchands", "flirtations", "conocido", "unctuous", "enlivening", "sentir", "mauvaise", "beaumarchais", "plaints", "entfernung", "startles", "colonnades", "theatricals", "hoogte", "intimacies", "remonstrated", "leichter", "braying", "nuages", "lassitude", "leibnitz", "moonless", "changeless", "sagely", "unfavourably", "valorous", "endurable", "leid", "prolix", "trespassed", "shews", "longtemps", "sidelong", "principalement", "clamored", "einigen", "scheldt", "perte", "idiosyncrasy", "clucking", "glaube", "cualquiera", "donjon", "messieurs", "goutte", "workingmen", "paleness", "festen", "alack", "trivialities", "tristesse", "discourteous", "dimness", "besetting", "daunt", "boue", "vorm", "indisposed", "rente", "drog", 
"strategical", "thermopylae", "ivanovna", "landet", "skola", "amidships", "meete", "garder", "buiten", "beeves", "nemen", "alwayes", "looke", "preternatural", "versuch", "conduce", "sien", "centimes", "feare", "retourner", "neder", "earldom", "indubitable", "juifs", "handsomest", "decorous", "chagrined", "gemeinde", "imbecility", "ouverte", "goud", "buffeting", "doorkeeper", "absolument", "schwarzenberg", "bushrangers", "bounteous", "steine", "lulling", "toucher", "steeled", "patronised", "whisperings", "detests", "haughtiness", "ilka", "defiling", "frenchwoman", "betide", "estime", "emolument", "rivalled", "prithee", "wisse", "expedients", "beautified", "precipices", "llevar", "walketh", "mutta", "diffidence", "tablespoonful", "meum", "bestowal", "tingled", "hangen", "conduire", "unrelieved", "morgon", "ariosto", "swindling", "saragossa", "gladiatorial", "parthians", "parer", "reichen", "bacchanal", "perplexities", "ablutions", "arten", "innan", "vallen", "tulla", "unkindly", "lovest", "stratagems", "carousing", "envies", "condescended", "freighted", "gange", "compagnies", "slackening", "pardner", "wondrously", "dingen", "teilen", "shimmered", "tror", "anteroom", "agriculturists", "marins", "slechts", "watermen", "citoyens", "sorti", "megara", "mayenne", "beardless", "cheerless", "tenido", "goot", "tuch", "wacht", "moistening", "unprejudiced", "explications", "dissimulation", "restes", "pined", "inculcating", "combien", "pensando", "oorlog", "plaits", "fleuve", "agrippina", "neen", "erit", "satt", "budded", "liest", "plaintively", "devenu", "threateningly", "profligacy", "gwendolen", "subtil", "meshach", "videre", "armie", "hoffe", "hungered", "pecho", "bluntness", "kuin", "lebe", "gesticulating", "pourraient", "athwart", "hermana", "shambling", "tenderest", "ordains", "propound", "immoderate", "acuteness", "hewed", "kindnesses", "douze", "unaccountably", "neun", "plainest", "boire", "sech", "pesar", "gavest", "subtlest", "racines", "partaken", "gruffly", "etes", "welkin", "breviary", "lineaments", "unburied", "insatiate", "intolerably", "discomfiture", "puso", "mirando", "threepence", "ebenfalls", "libanus", "unmercifully", "milord", "behandlung", "velours", "tochter", "itse", "noces", "lampes", "chary", "quas", "danach", "wouldest", "primroses", "manumission", "mortifying", "gondoliers", "krijgen", "ministres", "garbed", "adelheid", "memnon", "nuo", "desperadoes", "nuage", "sesterces", "coucher", "freunden", "civilize", "phial", "faute", "arrant", "offrir", "appealingly", "multe", "declamation", "miscarry", "complacently", "unmerited", "insubordinate", "feux", "assuaged", "dukedom", "efface", "dazzlingly", "peintre", "looketh", "whalebone", "minutest", "ungovernable", "wellnigh", "meuble", "ziet", "wittily", "schmerz", "foolery", "exulting", "habitant", "craned", "ennobled", "profundo", "arbeid", "apuleius", "pourtant", "wantonness", "scenting", "beziehung", "fik", "flinty", "comanches", "ordnung", "ceremoniously", "gloire", "wobei", "hollowness", "zeggen", "jardinier", "serai", "plw", "desierto", "fancying", "protuberance", "largeur", "divin", "portait", "tersely", "deploring", "sallies", "frontiersmen", "contraries", "armful", "envers", "extricated", "dissemble", "bouteille", "impost", "countenanced", "essayed", "findeth", "gesagt", "zustand", "pandavas", "vaguest", "fenetre", "passen", "feebleness", "plodded", "lesquels", "excellente", "gik", "nieder", "brise", "facilement", "inflaming", "prete", "augury", "diabolus", "revelled", "mayhap", "humbles", "poetes", "metier", 
"personnages", "demoiselle", "unhampered", "matelas", "puisse", "indissoluble", "netta", "nicety", "tablespoonfuls", "witticisms", "enfeebled", "surveiller", "revolutionists", "cozen", "middel", "penitents", "imprudence", "tiptoed", "reicher", "magyars", "civilities", "trussed", "dulcet", "sirrah", "rapporter", "festal", "couteau", "baronne", "heartrending", "devotedly", "plancher", "amies", "steeps", "salubrious", "spearmen", "houden", "marriageable", "imposture", "mutinous", "jabbering", "tyrian", "pourra", "peremptorily", "whirlwinds", "despoiled", "lugubrious", "ringleaders", "begriffe", "listlessly", "affronted", "debout", "probablement", "daintily", "pikemen", "deinem", "partager", "exaction", "unlighted", "washstand", "overspread", "losse", "piteously", "politischen", "tager", "largess", "weightier", "plenipotentiaries", "muka", "insensibly", "snart", "contento", "parchments", "uusi", "scotchman", "repousse", "ingratiating", "bairn", "poisoner", "prodigiously", "unerringly", "qualm", "aquel", "marseillaise", "uncharitable", "bestimmung", "shiftless", "visages", "subjoined", "pierrette", "befindet", "daubed", "ostentatiously", "unvarying", "choisi", "whereto", "cottagers", "voluble", "ingratiate", "helpmate", "ligt", "soldats", "gloaming", "adamantine", "weinig", "kansa", "rudest", "forcer", "einfluss", "brunnen", "oreilles", "varit", "braucht", "gutes", "irresolute", "mogen", "aarde", "smartness", "burthen", "attente", "bekend", "lleva", "unsparing", "bewegung", "paard", "alcide", "espied", "effrontery", "vacuity", "pillared", "queerest", "impolitic", "defiles", "byles", "indubitably", "mottoes", "molti", "questioningly", "generalship", "debasing", "victimes", "demurely", "talar", "donker", "peuples", "humains", "comun", "prettiness", "usurpations", "plebeians", "habia", "meurs", "philosophique", "sloops", "regierung", "savez", "gesang", "gick", "saturnine", "trinken", "hungering", "unreasoning", "morto", "thoughtlessness", "pobres", "rasped", "celestials", "florrie", "turneth", "childishness", "glauben", "revenged", "radiantly", "gefahr", "prohibitory", "destine", "forestalled", "converses", "commonplaces", "waggons", "interet", "duenna", "outwitted", "summat", "bespeak", "pocos", "waarde", "wheresoever", "compromis", "wyth", "obwohl", "partei", "meddlesome", "bustled", "neckerchief", "brahmanas", "misgiving", "farthings", "gebiet", "disfigure", "rancorous", "forsakes", "torpid", "doctrina", "atem", "canne", "intendant", "bereit", "fiere", "swiftest", "confidants", "unwonted", "astonishes", "joues", "recondite", "sightless", "blunderbuss", "besondere", "chiselled", "unconsidered", "hottentot", "tarda", "fausta", "beholders", "quelles", "vertes", "invitingly", "gloated", "wearying", "straitened", "disdainfully", "romish", "servitor", "ingrate", "unvisited", "officier", "bairns", "bedeutet", "sorgen", "autrement", "quinze", "entreating", "longues", "voisine", "insensibility", "washerwoman", "ufer", "caldron", "offert", "summum", "reiche", "irreproachable", "quels", "penser", "sentimentalist", "tenia", "avea", "sublimate", "mitad", "deutlich", "encima", "bowsprit", "antrag", "childishly", "envying", "austerities", "largeness", "hemlocks", "chiffre", "sadden", "passionless", "haunch", "signifie", "thronging", "plainness", "wolfish", "breakfasted", "quidem", "semblant", "ressort", "intrepidity", "pferde", "affectations", "filthiness", "rayons", "sommeil", "hateth", "spitze", "fomented", "opfer", "dietro", "iesus", "conjuncture", "vivante", "docility", "moravians", "wretchedly", 
"preciso", "nosegay", "fidgeted", "trooped", "deadened", "brimful", "antwoord", "mistrusted", "florentines", "circonstances", "bedarf", "commencer", "fevrier", "vyasa", "assailing", "unseasonable", "blod", "minstrelsy", "voies", "paunch", "sobriquet", "horatius", "serapis", "soeurs", "chaffing", "wahr", "unlettered", "prowled", "uninviting", "buttoning", "agesilaus", "entender", "jaunes", "tragical", "charakter", "vesture", "spricht", "richtung", "salver", "milliers", "profoundest", "reproachful", "petulance", "grovelling", "companionable", "kindliness", "convulsively", "laudanum", "residuum", "tombeau", "servility", "strew", "dites", "unendurable", "ennen", "cassock", "khasi", "aufgabe", "excommunicate", "erwarten", "zaal", "arabesques", "avowal", "interposing", "retirer", "pathless", "revers", "juist", "trooping", "rencontrer", "marteau", "stanch", "perspicacity", "pawed", "swains", "hinzu", "undulation", "versuchen", "highroad", "wesen", "gondolier", "douleurs", "ascendency", "sammen", "hasted", "sehnsucht", "stupefying", "pealed", "stets", "citoyen", "requite", "larges", "omnibuses", "windless", "hinc", "sanguinary", "mohammedans", "tyburn", "souhaite", "firmest", "neus", "dumbly", "allemands", "inquisitiveness", "fourni", "erkennen", "bethought", "debajo", "lebt", "slipshod", "rundt", "produire", "heeds", "tevens", "doted", "overmuch", "chastening", "waxen", "cadaverous", "stroom", "spielt", "croire", "contriving", "waddled", "circassian", "especie", "whin", "greediness", "preferment", "geschreven", "ziele", "remounted", "ontvangen", "strewed", "artifices", "assenting", "anaxagoras", "unge", "cousine", "presentiment", "sturdily", "falleth", "quitte", "censorious", "ouvre", "mekka", "noontide", "ewigkeit", "tausend", "pranced", "augenblick", "pudo", "glowering", "suppliants", "heare", "personnelle", "gezien", "schemed", "disentangled", "qualite", "husbandmen", "fruitlessly", "guerrier", "huntsmen", "photoplay", "dritten", "duchies", "cuirass", "flotte", "hireling", "overweening", "joies", "abruptness", "sieh", "moed", "warred", "nourriture", "niver", "conducteur", "regicide", "dedans", "roved", "remplacer", "ajoute", "auquel", "siller", "touchingly", "hisself", "bliver", "industriously", "confusedly", "eying", "befit", "edified", "profondeur", "portier", "malignity", "revient", "sibylla", "karakter", "becometh", "poort", "halloo", "pasturage", "loisir", "puits", "voort", "soixante", "voglia", "pandu", "geval", "pouvait", "smarted", "paroxysms", "coquin", "mirthful", "vergangenheit", "coeval", "pharao", "ceinture", "galvanometer", "finna", "graceless", "slinking", "enlever", "brocades", "ennobling", "prevenir", "harten", "pleasanter", "hindoo", "falseness", "drap", "betimes", "natuurlijk", "procurer", "malefactors", "lysias", "handmaids", "gefallen", "gaar", "straten", "dommage", "bewail", "rhenish", "twitter", "erano", "schar", "irreverently", "misjudge", "revengeful", "interdicted", "suppliant", "monotonously", "benignly", "certes", "averil", "sauntering", "zusammenhang", "gebracht", "inexpedient", "confiscations", "heartiest", "untutored", "forbears", "exulted", "uninfluenced", "gallies", "omne", "taches", "tourner", "marcius", "pealing", "campagnes", "quoniam", "leathern", "ecclesiastics", "interceded", "nimmt", "intelligibly", "craftily", "chaplets", "abends", "englischen", "bestaat", "makest", "nerved", "braccio", "philosophe", "couvert", "musketry", "caribs", "enfranchised", "maer", "casements", "eatable", "dets", "meanly", "profonde", "theyr", "aspecto", "disinterestedness", 
"soumettre", "plebe", "nier", "jeta", "blaspheming", "benutzt", "pantheistic", "slumbered", "hostler", "fous", "quartette", "hoed", "stettin", "brusquely", "rankled", "nonconformists", "intonations", "scandalously", "sirup", "exercer", "reproachfully", "pauvre", "rivalling", "obtenu", "eeuw", "howat", "existencia", "delusive", "sepulchral", "sarebbe", "fuor", "pareil", "remplir", "fourscore", "teacheth", "guld", "droned", "balles", "traiter", "rapporte", "wellen", "abler", "wallowed", "recompensed", "quil", "chamberlains", "disgracefully", "brung", "manches", "quei", "atteindre", "asuras", "lamentably", "achaean", "loups", "lowliest", "braggart", "somersetshire", "indisposition", "mithridates", "reconnu", "nutriment", "unkindness", "tranquille", "froh", "gardes", "talo", "rascally", "gardien", "sanoi", "strumpet", "zigzags", "discoursed", "erreicht", "haare", "accost", "manoeuvred", "libels", "blighting", "vileness", "blessures", "soldados", "abase", "outcries", "stampeded", "bithynia", "cupidity", "soundest", "consentement", "risings", "fervid", "truculent", "illimitable", "gayly", "forbearing", "kvar", "despatching", "potentates", "putteth", "impetuosity", "jutted", "encomium", "decke", "behoves", "querulous", "mener", "manchus", "pemmican", "discomfited", "dienen", "sidste", "steden", "mollified", "sulphurous", "entierement", "parterre", "subtile", "ziemlich", "quon", "enfolded", "gedacht", "belongeth", "parian", "emot", "nowise", "vaan", "verdient", "detestation", "theophrastus", "indiens", "sallied", "infinitude", "unchristian", "nachbar", "hubo", "quaff", "scuffling", "commotions", "belang", "numidia", "craning", "indistinctly", "aldrig", "zes", "houdt", "chiefest", "casuistry", "siis", "manchmal", "purposing", "justness", "hundert", "simpering", "soothsayers", "charwoman", "mittag", "facere", "aquella", "chasseurs", "countersign", "frem", "cambric", "thron", "spluttered", "leetle", "quos", "glinted", "facon", "coupable", "lowliness", "lesquelles", "turc", "trundled", "desolated", "kindles", "shineth", "woning", "falchion", "asperity", "pousse", "dran", "secretaire", "effulgence", "banisters", "extricating", "valt", "hesitatingly", "affray", "pensively", "meretricious", "promiscuously", "overset", "chuse", "ruido", "undefinable", "scorning", "multa", "lacedaemonians", "aristoteles", "friede", "censers", "aufgenommen", "tandis", "talke", "trifled", "intelligente", "delightedly", "chimerical", "kanske", "importunate", "disgraces", "zeg", "agitations", "piratical", "indigence", "acquirement", "mutely", "billowy", "querelle", "suzerainty", "imperturbable", "milliners", "pensa", "fecit", "gleiche", "vacillation", "innocente", "toilers", "snored", "heathenism", "rancour", "apercu", "facetiously", "riband", "pecado", "slaine", "vaut", "disdains", "gedaan", "hvem", "amain", "cavil", "kohta", "huskily", "unwarrantable", "glowered", "curates", "anent", "wenigen", "konnten", "worthier", "vooral", "leered", "palmy", "religieux", "truncheon", "hovels", "milliards", "unlovely", "abjure", "plenteous", "piedmontese", "debauch", "holocausts", "imperatively", "philadelphus", "darky", "ravening", "kentuckian", "methought", "fagot", "foulest", "rills", "gaven", "treize", "leise", "dragoman", "micht", "affrighted", "unsocial", "loger", "dejectedly", "tamely", "reposing", "ausdruck", "phlegmatic", "mightest", "dispossess", "cataloguers", "gibe", "drily", "languorous", "paire", "tode", "foulness", "zelfs", "calumnies", "scythes", "shirked", "disapprobation", "propitiate", "hilft", "usurpers", "lagen", 
"estis", "inspirer", "gainsay", "ambrosial", "atteinte", "intanto", "conciencia", "provender", "schulter", "navire", "matronly", "andern", "sourire", "ungracious", "overawed", "mukaan", "relenting", "bijna", "angesehen", "coude", "dickon", "vapeur", "maintenir", "sluices", "geweest", "erziehung", "zitten", "importe", "raisonnable", "canot", "grundlage", "hessians", "undreamed", "equable", "oppressively", "chacune", "zaak", "pourront", "indorsed", "kasteel", "indulgently", "takaisin", "superfluity", "pantalon", "gossiped", "generalissimo", "coquettish", "zegt", "konung", "accepter", "expiate", "commiseration", "voudrais", "counterpoise", "sawest", "inquiringly", "betes", "romanism", "northmen", "folgt", "cuya", "schicksal", "travaille", "thae", "leitung", "unfeigned", "impalpable", "murmurings", "conjointly", "excitements", "zambesi", "vilken", "comeliness", "verra", "hambre", "indiquer", "grossness", "cuivre", "noget", "countrey", "carefulness", "blijft", "douceur", "vaporous", "oarsmen", "seigneurs", "toilsome", "proprieties", "listlessness", "waarin", "pities", "tredje", "mortify", "gipsies", "neapel", "unhallowed", "injudicious", "gesetze", "remonstrances", "uninterruptedly", "revanche", "suam", "ither", "unmanly", "mazy", "forebodings", "fickleness", "tuvo", "gelukkig", "geschlecht", "unsheathed", "freilich", "heiligen", "palest", "impulsion", "empirische", "vano", "sitten", "illis", "votaries", "factious", "braw", "verdadero", "shabbily", "hollande", "camarades", "slighter", "yere", "homewards", "trous", "achten", "rapine", "materie", "snuffing", "schwarzen", "sterben", "bezig", "abnegation", "yeare", "vostre", "kerl", "widerstand", "betrachten", "erinnern", "betake", "arbeiter", "klaar", "outspread", "thim", "sendeth", "winde", "lichaam", "zetten", "whirr", "alarum", "doigt", "daarom", "liten", "declara", "gebrauch", "jambe", "paie", "unmerciful", "apporter", "demoiselles", "reprobation", "lache", "burgomaster", "camest", "sonder", "extravagances", "esset", "fellah", "verlassen", "gewinn", "wakening", "vacantly", "discoursing", "cablegram", "tourne", "attendre", "schlechte", "lauf", "injuriously", "spluttering", "felsen", "gloried", "argives", "paarden", "japhet", "cabane", "hende", "zacht", "promontories", "mignonette", "supplicate", "joindre", "freundschaft", "pattering", "unromantic", "sophistical", "frescoed", "sauver", "nobleness", "sealskin", "bewilder", "gwine", "zeven", "consulship", "aminta", "brauchen", "fuite", "unclouded", "affability", "affright", "recantation", "threshed", "malen", "gladdened", "weisen", "fausse", "ruses", "expostulation", "faisait", "heraus", "paille", "delawares", "devait", "tirer", "reines", "galled", "esel", "verres", "atteint", "slaveholder", "fuisse", "meddled", "soldaten", "protestation", "cambyses", "enmities", "becalmed", "genou", "verbunden", "hver", "muut", "leprous", "lambent", "wolken", "sacristan", "lavishing", "wending", "disquieted", "solchen", "benedictions", "niggardly", "herte", "teki", "ankunft", "solides", "gesetzt", "dangereux", "evincing", "vraie", "fauteuil", "naturels", "eue", "buckboard", "noisome", "veinte", "malades", "impassible", "oblations", "worten", "intoxicate", "prenant", "graue", "entweder", "exasperate", "curtsey", "bestimmten", "exclusivement", "babyhood", "sojourned", "censuring", "disrespectfully", "mesmeric", "apprehensively", "roofless", "despoil", "direst", "razones", "inroad", "terminer", "vainglorious", "wenige", "benevolently", "archbishopric", "hatchway", "eigenschaft", "pinnace", "slighting", "vorher", 
"falsch", "maintien", "ellinor", "sepulchres", "extirpate", "adrianople", "imposer", "schlimmer", "wies", "imperiously", "kuu", "rhetorician", "totta", "portefeuille", "unconcern", "toucheth", "requited", "geburt", "suffit", "peloponnesus", "postern", "irremediable", "hamilcar", "quavering", "unperceived", "leonine", "botte", "wonderingly", "haversack", "liet", "ennemi", "handen", "dawdling", "spiritless", "thorwald", "rejoindre", "inutile", "signally", "loitered", "benefices", "hewing", "abysses", "beginnt", "mouldering", "schmerzen", "everlastingly", "descried", "aquellas", "vosotros", "miten", "froward", "elend", "audaciously", "indelicate", "einrichtung", "umfang", "chinamen", "prostrating", "ceremonious", "slaveholding", "unworldly", "ideality", "fece", "fathomed", "boord", "waan", "plafond", "erzeugt", "gekommen", "tranquilly", "delectation", "honoria", "couldst", "prattling", "suivent", "terram", "prate", "submissively", "whithersoever", "parcourir", "assise", "soutenir", "girdled", "abased", "versucht", "niemals", "antient", "semblables", "despairingly", "alguno", "munificence", "throwed", "gervaise", "habitude", "impetuously", "providentially", "veulent", "coom", "harangued", "provincias", "wahren", "glorying", "cockade", "unfrequently", "inconstancy", "betrifft", "ninguno", "doun", "gratifications", "impenitent", "gayety", "arriver", "sagesse", "kwam", "foule", "turm", "bildet", "blijven", "sternness", "vede", "lames", "gunst", "complot", "knapsacks", "engross", "tristes", "appelle", "gracefulness", "communed", "calmest", "glutted", "largement", "dallying", "witticism", "fatted", "blauen", "hottentots", "penances", "brengen", "glimmered", "bretons", "servitors", "refus", "fehlt", "cxar", "ewig", "airily", "gegeven", "schluss", "maudit", "autoridad", "kinsfolk", "erinnerung", "essayer", "distrusting", "tartary", "genoeg", "fremde", "droops", "blandishments", "individus", "remonstrate", "improvident", "handsomer", "blazoned", "vatten", "plainte", "damps", "machten", "bonhomie", "adverted", "soweit", "sacerdote", "productiveness", "gestes", "druse", "quaver", "trouw", "ausgang", "versuche", "wrapt", "draweth", "prit", "tampoco", "versification", "sojourning", "acclamations", "aimez", "unfaltering", "loftiness", "emendation", "behandelt", "clownish", "criado", "tellement", "fordi", "remettre", "redound", "auront", "objektive", "moodily", "discords", "outworn", "honeycombed", "gedanke", "venant", "anspruch", "drauf", "trouvent", "allers", "superannuated", "schauen", "viands", "amiability", "kaisers", "victualling", "religieuse", "wirklichkeit", "envoie", "dicha", "strenge", "unwearied", "punctilious", "turne", "entscheidung", "egotist", "jouissance", "falsche", "schier", "ursprung", "importunity", "distractedly", "zele", "vexations", "seraient", "piastres", "boche", "bewitch", "allures", "frisking", "rottenness", "rufen", "sentimentalism", "clanged", "jupes", "rechter", "privily", "ungenerous", "asketh", "eigenlijk", "absented", "euboea", "fiefs", "honom", "sympathised", "upbraided", "thermidor", "ignominiously", "mischiefs", "appertain", "joko", "perd", "enviously", "wahrscheinlich", "joyed", "gegner", "einfache", "bhishma", "clairement", "eate", "maddest", "adresser", "cabalistic", "conventionality", "italiens", "aliquid", "lidt", "whiffs", "lleno", "manufactories", "twelvemonth", "undimmed", "gjorde", "heah", "parvenir", "faithlessness", "vilain", "contrives", "wistfulness", "genannt", "geleden", "munificent", "fortement", "glaive", "maggior", "convoked", "veste", "malefactor", 
"gelangen", "dotage", "palliate", "oxus", "pedants", "quaked", "malade", "affronts", "explique", "reproaching", "excellences", "venturesome", "roues", "severer", "fremd", "fusillade", "muita", "feareth", "endroits", "maanden", "bareheaded", "girding", "anzi", "taire", "kopje", "illud", "ilman", "maxence", "wrings", "ferma", "hummocks", "detraction", "dicht", "perdre", "charbon", "foure", "subserve", "cherubims", "toilettes", "liebhaber", "lenity", "songe", "respecte", "sabots", "podia", "insolently", "blik", "dimpling", "quiconque", "ehre", "littleness", "homines", "gammal", "highnesses", "awaked", "upbraid", "unsubstantial", "muren", "dezelfde", "proselyte", "authoress", "fabel", "grandee", "pleasantry", "setteth", "chaldea", "pensioned", "yeardley", "tiefe", "considerately", "gattung", "denkt", "poursuite", "teuton", "pestilent", "sofern", "bountifully", "desisted", "senecas", "jollity", "enrica", "inexpressibly", "sunshiny", "dicitur", "handeln", "begint", "oeufs", "amanuensis", "dreariness", "animi", "comprenant", "smites", "schlacht", "schauspieler", "bezeichnet", "orisons", "reposes", "vart", "hauses", "geduld", "fieri", "mischance", "koska", "hospitably", "metaphysician", "vulgarly", "construit", "invectives", "poitrine", "perdus", "blive", "voulu", "pompously", "discourtesy", "hazarded", "curtsy", "palpitating", "marido", "plaisirs", "ennoble", "dira", "unsought", "palsied", "sartin", "panegyric", "profanation", "unfitted", "halfe", "drinken", "imprecations", "virtuously", "inconceivably", "vouloir", "assiduity", "entstehen", "abschied", "asiatics", "artificers", "ohren", "murderess", "pouvons", "radicle", "volontaires", "villany", "forded", "superintended", "abominably", "zweck", "familier", "enervating", "tumults", "philippus", "pouces", "forswear", "astuteness", "heiter", "liebes", "kenntnis", "gehn", "molte", "lediglich", "musst", "hauberk", "domestique", "geluk", "unspotted", "altname", "legt", "bounden", "declaimed", "unexampled", "todes", "tearless", "basely", "vorstellung", "labios", "vond", "hubiera", "speakest", "teemed", "killeth", "preternaturally", "genommen", "pauvres", "negress", "seien", "haranguing", "quaintness", "verser", "stoical", "tyd", "aptness", "retrouve", "mehreren", "malediction", "givest", "discreditable", "brilliants", "unseeing", "connived", "connais", "mourir", "reicht", "crabbed", "obsequies", "perverseness", "latticed", "pleadingly", "besiegers", "busying", "brazo", "cudgels", "heisst", "paroisse", "befehl", "machte", "soldierly", "musste", "richten", "exhalations", "rapturously", "forelock", "luy", "esteems", "agonised", "hirelings", "hoogste", "jauntily", "erscheinen", "declivity", "vivants", "reviling", "sixe", "altid", "retrouver", "ailed", "garlanded", "abjectly", "vernunft", "churl", "vrijheid", "guds", "rendue", "erden", "erant", "telegraphing", "archly", "statesmanlike", "souverain", "yeares", "duft", "gezegd", "kust", "woorden", "quelconque", "dunghill", "declaim", "bucklers", "stouter", "seuls", "unpractical", "sehe", "reverenced", "derfor", "hominum", "voeten", "liveried", "disfavour", "genially", "gezeigt", "modish", "plomb", "gennem", "prier", "vorn", "deigns", "careering", "thenceforward", "trug", "hasdrubal", "kanssa", "hempen", "miltiades", "growed", "decrepitude", "thinkest", "effluvia", "ordres", "figurer", "grimness", "repassed", "meditatively", "sinecure", "mettent", "stopt", "riseth", "kanzler", "invloed", "verlust", "figger", "underrate", "laune", "jederzeit", "pardonable", "vnder", "choleric", "inclose", "bided", "beggary", 
"desto", "boeotia", "pleasantest", "deil", "gashed", "exordium", "tocsin", "alcun", "spitefully", "gehalten", "tonnerre", "abbia", "brocaded", "forwardness", "drawling", "testily", "gebunden", "ruhig", "unfasten", "tyran", "precocity", "resistless", "wangen", "spasmodically", "mesdames", "resignedly", "festoons", "aboute", "varlet", "viennent", "threatenings", "erkenntnis", "prevision", "dacht", "squaws", "cesse", "mahomed", "plunderers", "navires", "tremblement", "comfortless", "incautious", "luxuriance", "petto", "creditably", "jolies", "impressiveness", "cheyennes", "finit", "needeth", "superabundance", "precipitately", "unceremonious", "sidewise", "anacreon", "lisping", "sonna", "delante", "rideaux", "prig", "gezicht", "parfaite", "vituperation", "manifeste", "cabman", "fawned", "oever", "untaught", "juley", "einiger", "voorkomen", "gelijk", "forsworn", "imperilled", "sichtbar", "promptitude", "indiaman", "cantered", "allurements", "bataillon", "lasst", "omkring", "juicio", "noin", "distressful", "justifier", "bestimmungen", "verbinden", "bestimmte", "foremast", "bestaan", "stimmung", "meeste", "accorder", "thirsted", "irruption", "professedly", "geschwind", "groweth", "stupefaction", "lanterne", "larmes", "harangues", "remorselessly", "appartient", "naturall", "stupide", "dexterously", "extempore", "viscid", "abaft", "auraient", "reproving", "ottilie", "waer", "scandale", "turnus", "helpen", "begonnen", "pestilential", "schaffen", "merchantmen", "flammen", "atter", "ensi", "circumlocution", "queenly", "livest", "grandees", "devenue", "adjure", "allant", "obstreperous", "gnaden", "olet", "heedlessly", "soif", "lolled", "flatterer", "stube", "sentimentally", "gowned", "tutelary", "hindmost", "furent", "faibles", "monkish", "zouaves", "ineffectually", "contraste", "duidelijk", "turbaned", "guillotined", "conformably", "meane", "zugleich", "disdaining", "solcher", "ouvrier", "zieht", "lowness", "annoncer", "unpleasing", "disgracing", "disant", "begon", "heartiness", "recompence", "petulantly", "prinzip", "casteth", "rhetoricians", "sulkily", "minuteness", "solemnities", "vexes", "tomando", "impecunious", "avond", "menschlichen", "loob", "aliis", "snaky", "confessedly", "slecht", "wheedle", "hushing", "gxi", "corpore", "ungraceful", "queerly", "schwere", "parfaitement", "holdeth", "straggled", "picturesquely", "mainmast", "disquisition", "tiefer", "vorgestellt", "dulness", "pistoles", "unexceptionable", "finnes", "soumission", "liebt", "maie", "centaines", "havde", "mutinied", "terwijl", "palanquin", "contenir", "milesian", "poursuivre", "lacedaemonian", "volgen", "respire", "gehad", "untrammelled", "stentorian", "flatterers", "tomber", "cantering", "minces", "foible", "questionings", "choeur", "kehrt", "manacled", "haud", "thereabout", "contenta", "soone", "hauptstadt", "daheim", "heedlessness", "coquetry", "wended", "getan", "leggen", "onkel", "barbadoes", "wifely", "tantas", "cuius", "rouler", "expliquer", "mortel", "worthiest", "pusillanimous", "personnage", "swaggered", "accepte", "forbore", "gravelled", "publikum", "opportunely", "odoriferous", "insensate", "showeth", "causeless", "partem", "dennoch", "imprudently", "drollery", "makt", "uncongenial", "feront", "noght", "philosophes", "sententious", "reconnoitre", "doigts", "eatables", "intorno", "quiera", "sabines", "catholiques", "housetops", "rostro", "descry", "zouden", "dachte", "drona", "complaisance", "tinkled", "rappelle", "bewailing", "entrenchments", "llegado", "stilte", "sternest", "vijf", "vaches", "befitted", 
"preeminently", "enervated", "profiter", "ceremonials", "sedately", "choisis", "trone", "gabble", "searchingly", "somewheres", "patriotes", "tyrannous", "wigwams", "paysan", "blevet", "ooit", "suffisamment", "monosyllables", "sluggard", "gelegen", "dissembled", "verlieren", "ieder", "impudently", "jotka", "contrariety", "unprovided", "prinzen", "ruhm", "cerveau", "inclosing", "osaa", "supping", "anteil", "diplomatist", "barefaced", "plighted", "faudrait", "unterschied", "fermes", "verborgen", "ofttimes", "neemt", "steersman", "caitiff", "thebans", "keek", "aient", "seyn", "brumaire", "embroil", "pennon", "athirst", "gnashed", "neighing", "durchaus", "glaces", "magnanimously", "compagnon", "anchorite", "boisterously", "chancing", "dagegen", "tantos", "prenez", "momente", "sterke", "provinz", "withall", "lippen", "donnent", "consorted", "miry", "hollanders", "perh", "exactement", "exacte", "entend", "gewonnen", "moindre", "humeur", "souple", "proserpina", "fluss", "conclure", "dotter", "effectivement", "feelingly", "noised", "bondmen", "unseres", "bashfulness", "vaunt", "wollt", "greatcoat", "unmeaning", "turcs", "untrodden", "nerveless", "insurrectionary", "ruisseau", "refuser", "quondam", "zimmern", "raillery", "faciles", "accordant", "mixt", "ruft", "humide", "sensibles", "prudente", "indissolubly", "teils", "treten", "geschlossen", "extenuation", "favori", "compagnons", "merriest", "loftily", "pourrez", "placidity", "hicieron", "gueule", "regne", "doffed", "herodes", "quatorze", "tegenwoordig", "usurer", "voluntad", "geniality", "twopence", "froide", "rampe", "hearkening", "flippancy", "breastworks", "ruleth", "pellucid", "couvre", "frighted", "hearest", "evadne", "kreise", "oublier", "idees", "irreligion", "bruits", "waarschijnlijk", "prodigality", "bessere", "vuol", "enveloppe", "freshet", "stoutest", "takest", "livelong", "joyeuse", "serez", "citadelle", "appeare", "schaden", "sublimes", "verfassung", "opprobrious", "cnut", "propitiatory", "voyez", "acquirements", "drearily", "grenze", "estuvo", "violences", "hideousness", "drawed", "bewegen", "satte", "appartenant", "paquets", "synes", "parecer", "mechlin", "diciendo", "collines", "cabals", "scherz", "disait", "atli", "superscription", "lieue", "filched", "suffrages", "darkies", "maitres", "swineherd", "unworthily", "disturber", "foresaid", "redoubts", "boding", "ouvriers", "benumbed", "wenigstens", "carouse", "habere", "composedly", "paleis", "nilus", "eenvoudig", "heiresses", "schien", "pistolet", "ambuscade", "repine", "thinges", "geheel", "amants", "jingled", "autrefois", "breakfasting", "noeud", "regardez", "zufall", "drowsily", "religieuses", "voisins", "verfasser", "nogen", "engraven", "nahrung", "gaoler", "bancs", "waarop", "jolis", "evasively", "draps", "weisheit", "habitantes", "brouillard", "resentfully", "acquaintanceship", "declamatory", "elate", "juif", "halb", "geister", "quiso", "gleicher", "supplicating", "schlaf", "zahlreichen", "trembler", "wickedest", "bekannten", "adroitness", "bestir", "helst", "multitud", "wachten", "auxquels", "dropt", "schoolmistress", "obloquy", "profitless", "mourant", "wijze", "saidst", "flucht", "unconcealed", "mettant", "coursers", "disent", "mohammedanism", "finir", "abstemious", "krankheit", "cannonade", "otti", "brume", "grandmamma", "fahrt", "moeite", "tediousness", "verdadera", "ongeveer", "horreur", "licet", "ouvertes", "warbled", "genomen", "vuestra", "clamors", "complaisant", "votary", "hesper", "flossy", "zufrieden", "geloof", "luxuriantly", "loopt", "haled", "grizel", 
"certainement", "duquel", "inharmonious", "amatory", "todavia", "hindoos", "warme", "officiers", "meaneth", "videtur", "knavery", "dije", "blivit", "prennent", "harrowed", "appris", "podido", "stod", "mussulman", "unhesitating", "sybarite", "montrent", "leaue", "fulco", "irresolution", "geschickt", "schlagen", "proverbially", "waywardness", "maturer", "nennen", "treiben", "servius", "bepaald", "daraus", "faudra", "caresse", "bijzonder", "benignant", "appartiennent", "domestiques", "trifft", "arraign", "khoja", "cawing", "fragt", "gilds", "bottes", "antipathies", "afeard", "bishoprics", "marier", "bewegt", "teutons", "whelps", "bestehen", "victual", "healths", "heutigen", "kertaa", "benignity", "whitsuntide", "gesund", "coxcomb", "shrewdest", "couverts", "hecha", "jener", "undistinguishable", "satrap", "haen", "stateliness", "copses", "richesse", "poursuit", "adown", "brokenly", "coffre", "gilberte", "eddying", "couvent", "hawser", "circumstanced", "werry", "muratori", "heartlessness", "foully", "boors", "quailed", "esquimaux", "peint", "helas", "broils", "contenting", "troublous", "nulle", "kinswoman", "puissent", "bunten", "silencieux", "gegend", "quaffed", "fervency", "schuldig", "sortes", "courbe", "bethink", "eind", "comen", "serried", "careworn", "abstractedly", "besitzen", "unbent", "frolicsome", "foudre", "overrate", "directoire", "jambes", "betweene", "stolidly", "gerechtigkeit", "throned", "feind", "gnade", "saisir", "farine", "affably", "lendemain", "aristocracies", "hexameter", "volontaire", "pracht", "cravate", "aikana", "irgendwo", "fanns", "parricide", "strewing", "prosperously", "allurement", "curtsied", "mither", "recreant", "expiated", "bedienen", "roula", "blott", "allait", "reihen", "tournant", "entgegen", "bareness", "shamefaced", "bords", "perspicuity", "gegenstand", "visitant", "mulle", "organes", "kriege", "connue", "annos", "enow", "jocund", "unutterably", "entdeckt", "winna", "brahmanism", "appius", "inextinguishable", "batavian", "remarquable", "knaben", "betokened", "griechischen", "braccia", "merchantman", "habited", "betrachtet", "sympathising", "hvide", "rejoicings", "draga", "entreats", "conciliated", "foeman", "confute", "voulait", "unexpectedness", "indispensably", "gevoel", "endearments", "interj", "wheedling", "touchant", "aliud", "coyness", "quarante", "zuvor", "tirant", "teilnahme", "dirige", "mantling", "extenuate", "interessen", "battre", "quartiers", "bracht", "vormen", "disinherit", "restent", "aufenthalt", "calomel", "ouverts", "entsteht", "disquietude", "naething", "enormities", "kerchiefs", "helft", "remercie", "beruht", "genoux", "artillerymen", "hoeren", "flatteries", "unfading", "gehabt", "dight", "jouir", "waylay", "benefactions", "angenommen", "pitilessly", "pattered", "varandra", "assister", "daies", "cacha", "moest", "uncomplaining", "tulee", "pillowed", "courtes", "sayde", "saisi", "linien", "temor", "imploringly", "unsuspicious", "picturesqueness", "kende", "unresisting", "besitzt", "yez", "tronc", "begann", "musingly", "blieb", "protract", "connus", "disconcert", "argive", "profond", "choler", "pinioned", "tarrying", "hatless", "baith", "epigrammatic", "ilmarinen", "usurers", "boded", "dallied", "seekest", "couverte", "dettes", "schoot", "messire", "vorschlag", "semblent", "geschehen", "seelen", "traversa", "vassalage", "offenen", "manasses", "zuster", "breake", "auxquelles", "designedly", "whensoever", "conciliating", "frucht", "discouragements", "gingen", "semblable", "gegensatz", "inundations", "gelegenheit", "scandalised", 
"cinquante", "pudiera", "algonquins", "comported", "bange", "fasse", "servian", "stond", "unterschiede", "propitiated", "hogsheads", "contumely", "ollut", "connaitre", "provoquer", "herrschaft", "erinnert", "clamoured", "lacedaemon", "peines", "meint", "bourgeoise", "nerfs", "aiment", "begge", "possit", "nomme", "plis", "piquancy", "unpremeditated", "desirest", "declaiming", "bestimmen", "marchesa", "dizzily", "pauperism", "samnites", "schlief", "livrer", "sobald", "nettled", "allerede", "odeur", "comprends", "peroration", "preuves", "dahin", "verbergen", "aandacht", "vertreter", "daarna", "lourd", "wilfulness", "betrekking", "grunde", "retenir", "esteeming", "fallait", "ressemble", "klage", "hauing", "prolixity", "sonner", "subterfuges", "stof", "zahlreiche", "harer", "expostulated", "barbarities", "prudery", "bivouacked", "fusil", "langt", "passagers", "firesides", "vicissitude", "salido", "allerlei", "joyousness", "vorsicht", "behoved", "porticoes", "gebirge", "tragedian", "fastnesses", "nebst", "waarvan", "ruminated", "reprend", "commonalty", "lapset", "guerres", "indorse", "suffisante", "curst", "flounces", "upbraiding", "revenging", "feebler", "venger", "miteinander", "chaffed", "overstrained", "consolatory", "houre", "einzigen", "spreken", "contemporains", "heut", "augured", "verran", "sanscrit", "halfpence", "cutlasses", "cupfuls", "tremulously", "quavered", "puir", "governesses", "besluit", "hetzelfde", "veracious", "wesentlich", "readiest", "disconsolately", "squally", "captaine", "demandez", "inzwischen", "seules", "cumbrous", "palings", "satisfait", "geschikt", "devoirs", "rappeler", "croit", "orten", "habent", "didna", "demoniacal", "voraus", "distempers", "execration", "drest", "colonnes", "tabooed", "retenue", "guicciardini", "gaed", "vuestro", "cierta", "einfachen", "hundra", "belike", "saltpetre", "forborne", "cuyas", "tardily", "satisfaire", "dicere", "verbrechen", "zichzelf", "superabundant", "vilja", "versteht", "brengt", "scudding", "verschieden", "destinee", "deprecatory", "larboard", "keinem", "manuscrit", "shrubberies", "volkes", "pertinacity", "amabel", "parme", "herrlich", "hunc", "flurried", "avevano", "deferentially", "souviens", "mazarine", "infiniment", "overborne", "rempli", "goeden", "reinen", "engager", "jocose", "shawnees", "vaterland", "blessure", "restant", "maist", "ursache", "oublie", "eminences", "obscur", "afstand", "kepe", "cailloux", "enemigo", "toits", "weite", "pm", "video", "info", "ebay", "dvd", "website", "photos", "forums", "yahoo", "server", "pc", "feedback", "blog", "options", "audio", "fax", "rss", "porn", "faq", "sep", "powered", "electronics", "database", "microsoft", "url", "update", "downloads", "apr", "hosting", "videos", "tech", "linux", "jun", "listings", "sony", "google", "environmental", "pics", "sponsored", "eur", "pdf", "usr", "homepage", "lesbian", "logo", "airport", "phones", "cnet", "hp", "eg", "ip", "cameras", "ratings", "paypal", "thu", "rentals", "worldwide", "anti", "nokia", "tx", "anal", "interface", "technologies", "gmt", "xml", "input", "sexy", "mb", "multi", "graphics", "prev", "ads", "mini", "usb", "php", "trademarks", "phentermine", "keywords", "msn", "programming", "isbn", "az", "updates", "desktop", "pst", "fucking", "blogs", "evaluation", "implementation", "angeles", "networking", "australian", "kb", "connect", "dev", "vegas", "module", "pricing", "dvds", "documentation", "coverage", "automotive", "developing", "milf", "ringtones", "xbox", "www", "settings", "monitoring", "nc", "llc", "hardcore", "provider", 
"techniques", "rd", "websites", "servers", "keyword", "username", "fuck", "paperback", "classifieds", "providers", "upgrade", "auctions", "therapy", "samsung", "affiliate", "admin", "designated", "integrated", "cds", "ipod", "porno", "motorola", "strategies", "affiliates", "multimedia", "xp", "tits", "interactive", "developer", "sitemap", "lab", "cvs", "gamma", "weekend", "lcd", "dj", "parking", "ct", "hentai", "laser", "icon", "basketball", "stats", "hawaii", "nj", "clips", "rw", "vhs", "criteria", "pubmed", "logged", "laptop", "checkout", "tripadvisor", "zoom", "anime", "spam", "bytes", "gb", "bc", "consulting", "aa", "lingerie", "shemale", "parameters", "jazz", "profiles", "mom", "singles", "amounts", "usd", "mg", "pharmacy", "constitutes", "collectibles", "infrastructure", "intel", "soccer", "math", "healthcare", "preview", "devel", "rs", "voyeur", "cisco", "certification", "bookmark", "specials", "bbc", "avg", "panasonic", "permalink", "viagra", "src", "faqs", "trackback", "revised", "broadband", "pda", "dsl", "webmaster", "dna", "diff", "sql", "specs", "ss", "yeah", "sexo", "javascript", "gps", "acc", "euro", "encyclopedia", "interracial", "tn", "suppliers", "playstation", "annotation", "gnu", "lesbians", "aol", "modules", "backup", "personals", "kevin", "perl", "bike", "utc", "albums", "verzeichnis", "hosted", "developers", "kits", "variables", "agenda", "template", "investor", "wildlife", "elementary", "sponsors", "unlimited", "printable", "hardcover", "setup", "booking", "ericsson", "supplier", "bluetooth", "tm", "upcoming", "scores", "weblog", "nh", "alerts", "mysql", "offline", "lifestyle", "converter", "blowjob", "safari", "pdt", "parameter", "adapter", "processor", "node", "hockey", "micro", "laptops", "regulatory", "db", "ph", "epinions", "affordable", "databases", "psp", "ds", "discounts", "boobs", "jennifer", "demo", "lg", "gourmet", "nfl", "avatar", "dildo", "featuring", "misc", "calculator", "holdem", "awareness", "spyware", "packaging", "wallpaper", "biggest", "alumni", "hollywood", "wikipedia", "diabetes", "ml", "wow", "mapping", "indexed", "grid", "plasma", "voip", "consultants", "implemented", "sf", "blogger", "kg", "textbooks", "seminar", "latina", "nasa", "sexcam", "accessibility", "templates", "tab", "router", "concrete", "folder", "womens", "css", "upload", "milfhunter", "mc", "metro", "toshiba", "qty", "airline", "uniprotkb", "beastiality", "lp", "consultant", "researchers", "unsubscribe", "bio", "upskirt", "exam", "logos", "milfs", "sustainable", "pcs", "honda", "cinema", "ag", "blowjobs", "deluxe", "monitors", "sci", "edt", "pmid", "recruitment", "siemens", "expertise", "medline", "innovative", "tampa", "ks", "python", "tutorial", "cruises", "moderator", "tutorials", "collectables", "scripts", "abc", "stereo", "operational", "airlines", "livecam", "hobbies", "telecommunications", "bestiality", "biz", "voltage", "nintendo", "vinyl", "highlights", "designers", "ongoing", "imaging", "blackjack", "analyst", "reliability", "gcc", "ringtone", "oriented", "desktops", "semester", "cumshot", "applies", "casinos", "filters", "nv", "notebooks", "algorithm", "semi", "proteins", "exp", "debian", "epson", "terrorism", "cpu", "allocated", "anytime", "nr", "layout", "initiatives", "lol", "mp", "optimization", "genetic", "modem", "mph", "evaluate", "toyota", "nationwide", "vector", "limousines", "destinations", "pipeline", "ethernet", "postposted", "nba", "busty", "coordinator", "epa", "coupons", "cialis", "bb", "ron", "modeling", "memorabilia", "alberta", "org", "okay", 
"workplace", "wallpapers", "firefox", "eligibility", "clinic", "involvement", "placement", "vbulletin", "funded", "motorcycle", "presentations", "wiki", "radar", "citysearch", "nsw", "pci", "guestbook", "pizza", "rc", "bmw", "mpeg", "shoppers", "cst", "ceo", "twiki", "counseling", "medication", "shareware", "dicke", "configure", "institutional", "metabolism", "rm", "pdas", "outcomes", "sri", "thumbnail", "api", "acrobat", "thermal", "config", "urw", "regardless", "wishlist", "sms", "shit", "trailers", "syndrome", "iraqi", "foto", "tabs", "gm", "rt", "shopper", "nikon", "customize", "sensor", "telecom", "indicators", "thai", "emissions", "dd", "boost", "spanking", "supplements", "icons", "tranny", "catering", "aud", "camcorder", "implementing", "labs", "dynamics", "crm", "rf", "cumshots", "bukkake", "shorts", "td", "amp", "sm", "usc", "environments", "trembl", "blvd", "amd", "emails", "wv", "insider", "seminars", "ns", "vitamin", "processed", "functionality", "intermediate", "billing", "diesel", "bs", "promotional", "chevrolet", "compaq", "authentication", "showtimes", "sectors", "bandwidth", "img", "schedules", "cached", "rpm", "florist", "webcam", "nutten", "automated", "pee", "nipples", "tvs", "manga", "mhz", "orientation", "analog", "packard", "payday", "deadline", "robot", "assess", "gnome", "gadgets", "automation", "impacts", "cl", "ieee", "corp", "personalized", "gt", "conditioning", "teenage", "nyc", "partnerships", "slots", "toolbar", "basically", "genes", "firewall", "scanner", "occupational", "hs", "integer", "treatments", "camcorders", "basics", "rv", "struct", "genetics", "punk", "enrollment", "interfaces", "advertisers", "deleted", "rica", "inkjet", "peripherals", "brochure", "bestsellers", "eminem", "antenna", "bikini", "decor", "lookup", "harvard", "podcast", "interactions", "nike", "pissing", "plugin", "latinas", "customized", "dealtime", "temp", "intro", "zus", "fisting", "tramadol", "jeans", "fonts", "quiz", "mx", "sigma", "xhtml", "recordings", "ext", "minimal", "polyphonic", "outsourcing", "adjustable", "allocation", "michelle", "ts", "demonstrated", "handheld", "florists", "installing", "ncaa", "phd", "blogging", "cycling", "messaging", "pentium", "aka", "sampling", "refinance", "cookie", "goto", "calendars", "compatibility", "netscape", "rankings", "measuring", "tcp", "dv", "israeli", "medicare", "skiing", "hewlett", "flickr", "priorities", "bookstore", "timing", "parenting", "fotos", "britney", "freeware", "fucked", "pharmaceutical", "workforce", "nodes", "ghz", "targeted", "organizational", "skype", "gamecube", "rr", "titten", "excerpt", "halloween", "methodology", "housewares", "resistant", "recycling", "gbp", "coding", "slideshow", "tracker", "hiking", "jelsoft", "headset", "distributor", "archived", "photoshop", "jp", "bt", "diagnostic", "rfc", "downloaded", "sl", "seo", "isp", "nissan", "acoustic", "cassette", "initially", "hb", "jpg", "tc", "sunglasses", "planner", "stadium", "mins", "sequences", "coupon", "ssl", "gangbang", "opt", "flu", "mlb", "tagged", "bikes", "gp", "submissions", "oem", "lycos", "zdnet", "broadcasting", "artwork", "cosmetic", "terrorist", "informational", "ecommerce", "dildos", "coordination", "connector", "brad", "combo", "activation", "mitsubishi", "constraints", "dimensional", "mozilla", "toner", "latex", "anymore", "oclc", "locator", "pantyhose", "plc", "msg", "nylon", "palestinian", "trim", "pixels", "hispanic", "cv", "cb", "procurement", "espn", "untitled", "totals", "marriott", "starring", "referral", "nhl", "optimal", "protocols", 
"highlight", "reuters", "fc", "gel", "omega", "evaluated", "assignments", "fw", "doug", "saver", "grill", "gs", "aaa", "wanna", "macintosh", "projector", "std", "herbal", "retailer", "vitamins", "vid", "panties", "connectivity", "algorithms", "bbw", "collaborative", "fda", "turbo", "thats", "hdtv", "asin", "spotlight", "reset", "expansys", "connecting", "logistics", "kodak", "danish", "scenario", "fs", "approx", "symposium", "nn", "weekends", "screenshots", "deviant", "adapters", "macro", "mandatory", "syndication", "gym", "kde", "viewer", "signup", "cams", "receptor", "piss", "autos", "deployment", "proc", "directive", "fx", "dl", "starter", "upgrades", "tapes", "governing", "retailers", "ls", "cbs", "spec", "realty", "instructional", "phpbb", "permissions", "biotechnology", "outreach", "lopez", "upskirts", "debug", "boob", "exclude", "peeing", "equations", "bingo", "spatial", "respondents", "lt", "ceramic", "scanners", "atm", "xanax", "eq", "unavailable", "assessments", "cms", "footwear", "beijing", "utils", "phys", "sensitivity", "calgary", "dialog", "wellness", "antivirus", "previews", "pickup", "nascar", "mega", "moms", "addiction", "chrome", "ecology", "botswana", "nav", "cyber", "verizon", "enhancement", "clone", "dicks", "lambda", "baseline", "silicon", "beatles", "soundtrack", "lc", "cnn", "lil", "participant", "scholarships", "recreational", "electron", "motel", "sys", "solaris", "icq", "yamaha", "medications", "homework", "advertiser", "encryption", "downloadable", "scsi", "focuses", "toxic", "dns", "thumbnails", "pty", "ws", "bizrate", "sox", "gamespot", "wordpress", "vulnerability", "accountability", "celebrate", "zoophilia", "univ", "scheduling", "therapeutic", "travesti", "relocation", "np", "competitions", "tft", "jvc", "vibrator", "cosmetics", "concentrations", "vibrators", "estonia", "dt", "cgi", "showcase", "pixel", "focusing", "viruses", "gc", "stickers", "leasing", "lauren", "macromedia", "additionally", "nano", "copyrights", "mastercard", "updating", "kijiji", "conjunction", "cfr", "validation", "cholesterol", "slovenia", "folders", "routers", "starsmerchant", "arthritis", "bios", "pmc", "myspace", "theorem", "nb", "stylus", "topless", "structured", "jeep", "mba", "reload", "distributors", "levitra", "mono", "particles", "coordinate", "widescreen", "squirting", "rx", "apps", "gsm", "rebate", "meetup", "ddr", "rec", "forecasts", "sluts", "ciao", "ampland", "chem", "shopzilla", "payroll", "cookbook", "uploaded", "americas", "connectors", "twinks", "techno", "elvis", "latvia", "jd", "gpl", "irc", "dm", "bangkok", "photographers", "infections", "brisbane", "configured", "amino", "clinics", "mls", "saddam", "threesome", "handjob", "transexuales", "technician", "inline", "executives", "audi", "staffing", "cognitive", "closure", "ppc", "volt", "div", "playlist", "registrar", "jc", "cancellation", "plugins", "sensors", "freebsd", "acer", "prostores", "reseller", "dist", "intake", "relevance", "tucson", "swingers", "headers", "geek", "xnxx", "hormone", "childrens", "thumbzilla", "avi", "pichunter", "thehun", "columnists", "bdsm", "ide", "valium", "rpg", "cordless", "pd", "prot", "trivia", "adidas", "tgp", "retro", "livesex", "statewide", "semiconductor", "boolean", "diy", "interact", "olympics", "identifier", "worldsex", "jpeg", "startup", "suzuki", "ati", "calculators", "abs", "slovakia", "flip", "rna", "chrysler", "plumbing", "nuke", "projectors", "pharmacies", "ln", "introducing", "nicole", "latino", "uc", "asthma", "developmental", "zope", "regulated", "gmbh", "buf", "ld", 
"webshots", "sprint", "inputs", "genome", "documented", "paperbacks", "keyboards", "eco", "indie", "detector", "notifications", "msgid", "transexual", "mainstream", "evaluating", "subcommittee", "suse", "mf", "motels", "msgstr", "volleyball", "mw", "adipex", "toolbox", "ict", "browsers", "dp", "surfing", "creativity", "oops", "nipple", "behavioral", "bathrooms", "sku", "ht", "insights", "midwest", "karaoke", "nonprofit", "hereby", "containers", "integrate", "mobiles", "screenshot", "kelkoo", "consortium", "pts", "seafood", "rh", "rrp", "playboy", "fg", "mazda", "roster", "symantec", "wichita", "nasdaq", "ooo", "hz", "timer", "highs", "ipaq", "alignment", "masturbating", "comm", "nhs", "aye", "visibility", "reprints", "accessing", "midlands", "analysts", "dx", "sk", "locale", "biol", "oc", "fujitsu", "exams", "aj", "medicaid", "treo", "infrared", "tex", "cia", "sublimedirectory", "poly", "dod", "wp", "naturals", "neo", "motivation", "lenders", "pharmacology", "bloggers", "powerpoint", "surplus", "sonic", "obituaries", "belarus", "zoning", "guitars", "lightweight", "tp", "jm", "dpi", "scripting", "gis", "snapshot", "caring", "expo", "dominant", "specifics", "itunes", "cn", "newbie", "bali", "sponsorship", "headphones", "volkswagen", "marker", "strengths", "emirates", "terrorists", "airfare", "distributions", "vaccine", "crap", "viewpicture", "volvo", "bookings", "minolta", "gui", "rn", "abstracts", "pharmaceuticals", "andale", "remix", "thesaurus", "ecological", "cg", "appraisal", "maritime", "href", "benz", "wifi", "fwd", "homeland", "championships", "disco", "endif", "lexmark", "cleaners", "hwy", "cashiers", "guam", "preventing", "compliant", "hotmail", "refurbished", "activated", "conferencing", "trackbacks", "marilyn", "findlaw", "programmer", "vocals", "yrs", "foo", "gba", "bm", "nightlife", "footage", "howto", "entrepreneur", "freelance", "screensaver", "metallica", "headline", "str", "bahrain", "academics", "pubs", "shemales", "screensavers", "vip", "clicks", "mardi", "sustainability", "formatting", "nutritional", "weblogs", "timeline", "rj", "affiliation", "nudist", "ensures", "sync", "telephony", "realtors", "graphical", "aerospace", "meaningful", "shortcuts", "voyeurweb", "specifies", "logitech", "briefing", "belkin", "accreditation", "wav", "modular", "microphone", "moderators", "memo", "kazakhstan", "standings", "gratuit", "fbi", "qatar", "porsche", "cayman", "rp", "tba", "usgs", "kathy", "graphs", "surround", "lows", "controllers", "consultancy", "hc", "italiano", "rca", "fp", "sticker", "stakeholders", "hydrocodone", "gst", "cornell", "mailto", "promo", "jj", "schema", "catalogs", "quizzes", "obj", "myanmar", "metadata", "floppy", "handbags", "ev", "incurred", "questionnaire", "dept", "euros", "makeup", "troubleshooting", "uzbekistan", "indexes", "pac", "rl", "erp", "gl", "ui", "dh", "fragrances", "vpn", "fcc", "markers", "assessing", "eds", "roommate", "webcams", "webmasters", "df", "computational", "acdbentity", "handhelds", "reggae", "whats", "rides", "rehab", "allergy", "enzyme", "zshops", "condo", "pokemon", "amplifier", "ambien", "worldcat", "titanium", "contacted", "cdt", "recorders", "casio", "postings", "postcards", "dude", "transsexual", "pf", "informative", "girlfriend", "bloomberg", "beats", "scuba", "checklist", "bangbus", "lauderdale", "scenarios", "gazette", "hitachi", "divx", "batman", "hearings", "calibration", "eval", "anaheim", "ping", "prerequisite", "sao", "pontiac", "regression", "trainers", "muze", "enhancements", "renewable", "passwords", "celebs", 
"gmc", "hh", "adsl", "advisors", "finals", "fd", "acrylic", "tuner", "asn", "toddler", "acne", "listprice", "libs", "cadillac", "malawi", "pk", "sagem", "knowledgestorm", "ppm", "referenced", "gays", "exec", "warcraft", "catalyst", "vcr", "prepaid", "electro", "vietnamese", "lexus", "maui", "handjobs", "squirt", "plastics", "postcard", "tsunami", "internationally", "psi", "buses", "expedia", "pct", "wb", "smilies", "vids", "shakira", "qld", "dk", "findarticles", "routines", "issn", "podcasts", "sas", "ferrari", "outputs", "insulin", "mysimon", "ambient", "oecd", "prostate", "adaptor", "hyundai", "xerox", "merger", "softball", "referrals", "quad", "firewire", "mods", "nextel", "rwanda", "integrating", "vsnet", "msie", "wn", "liz", "ccd", "sv", "burlington", "researcher", "kruger", "viral", "aruba", "realtor", "chassis", "dubai", "llp", "pediatric", "boc", "dg", "asus", "techrepublic", "vg", "filme", "craps", "fuji", "brochures", "tmp", "alot", "benchmark", "highlighted", "antibody", "wiring", "ul", "js", "webpage", "hostels", "pn", "wendy", "diffs", "mumbai", "ozone", "disciplines", "nvidia", "pasta", "serum", "motherboard", "runtime", "inbox", "focal", "bibliographic", "incl", "hq", "propecia", "nbc", "samba", "inspections", "manually", "wt", "flex", "mv", "mpg", "retrieval", "cindy", "lolita", "carb", "importantly", "rb", "upc", "dui", "mh", "discrete", "sexuality", "polyester", "kinase", "televisions", "specializing", "pvc", "blah", "mime", "motorcycles", "thinkpad", "cunt", "feof", "bunny", "chevy", "longest", "tions", "dentists", "usda", "workstation", "flyer", "dosage", "urls", "customise", "marijuana", "adaptive", "enb", "gg", "fairfield", "invision", "emacs", "jackie", "cardiovascular", "ww", "sparc", "cardiac", "learners", "gd", "configuring", "guru", "convergence", "numeric", "kinda", "malpractice", "dylan", "rebates", "pix", "mic", "basename", "kyle", "obesity", "vertex", "bw", "hepatitis", "nationally", "andorra", "mj", "waiver", "specialties", "cingular", "bacterial", "lf", "ata", "bufing", "pam", "dryer", "nato", "funky", "secretariat", "scary", "mpegs", "brunei", "slovak", "mixer", "wc", "sbjct", "demographic", "washer", "springer", "evaluations", "helicopter", "hk", "powerseller", "ratios", "maximize", "cj", "workout", "mtv", "optimize", "leu", "namespace", "align", "peripheral", "confidentiality", "changelog", "orgasm", "condos", "greensboro", "tulsa", "fridge", "qc", "simpsons", "upgrading", "pgp", "frontpage", "trauma", "flashers", "subaru", "tf", "programmers", "pj", "monitored", "installations", "spank", "cw", "motivated", "wr", "fioricet", "rg", "bl", "vc", "wx", "figured", "currencies", "positioning", "heater", "promoted", "moldova", "paxil", "temporarily", "ntsc", "thriller", "apnic", "frequencies", "mariah", "usps", "bg", "planners", "intranet", "psychiatry", "conf", "wma", "aquarium", "cir", "looksmart", "modems", "paintball", "prozac", "acm", "glucose", "norm", "playback", "supervisors", "ips", "dsc", "neural", "hometown", "transcripts", "collectible", "handmade", "entrepreneurs", "robots", "keno", "gtk", "mailman", "sanyo", "nested", "biodiversity", "movers", "workflow", "voyuer", "subsidiaries", "tamil", "garmin", "ru", "fuzzy", "indonesian", "therapist", "mrna", "budgets", "toolkit", "erotica", "dts", "qt", "airplane", "istanbul", "sega", "viewers", "cdna", "harassment", "barbie", "soa", "smtp", "replication", "receptors", "optimum", "neon", "interventions", "internship", "snowboard", "beastality", "webcast", "evanescence", "coordinated", "maldives", 
"firmware", "lm", "canberra", "mambo", "bool", "cho", "jumping", "antibodies", "polymer", "immunology", "wiley", "bbs", "spas", "convicted", "indices", "roommates", "adware", "intl", "zoloft", "activists", "ultram", "cursor", "stuffed", "restructuring", "simulations", "cz", "cleanup", "crossword", "conceptual", "hl", "bhutan", "liechtenstein", "redhead", "tractor", "unwrap", "telecharger", "safer", "instrumentation", "ids", "groundwater", "gzip", "ricky", "ctrl", "theta", "lightbox", "swaziland", "mediawiki", "configurations", "ethnicity", "lesotho", "rfid", "retailing", "oscommerce", "nonfiction", "homeowners", "racism", "vaio", "gamers", "slr", "licensee", "bisexual", "rel", "ign", "installer", "powershot", "bestselling", "insure", "packaged", "behaviors", "clarify", "activate", "tg", "pv", "sandisk", "vitro", "cosponsors", "hyatt", "burundi", "demos", "btw", "psychiatric", "tittens", "teenagers", "grading", "valentines", "vonage", "wetlands", "quicktime", "underwater", "pbs", "vanuatu", "erotik", "supportive", "vw", "targeting", "preschool", "dw", "hm", "jl", "hg", "megapixel", "booklet", "cancun", "reimbursement", "turnover", "cheryl", "radeon", "italicized", "chromosome", "optimized", "ffl", "upgraded", "colorful", "popup", "mk", "garnet", "ppp", "oceania", "formulation", "fresno", "handbag", "bypass", "ies", "logout", "boyfriend", "hogtied", "wl", "clipart", "detectors", "newsgroups", "spectra", "mailbox", "athlon", "iq", "landscaping", "mol", "korn", "directv", "viable", "deviantart", "qa", "hunks", "appellant", "xsl", "lithium", "ctr", "planting", "alphabetically", "facials", "calories", "airways", "refill", "reagan", "kazaa", "einstein", "pornstar", "vcd", "jumper", "majors", "headsets", "toxicity", "sz", "denim", "greenville", "scat", "neighborhoods", "buick", "slipknot", "mst", "residual", "bf", "bash", "ngos", "storesshop", "postgraduate", "daytona", "wastewater", "constructor", "technicians", "debbie", "issuance", "sj", "mbps", "nationals", "ij", "alito", "waterfront", "diagnosed", "biotech", "turkmenistan", "woodland", "iranian", "unsecured", "kyoto", "cis", "eb", "barcode", "xd", "regulator", "txt", "postcode", "makefile", "ansi", "vicodin", "shawn", "suv", "lacrosse", "crafted", "eritrea", "bbq", "wh", "debit", "dmx", "edits", "unwanted", "xr", "bn", "noaa", "lemma", "kyrgyzstan", "sensing", "postgresql", "kbps", "trac", "dolby", "ecosystem", "pkg", "dashboard", "nikki", "technorati", "esl", "alzheimer", "jk", "wk", "handler", "semantic", "globalization", "atv", "vga", "atari", "sch", "reebok", "mfg", "jb", "blogthis", "inspirational", "wilmington", "faso", "sdram", "motherboards", "blk", "inherent", "jw", "tailored", "vodafone", "romanian", "xt", "ucla", "celeb", "assoc", "palo", "usability", "backyard", "novell", "refunds", "newsroom", "tina", "kia", "taxpayer", "fb", "cola", "boise", "bsd", "saab", "refinancing", "cert", "buffy", "doctoral", "backpack", "npr", "identities", "tajikistan", "sheraton", "snacks", "booster", "taxable", "imc", "ufo", "linksys", "dentistry", "renal", "fedora", "nyse", "guideline", "freezer", "pcr", "bnet", "binoculars", "demographics", "enroll", "daemon", "buddies", "kc", "crashes", "outlines", "steroids", "pogo", "konica", "hotline", "amps", "accountants", "coefficient", "transvestite", "upstream", "digg", "ladyboy", "hussein", "biochemistry", "duplication", "scottsdale", "ninja", "tj", "avalon", "voucher", "tw", "wheelchair", "gw", "epidemiology", "pentagon", "diabetic", "stressed", "libdevel", "dvi", "biomedical", "gameboy", "subset", 
"gucci", "https", "websphere", "cheney", "zombie", "recycled", "followup", "nih", "hdd", "bidders", "simulator", "exporters", "ninth", "mutant", "ssh", "authoring", "specializes", "irvine", "olds", "ramp", "jakarta", "tl", "pgsql", "malls", "jensen", "impairment", "scooter", "wap", "mcgraw", "lr", "cheerleader", "edu", "lotion", "substrate", "mmc", "ashanti", "homemade", "ukrainian", "freshwater", "topical", "rms", "isdn", "coded", "alcatel", "suriname", "parkway", "femdom", "palau", "duff", "ck", "bonuses", "scam", "biking", "microsystems", "timeout", "aerosmith", "resellers", "portfolios", "ops", "semantics", "scarface", "beige", "auditing", "rolex", "amplifiers", "coli", "executable", "pentax", "restart", "overstock", "eps", "hmm", "explores", "torque", "memberships", "renting", "icann", "ticketmaster", "cdc", "meridia", "hsn", "oncology", "nf", "woven", "bloglines", "audioslave", "wikimedia", "lipitor", "remodeling", "redhat", "enom", "haha", "coordinating", "holistic", "salsa", "encarta", "childcare", "dvr", "cdn", "soundtracks", "napster", "wong", "debugging", "rechargeable", "engineered", "jerseys", "pw", "superstore", "hex", "wg", "blogroll", "evite", "micronesia", "dreamweaver", "diets", "sauna", "multiplayer", "crt", "caicos", "qaeda", "shareholder", "kitts", "tivo", "deletion", "ptr", "macau", "mudvayne", "ceramics", "freestyle", "organizers", "smartphone", "cmd", "hypertension", "searchable", "aguilera", "servicing", "counselling", "ecards", "acura", "clit", "cops", "fedex", "snowboarding", "laserjet", "cooker", "lego", "microbiology", "internships", "sgh", "vectors", "craigslist", "hamas", "shane", "heaters", "rdf", "bj", "visualization", "newswire", "hf", "spermshack", "brokerage", "overtime", "staind", "wd", "sourcing", "filings", "boeing", "sizing", "exceeded", "presley", "godsmack", "labeling", "whois", "paradigm", "msc", "linguistics", "snmp", "standardized", "liu", "gta", "nutrients", "kosovo", "barbuda", "napa", "abt", "nickelback", "lj", "nazi", "jenna", "arrays", "syllabus", "rgb", "rodriguez", "animations", "activism", "fargo", "chairperson", "reged", "leverage", "sgt", "anguilla", "radisson", "apc", "hitler", "handset", "vulnerabilities", "pga", "activist", "palestinians", "ldap", "prerequisites", "maintainer", "benq", "lx", "bv", "knoxville", "mentoring", "pak", "mos", "didnt", "classrooms", "residency", "deadlines", "tk", "bookshop", "nonetheless", "hifi", "gf", "forex", "diagnostics", "ew", "dreamcast", "tumors", "vm", "kyocera", "nudes", "rationale", "hubs", "pasadena", "bissau", "subway", "hpa", "fgets", "citrus", "cameltoe", "reuse", "sightseeing", "therapies", "widget", "renault", "comoros", "suede", "selector", "gop", "diaper", "hotwire", "ngo", "pvt", "atp", "subtotal", "coefficients", "duplex", "mvp", "jh", "analyzer", "charset", "clin", "nutrient", "zhang", "underway", "govt", "cbc", "excerpts", "formatted", "gorillaz", "inhibitors", "uu", "prestigious", "deploy", "gameplay", "autism", "taxpayers", "martinez", "bombing", "wwe", "metrics", "winxp", "inability", "goo", "coronary", "bldg", "mediated", "prom", "scans", "vaginal", "isps", "rookie", "theatrical", "interdisciplinary", "kerala", "enzymes", "analytics", "jacuzzi", "lesbianas", "parser", "razr", "jt", "styling", "snack", "weezer", "randomly", "semiconductors", "coca", "acs", "peugeot", "bollywood", "mentally", "horoscopes", "noun", "xmas", "silicone", "cpa", "dn", "scoreboard", "proliferation", "squid", "hw", "customised", "trilogy", "hike", "imdb", "clic", "ars", "pharmacist", "marley", "typepad", 
"xs", "deliveries", "recruiters", "screaming", "cygwin", "gprs", "png", "pornography", "robotics", "chopped", "contexts", "init", "svn", "oslo", "foreclosures", "audits", "pesticides", "fave", "residues", "ashlee", "viet", "orbitz", "invasive", "helsinki", "hardback", "vuitton", "nextag", "inconsistent", "narnia", "alfa", "twp", "geoff", "rename", "atx", "markup", "breakthrough", "ietf", "beneficiaries", "copier", "uncategorized", "xm", "geforce", "defaults", "foreclosure", "clarification", "espresso", "hendrix", "homeowner", "mib", "tees", "glu", "winnt", "tec", "hydro", "nonlinear", "spokane", "playa", "gh", "csi", "radioactive", "desserts", "doi", "socio", "pcmcia", "grooming", "validate", "nederlands", "bst", "filmography", "outerwear", "parse", "dsp", "implementations", "attendees", "toc", "downstream", "webcasts", "accelerator", "masterbating", "flyers", "tacoma", "radiology", "locals", "mms", "tungsten", "typed", "desc", "datasheet", "shutdown", "xenical", "computerworld", "tattoos", "peptide", "sweatshirt", "hassle", "regents", "gn", "docket", "dll", "elsevier", "nordic", "privat", "geometric", "taxonomy", "deli", "intern", "nsf", "sata", "xxxx", "megan", "allergies", "bangalore", "clutter", "predator", "xlibs", "belgian", "adolescents", "djs", "coventry", "clamp", "pricegrabber", "cloning", "args", "madden", "smugmug", "visually", "alright", "laguna", "limo", "aligned", "pesticide", "transformers", "avid", "outpatient", "lam", "encrypted", "wholesalers", "coldfusion", "dcr", "shooter", "switchboard", "vince", "fluorescent", "cookware", "lavigne", "param", "environmentally", "gradient", "ncbi", "inserts", "kvm", "programmable", "bibtex", "chemotherapy", "vr", "dysfunction", "livejournal", "diazepam", "rodeo", "sampler", "jovi", "timetable", "corrosion", "positioned", "checker", "workstations", "cathy", "darren", "cmp", "udp", "sts", "milfseeker", "sbc", "midland", "synchronization", "informatics", "oakley", "rants", "tarot", "didrex", "brenda", "purdue", "figurines", "footer", "maternal", "jedi", "seamless", "ghetto", "thr", "panty", "subunit", "aires", "commercials", "regulators", "influential", "carlson", "yy", "benchmarks", "ug", "emi", "retrieving", "reactor", "kiribati", "telnet", "biker", "parked", "financials", "peanut", "converters", "nauru", "dishwasher", "rcs", "neurons", "ios", "feminist", "yds", "ive", "ecosystems", "gadget", "cctv", "leukemia", "deco", "ticker", "habitats", "remover", "incorporates", "brasil", "unicode", "prod", "spreadsheet", "lowering", "discography", "encoded", "researching", "pediatrics", "sushi", "asap", "onsite", "mapquest", "deleting", "compilations", "therapists", "appealing", "lifestyles", "dst", "swimwear", "applet", "pricetool", "threesomes", "quinn", "daewoo", "antigen", "ultrasound", "mgmt", "procedural", "cern", "macros", "msa", "aussie", "advisories", "lendingtree", "belmont", "acad", "bilingual", "barbecue", "localization", "customization", "gigs", "indexing", "lori", "spacecraft", "ivoire", "montserrat", "telecommunication", "coatings", "eureka", "pcb", "sdk", "preparedness", "systemic", "playoffs", "adaptors", "forecasting", "specialize", "drm", "enya", "masterbation", "tubing", "bloomington", "conditioner", "plaintiffs", "vanessa", "nucleotide", "bronx", "listmania", "middot", "netgear", "panda", "crc", "symbian", "emailed", "chf", "constants", "clr", "isuzu", "webring", "redirect", "interoperability", "msrp", "tuvalu", "shampoo", "neoplasms", "artifacts", "vac", "pseudo", "dinar", "carat", "microphones", "nobel", "galaxies", 
"verlag", "scrapbook", "dummies", "magnesium", "pagina", "kenwood", "roundup", "imac", "faxes", "plump", "uss", "wwii", "methyl", "campuses", "ramada", "tesco", "dba", "architectures", "acdbline", "getty", "cdr", "msi", "prog", "firewalls", "tester", "polling", "fifa", "bins", "consumables", "highbeam", "msdn", "statistically", "mps", "agp", "cont", "adverts", "programmed", "lohan", "unclear", "aromatherapy", "nederland", "stockton", "clearwater", "trustpass", "topology", "airborne", "antennas", "sundance", "lifecycle", "dhcp", "trucking", "iraqis", "shortcut", "racist", "profitability", "unc", "fairmont", "globally", "aaliyah", "reboot", "newsgroup", "audiovox", "phuket", "jf", "metabolic", "sarasota", "billed", "lim", "toons", "danielle", "exc", "relied", "mesothelioma", "trafficking", "eff", "bizjournals", "michele", "kk", "cutie", "creampie", "seoul", "printf", "columnist", "transplantation", "jerome", "nwt", "rammstein", "scrapbooking", "sequential", "uniquely", "goodies", "auth", "gina", "sugababes", "rsa", "rcw", "whistler", "airfares", "huntsville", "ths", "layouts", "servicemagic", "herpes", "newsgator", "contractual", "akron", "bh", "rebounds", "compressor", "samantha", "khz", "webmail", "carcinoma", "taipei", "stance", "aps", "kumar", "gemini", "kinky", "supervisory", "ostg", "kl", "chiropractic", "throughput", "netbsd", "misplace", "serviced", "opener", "vaccines", "jigsaw", "jumbo", "unspecified", "jsp", "turbine", "percentages", "lett", "maths", "probes", "frustration", "americana", "complexes", "varsity", "insurer", "croatian", "multicast", "certifications", "pradesh", "px", "proton", "allegedly", "kaplan", "linens", "roast", "testers", "debuginfo", "complainant", "inhibitor", "knowledgeable", "jimi", "hummer", "telefonsex", "putative", "hyperlink", "presario", "motorsports", "getaway", "robbins", "kimberly", "unsure", "dinosaur", "tac", "ashland", "dlp", "royce", "sophomore", "antibiotics", "landfill", "warehousing", "filesize", "celebrex", "verisign", "registrations", "wavelength", "slashdot", "transvestites", "cheerleaders", "friedman", "coolpix", "blocker", "tawnee", "hud", "mov", "entrepreneurship", "percentile", "linkage", "lh", "ripper", "afp", "kd", "accomodation", "mcafee", "counselors", "competitiveness", "burger", "microscopy", "hyper", "madthumbs", "linkin", "gmail", "utf", "scooters", "reserveamerica", "organisational", "ezine", "reactive", "clipboard", "gamer", "alexa", "pollutants", "directorate", "savvy", "uploads", "terri", "norms", "implants", "alibaba", "hormones", "hype", "addr", "nfs", "urinary", "institut", "condoms", "directives", "zelda", "fetal", "dong", "reportedly", "edi", "kudoz", "replay", "flavors", "ig", "quickcheck", "ziff", "placebo", "lotto", "textures", "pid", "dep", "seagate", "nanotechnology", "toggle", "emc", "spacing", "frameworks", "mergers", "filtration", "gpa", "cpus", "incremental", "corr", "sbin", "scalable", "ji", "intra", "wetland", "olson", "methodologies", "fremont", "someday", "sha", "exporter", "mri", "hum", "ifdef", "killers", "multicultural", "lasers", "dataset", "savers", "powerpc", "steelers", "enhances", "fucks", "relational", "graffiti", "cassettes", "pussies", "doesnt", "tiff", "cnc", "refrigeration", "houghton", "countdown", "decker", "natl", "extern", "enron", "codec", "broadcasts", "checksum", "directional", "breeders", "lethal", "decals", "macs", "archival", "seismic", "baccarat", "mommy", "teenager", "smokers", "declining", "lineup", "hotspot", "bellevue", "hj", "req", "gigabit", "worksheet", "allocate", 
"aftermath", "roach", "continuum", "feng", "pep", "nylons", "chipset", "msnbc", "hillary", "factual", "carisoprodol", "tutoring", "spectroscopy", "gemstone", "psc", "phonephone", "unregistered", "moto", "gonzalez", "dior", "pops", "osha", "goldberg", "preteen", "bonding", "insurers", "prototypes", "proactive", "issuer", "sponsoring", "malaysian", "easton", "sentencing", "bulldogs", "worthwhile", "ideology", "cervical", "tallahassee", "userpic", "attribution", "acta", "yep", "iec", "differs", "starters", "uml", "bur", "kris", "sizeof", "spi", "regs", "shinedown", "standby", "arin", "unisex", "wallets", "identifiable", "ethanol", "cannabis", "rsvp", "dynamically", "grenadines", "constr", "subtitle", "librarians", "manson", "autocad", "powerbook", "swinger", "infiniti", "ppl", "williamsburg", "supp", "snyder", "budgeting", "backpacks", "resale", "mikes", "scalar", "unresolved", "hep", "seiko", "electromagnetic", "arial", "tos", "zoofilia", "hcl", "validated", "sco", "annotate", "joomla", "helix", "sx", "env", "biomass", "phs", "hierarchical", "lesions", "financed", "surnames", "reconditioned", "allergic", "rk", "abn", "eliminates", "addict", "matte", "melanie", "secunia", "metering", "genetically", "zebra", "runway", "admits", "chennai", "ions", "asshole", "faroe", "glendale", "speedway", "sweatshirts", "yay", "activex", "logon", "recruiter", "popcorn", "espanol", "disadvantaged", "trong", "niue", "ux", "supermarket", "mfr", "boo", "hmmm", "genomic", "helpdesk", "refuses", "afb", "adhd", "avian", "exe", "visas", "matrices", "anyways", "xtreme", "etiology", "tcl", "mellon", "webmd", "personalised", "hospice", "zerodegrees", "qos", "exhibitor", "sportswear", "recap", "toddlers", "astro", "chanel", "jabber", "hgh", "hx", "rotate", "fema", "subwoofer", "amortization", "neurology", "ack", "radiator", "competencies", "hotspots", "trainee", "nielsen", "podcasting", "centennial", "tuna", "bluegrass", "wipe", "acronyms", "autographed", "loader", "latency", "themed", "messy", "dmc", "ments", "empowerment", "replacements", "subtitles", "gcse", "acupuncture", "workload", "highlighting", "grassroots", "gentoo", "redevelopment", "cellphone", "sax", "triggered", "frontgate", "routinely", "asc", "uploading", "managerial", "nsu", "celine", "finepix", "wks", "tonnes", "hypermail", "thunderbird", "investigative", "letras", "bylaws", "wmv", "lao", "facesitting", "breastfeeding", "mccartney", "anglo", "kathryn", "randomized", "motivational", "gratuite", "gerry", "kappa", "neuroscience", "blender", "blaster", "remediation", "decoder", "genocide", "heathrow", "indy", "pantera", "sidebar", "authored", "snoop", "winery", "rbi", "photon", "overlay", "rusty", "pharma", "fayetteville", "champaign", "fyi", "xc", "pakistani", "ics", "apa", "bitches", "urbana", "diagnose", "secsg", "franco", "announcing", "trivium", "amature", "showroom", "cx", "swarovski", "liter", "akon", "brendan", "condosaver", "amex", "classicvacations", "blackpool", "fh", "inuyasha", "nominees", "cuz", "viewsonic", "dryers", "fujifilm", "ams", "hallmark", "counterparts", "paced", "engl", "asians", "seether", "milestones", "parkinson", "mclean", "checkboxes", "lobbying", "mgm", "cinemas", "islander", "encoder", "importers", "impressum", "phe", "maroon", "kontakt", "ers", "kawasaki", "licences", "bose", "fountains", "clones", "crossover", "situ", "specificity", "runoff", "osteoporosis", "approvals", "bea", "jukebox", "nexus", "cancers", "tango", "melting", "garner", "aba", "karate", "qb", "optimizing", "switchfoot", "coldplay", "vioxx", "tty", "bsc", 
"celexa", "guitarist", "symmetric", "kuala", "bbb", "geeks", "jg", "repec", "insightful", "unrated", "diva", "adsense", "exemptions", "integrates", "csa", "bookstores", "cimel", "hvac", "leica", "agendas", "nws", "busch", "armani", "bipolar", "menopause", "inbound", "shortlist", "gainesville", "tiava", "eclectic", "headphone", "regimes", "readme", "binder", "xemacs", "helicopters", "ngc", "intercontinental", "workspace", "customizable", "softcover", "realtime", "electrons", "subsystem", "appl", "kinetic", "caffeine", "xf", "nib", "httpd", "slac", "calorie", "graphite", "stroller", "bowel", "sweaters", "mafia", "futuna", "predictable", "susceptible", "insest", "skyline", "sulfur", "scams", "lipid", "tao", "quot", "ritz", "networked", "localhost", "cabling", "stills", "perimeter", "biased", "cardiology", "playoff", "sti", "chiang", "payload", "merrill", "oldsmobile", "grilled", "misty", "conserved", "searchsearch", "rewrite", "vending", "keygen", "janeiro", "heh", "transexuals", "prentice", "cumbria", "diaz", "vegan", "congressman", "recombinant", "ubuntu", "superstar", "closeout", "corel", "kayaking", "synergy", "eta", "backpacking", "accidentally", "bonded", "sticking", "dudley", "osama", "oprah", "inflatable", "beers", "glassware", "amc", "kos", "coursework", "kayak", "mayotte", "repetitive", "gears", "orbital", "musicals", "lithuanian", "amatuer", "profiling", "reps", "hn", "sequencing", "panoramic", "deskjet", "rhino", "polynomial", "tau", "nsa", "stakeholder", "signifies", "stochastic", "psu", "santana", "kidding", "swansea", "airmail", "problematic", "roadmap", "ogg", "lesbo", "farrell", "acknowledgements", "tnt", "skincare", "heroin", "mandated", "workbook", "xslt", "hogan", "omg", "sulfate", "timeshare", "oldies", "complaining", "debra", "cdrom", "cle", "thrillers", "fortran", "timeless", "spouses", "vv", "ninety", "tyr", "cues", "bioinformatics", "chung", "subpart", "scheduler", "hypnosis", "kat", "cornerstone", "recycle", "sos", "lsu", "gao", "applicability", "volatility", "uid", "hoteles", "fav", "disneyland", "umd", "gdb", "bro", "offs", "listserv", "fab", "cond", "tokelau", "conformance", "diecast", "bittorrent", "frankie", "oa", "iu", "vf", "alprazolam", "collaborate", "positives", "hunk", "allocations", "lymphoma", "rpc", "freebies", "frontline", "thb", "tele", "imap", "winamp", "stoke", "idg", "polymers", "grills", "phat", "zz", "escrow", "lumpur", "dds", "infospace", "surfers", "kauai", "licensors", "cpc", "stresses", "webhosting", "peoria", "peek", "alr", "ipsec", "bournemouth", "sudoku", "undef", "campground", "sars", "cme", "predictive", "vlan", "aquaculture", "sendmail", "redesign", "nitro", "jackpot", "cortex", "entitlement", "secs", "mixers", "accountancy", "policing", "michaels", "ecc", "kj", "similarities", "kv", "hipaa", "neutron", "duluth", "dogg", "folklore", "dimm", "acoustics", "pensacola", "crs", "condominium", "wildcats", "exhibitors", "ssi", "redwood", "invoices", "tyres", "westwood", "gly", "estonian", "bomber", "songwriter", "shania", "coaster", "typedef", "strippers", "macmillan", "aac", "woodworking", "cbd", "pricerunner", "afl", "catalytic", "bethesda", "privatization", "sourceforge", "sanford", "membranes", "testosterone", "nunavut", "biochemical", "lennon", "suitability", "lara", "kx", "invitational", "handcrafted", "aftermarket", "fellowships", "freeway", "digitally", "hatchback", "rfp", "coa", "subclass", "rutgers", "sampled", "deploying", "interacting", "roanoke", "treadmill", "fiberglass", "osaka", "personalize", "broncos", "jorge", 
"classifications", "diggs", "rafting", "sle", "jv", "safaris", "contaminants", "scr", "mitch", "mailer", "liners", "asheville", "quinta", "kristin", "bistro", "lw", "voodoo", "caching", "volts", "excalibur", "bots", "sinatra", "interpersonal", "traumatic", "ringer", "zipper", "meds", "briefings", "siblings", "adversely", "pitcairn", "pdb", "onboard", "nucleic", "telecoms", "hehe", "celeron", "lynne", "invariant", "challenger", "redistributed", "uptake", "newsweek", "geared", "svc", "prada", "tycoon", "maxtor", "plone", "dcp", "biochem", "pte", "ors", "compactflash", "antibiotic", "vanderbilt", "cps", "overweight", "metasearch", "taliban", "maureen", "trekking", "coordinators", "digi", "shoreline", "westin", "middleware", "mips", "roundtable", "dementia", "levine", "ripencc", "shoppy", "filesystem", "pow", "docking", "guidebook", "atreyu", "kylie", "pilates", "backstreet", "packers", "localized", "lic", "docume", "xy", "fte", "stl", "yd", "archiving", "disconnect", "multilingual", "gsa", "immunization", "ciara", "cumming", "interviewing", "categorized", "cmos", "transmissions", "receivable", "ronnie", "implant", "playlists", "thematic", "brentwood", "correctional", "katz", "jojo", "buffers", "talkback", "servings", "kobe", "baylor", "otc", "frustrating", "ssa", "zeta", "dinnerware", "sclerosis", "emotionally", "carbohydrate", "estrogen", "odbc", "ipods", "openbsd", "federated", "shui", "rockford", "staging", "statistic", "torino", "schizophrenia", "predators", "mpi", "adhesives", "inventories", "uf", "brokeback", "dumping", "ow", "econ", "footjob", "warez", "magenta", "tagging", "overly", "triggers", "constructs", "impedance", "dragonfly", "underoath", "refundable", "hbo", "billboard", "huang", "sportsbook", "layered", "neurological", "subs", "watchdog", "starbucks", "ibook", "viability", "kh", "filler", "smiley", "genomics", "yi", "yum", "researched", "copiers", "ovarian", "airplanes", "cello", "wlan", "sweepstakes", "antigens", "midtown", "stabilization", "kinetics", "cocos", "impacted", "rumsfeld", "beanie", "thurs", "spaced", "freq", "segmentation", "soaps", "courthouse", "entrepreneurial", "lebanese", "psycho", "maharashtra", "ricoh", "nrc", "chavez", "asst", "overload", "vikings", "kanye", "bootstrap", "wtf", "humane", "scm", "travelocity", "fno", "twink", "nortel", "koh", "affiliations", "pussycat", "appropriated", "escherichia", "mallorca", "reversible", "spd", "oj", "unclassified", "bookshelf", "htdocs", "fps", "initialization", "expat", "raider", "farmington", "timers", "enrolment", "glibc", "lawmakers", "larson", "photosmart", "centrally", "acl", "luv", "dealership", "eyewear", "bakersfield", "decal", "addictive", "clarinet", "fiona", "vn", "gigabyte", "dbz", "rainforest", "federally", "macos", "multinational", "pornstars", "nope", "evo", "aspirin", "spoilers", "machining", "malibu", "gatwick", "shaun", "redundancy", "emo", "detox", "skateboard", "automate", "drosophila", "branson", "ortho", "appraisals", "flashes", "lakewood", "drupal", "prac", "carers", "kramer", "usaid", "idc", "keypad", "richland", "microbial", "adc", "caregivers", "quark", "zyban", "electronica", "mitochondrial", "grinder", "angie", "octet", "wj", "cre", "dinosaurs", "mccoy", "vibe", "snapshots", "ubc", "meth", "trendy", "inpatient", "filming", "fread", "backend", "cartier", "ageing", "containment", "keynes", "protections", "aliases", "maximizing", "handsfree", "tomcat", "walmart", "interestingly", "jules", "ernie", "elem", "organisers", "pissed", "nite", "mckenzie", "lenox", "darussalam", "genital", "mcse", 
"cajun", "csu", "algebraic", "astm", "kristen", "fsa", "sgd", "chromatography", "overdose", "nad", "gallagher", "mueller", "cao", "ladyboys", "orgasms", "plantronics", "ftd", "freezers", "ibiza", "reese", "digimon", "gastrointestinal", "inspiron", "pagerank", "asm", "smb", "contrib", "blu", "matlab", "netware", "bse", "megapixels", "retriever", "svalbard", "pixar", "dhtml", "winme", "func", "gamespy", "standalone", "antitrust", "equine", "bros", "proto", "jared", "tehran", "dal", "anesthesia", "filemaker", "libtool", "wrongful", "signage", "psy", "encode", "admins", "moc", "dau", "alvin", "accolades", "raton", "stefani", "infertility", "servlet", "collage", "aces", "depeche", "benchmarking", "xxl", "teleflora", "bankruptcies", "gauges", "blueprint", "mccain", "spiderman", "bridging", "flick", "datum", "canceled", "empowering", "ymca", "facilitator", "bos", "macworld", "wwf", "galveston", "rockville", "banff", "smc", "lq", "serv", "ipo", "tek", "ipc", "timestamp", "musica", "bib", "stevie", "rivera", "dermatology", "sandbox", "mdt", "pinkworld", "cambridgeshire", "premiership", "luton", "conftest", "recursive", "registerregister", "fluorescence", "kosher", "additives", "marketed", "mandrake", "camper", "cpr", "liquidity", "lasik", "galactic", "merchandising", "ombudsman", "registrant", "firefighters", "placements", "ih", "elec", "levin", "academia", "amiga", "descriptor", "pimp", "gimp", "cyclic", "swimsuit", "morphology", "versace", "printprinter", "condom", "westerns", "dodgers", "litre", "correlations", "textual", "handsets", "gandhi", "inks", "diarrhea", "seahawks", "mondays", "insertions", "itk", "kms", "couture", "ativan", "summarize", "savesave", "laminated", "citrix", "backups", "turismo", "animalsex", "mayhem", "washers", "grep", "xeon", "polymerase", "optimisation", "easyshare", "cvsroot", "joplin", "dialup", "nx", "thn", "afro", "biosynthesis", "prosecutors", "alloys", "getaways", "miquelon", "wonderland", "zine", "conn", "truman", "jin", "asynchronous", "carla", "messageslog", "clearinghouse", "dwi", "facilitates", "specialised", "ramones", "everquest", "bernstein", "skis", "calc", "marketers", "itc", "lipstick", "brennan", "kpx", "saturation", "stamford", "alamo", "comcast", "hyderabad", "attn", "spaghetti", "tues", "boogie", "abramoff", "ean", "fla", "utilizes", "lesbos", "fasteners", "sakai", "lk", "rajasthan", "committing", "inlog", "laminate", "earring", "aggregator", "datatype", "postnuke", "ergonomic", "dma", "sme", "kp", "refills", "ibis", "yyyy", "unidentified", "atl", "ims", "tractors", "vx", "spp", "coed", "audiobooks", "sheikh", "gk", "hernandez", "kiwi", "ohm", "truste", "acreage", "mfc", "fingerprint", "sorority", "audition", "mca", "plano", "nmr", "lortab", "leveraging", "psychotherapy", "mso", "htm", "stokes", "lakers", "ats", "saxophone", "cocktails", "steroid", "communicator", "horticulture", "dhs", "resets", "util", "ordinator", "bono", "acronym", "veritas", "breathtaking", "streamline", "crowne", "brunch", "pundit", "figurine", "mutants", "cyberspace", "expiry", "exif", "goldman", "msu", "inning", "fries", "initialize", "tlc", "sybase", "foundry", "toxicology", "mpls", "bodybuilding", "fta", "nostalgia", "acetate", "pls", "bmx", "saratoga", "terminator", "badminton", "cyan", "cory", "stacey", "serif", "portability", "fsb", "yearbook", "lubricants", "cns", "hv", "alameda", "aerosol", "mlm", "clemson", "goin", "philly", "coolers", "multilateral", "costello", "audited", "galore", "aloha", "dehydrogenase", "aq", "gx", "postfix", "fj", "altavista", "exponential", 
"shi", "gev", "secretarial", "todays", "toaster", "cater", "omb", "bac", "kart", "cpl", "sbs", "putin", "questionnaires", "profileprofile", "serials", "equivalence", "vaughn", "aviv", "condominiums", "schematic", "liposuction", "swf", "apoptosis", "pneumatic", "sniper", "vertices", "additive", "professionalism", "libertarian", "rus", "washable", "normalized", "uninstall", "scopes", "fundraiser", "troll", "teamwork", "auditions", "refrigerators", "redirected", "middletown", "widgets", "ontology", "timberland", "mags", "videogames", "concluding", "vallarta", "chopper", "pinball", "pharmacists", "surcharge", "tbd", "ipb", "latvian", "asu", "installs", "malware", "tsn", "nguyen", "horsepower", "algae", "sarbanes", "alcoholism", "bdd", "csc", "maximal", "prenatal", "documenting", "scooby", "moby", "leds", "mcbride", "scorecard", "gln", "beirut", "conditioners", "culturally", "ilug", "janitorial", "propane", "appendices", "collagen", "gj", "nigerian", "ect", "sto", "makeover", "esc", "dragonball", "chow", "stp", "cookbooks", "spoiler", "ari", "avr", "lamborghini", "polarized", "baroque", "ppt", "jihad", "sharepoint", "cts", "abit", "abnormalities", "qtr", "blogshares", "motorsport", "septic", "citroen", "gz", "predicts", "palmone", "expedited", "curricula", "wmd", "pms", "raped", "configurable", "denon", "sloan", "flawed", "cfs", "checkpoint", "rosenberg", "ffi", "iriver", "callaway", "tcm", "dorm", "lakeside", "marquette", "interconnection", "gilmore", "prc", "taxis", "hates", "gamefaqs", "cookers", "ultraviolet", "afc", "haitian", "dialing", "unicef", "identifiers", "mentors", "steiner", "licensure", "tammy", "tz", "dcs", "soybean", "affirmed", "posix", "brewers", "mci", "retractable", "quickbooks", "townhouse", "stormwater", "sgi", "coco", "pipelines", "rudy", "tia", "congrats", "msds", "arafat", "srl", "splitter", "wai", "standardization", "lakeland", "thiscategory", "classy", "acxiom", "triathlon", "kbytes", "thx", "textured", "doppler", "entropy", "snooker", "unleashed", "lux", "nairobi", "importer", "isl", "orioles", "rotor", "theres", "ttl", "dreamy", "backstage", "qq", "lubbock", "suvs", "bmp", "gasket", "firearm", "dss", "bam", "closures", "participatory", "micron", "budgetary", "pcos", "ssk", "pantie", "bombers", "spongebob", "markus", "ideological", "wellbutrin", "rheumatoid", "swindon", "cabernet", "sek", "dsm", "understandable", "shea", "doctorate", "binaries", "slovenian", "showdown", "simone", "spc", "potentials", "tempe", "hklm", "cores", "borrowers", "osx", "bouvet", "multifunction", "nifty", "unveils", "skeletal", "dems", "oahu", "rollover", "infos", "lds", "thanx", "anthrax", "shockwave", "westlife", "bpm", "tamiflu", "touchdown", "planar", "adequacy", "iomega", "xa", "fetisch", "eastman", "franchising", "coppermine", "ged", "ecard", "ue", "kn", "ferries", "faqfaq", "muller", "fudge", "extractor", "usergroupsusergroups", "svenska", "pcg", "myocardial", "everytime", "callback", "encompasses", "sander", "conductivity", "atc", "vicki", "danville", "sedona", "skateboarding", "lexisnexis", "deepthroat", "outback", "reiki", "biopsy", "peptides", "awakenings", "pim", "sediments", "appraiser", "smp", "gaussian", "hustler", "tensions", "linkages", "separator", "schultz", "adr", "concordia", "recon", "fileplanet", "royals", "globalisation", "borland", "pastel", "nottinghamshire", "strollers", "uninsured", "picasso", "mcgill", "discriminatory", "headquartered", "travelodge", "empower", "hurley", "pedals", "teak", "bitmap", "migraine", "sli", "enum", "lamar", "aes", "methane", "pager", 
"snp", "aclu", "westchester", "nimh", "quilting", "campgrounds", "adm", "densities", "isd", "tional", "turnaround", "navigational", "stargate", "saskatoon", "cen", "minh", "fingertips", "sba", "rockwell", "vl", "pepsi", "rea", "oversized", "snr", "sibling", "ecs", "burberry", "nrs", "cfa", "inhibit", "pps", "screenplay", "unabridged", "ntp", "endpoint", "labelling", "synchronous", "heartland", "cafeteria", "outfitters", "opp", "homelessness", "opengl", "efficiencies", "blowout", "tickboxes", "oversee", "thresholds", "isnt", "waveform", "deficits", "flair", "applegate", "whitewater", "tableware", "bernie", "workgroup", "clement", "cli", "robotic", "mana", "mississauga", "dialysis", "filmed", "staten", "carole", "schwarzenegger", "summarizes", "sludge", "crypto", "christensen", "heavyweight", "lps", "zach", "pdp", "phantomnode", "comptroller", "scalability", "creatine", "embl", "minimizing", "gpo", "dq", "relativity", "mojo", "econo", "shapiro", "rituals", "pq", "ub", "epoxy", "watercolor", "uncensored", "trainees", "tori", "effluent", "infousa", "storytelling", "polarization", "bombings", "smes", "ionamin", "fuckin", "charlottesville", "xu", "aniston", "barred", "equities", "feeders", "jboss", "mobil", "scrolling", "diode", "kaufman", "aloe", "buckinghamshire", "medford", "underlined", "whores", "gemstones", "bmi", "viewpoints", "exim", "appalachian", "dealings", "phillies", "ramblings", "janis", "centric", "optionally", "nightclub", "geophysical", "fictional", "golfing", "rubin", "handlers", "topeka", "openoffice", "bugzilla", "linus", "taco", "mcsg", "humboldt", "scarves", "mla", "repertoire", "emeritus", "macroeconomic", "gundam", "adaptec", "tailed", "voyer", "hostname", "excl", "bx", "arr", "typo", "merchantability", "autodesk", "jn", "winged", "attacker", "catcher", "haynes", "siyabona", "inverter", "abi", "motivate", "mackay", "bridgeport", "assessor", "fullerton", "cpp", "blockbuster", "dz", "amarillo", "pixmania", "pathfinder", "bonsai", "windshield", "tomtom", "spf", "croydon", "convection", "jdbc", "debugger", "boing", "ancillary", "pointless", "alibris", "factoring", "gyms", "inhalation", "faucet", "bitpipe", "arguably", "techs", "electives", "walkman", "midget", "quan", "commissioning", "experimentation", "saltwater", "cpi", "nis", "wacky", "sgml", "anemia", "biting", "reits", "savanna", "crn", "travestis", "mmf", "cancellations", "paging", "coe", "nudists", "fac", "asean", "airsoft", "bontril", "proliant", "keeling", "zh", "accesses", "jive", "bullshit", "casper", "libstdc", "xpress", "datasets", "webdesign", "nicotine", "comeback", "gannett", "curricular", "downtime", "takeover", "lolitas", "thessalonians", "upto", "joaquin", "transistor", "spotting", "wagering", "everest", "disregard", "hanger", "outkast", "pitbull", "rtf", "fairview", "hires", "alienware", "mainframe", "indo", "compilers", "guinness", "heartbeat", "blazer", "timezone", "merck", "tanya", "bmc", "eia", "colleen", "bbbonline", "participates", "syndicated", "lexicon", "integers", "zirconia", "shortages", "plumbers", "jfk", "raf", "igor", "hama", "patton", "pei", "surfer", "diapers", "eas", "waco", "physiol", "adp", "outbound", "breakout", "fakes", "stderr", "kev", "fomit", "injections", "remortgage", "yogurt", "complies", "workaround", "polytechnic", "uber", "shoppe", "berlios", "csr", "penthouse", "synthase", "pistons", "emule", "sauvignon", "bayer", "carrera", "dvb", "cation", "scientology", "cdma", "maxi", "msm", "rac", "feminism", "topps", "webinar", "dewalt", "turnout", "bruins", "clamps", "firefly", 
"tabletop", "monoclonal", "wholesaler", "typekey", "partnering", "mage", "sqrt", "israelis", "cdp", "headlights", "monophonic", "proquest", "sergio", "swapping", "mev", "particulate", "bedfordshire", "rockport", "nist", "negotiable", "subcategories", "quarterback", "sudbury", "hectares", "upscale", "scrabble", "sdn", "mta", "docbook", "kiosk", "firstgov", "hoodie", "hoodia", "payout", "clinically", "metacritic", "obligated", "decoding", "presenters", "teal", "epstein", "weblogic", "ity", "covington", "esd", "interconnect", "chinatown", "mindless", "purifier", "kz", "greedy", "rodgers", "gloryhole", "suppl", "hotjobs", "downing", "gnd", "libc", "societal", "astros", "halogen", "wyndham", "osu", "tuesdays", "utp", "superpages", "coaxial", "jpy", "liam", "sesso", "arabidopsis", "argv", "hanoi", "ccm", "faucets", "ballistic", "payouts", "rockin", "supermarkets", "bmg", "nacional", "csv", "telstra", "contraception", "polaroid", "underage", "cardio", "timeshares", "atk", "qi", "logger", "kool", "oki", "birding", "detainees", "indi", "lymph", "barrie", "pollutant", "closeouts", "tolkien", "undp", "jbl", "weekday", "homecoming", "increments", "kurdish", "chromium", "mccormick", "pcm", "confrontation", "shreveport", "grower", "frederic", "unpredictable", "dtd", "capacitor", "burnett", "hilfiger", "mda", "litres", "moroccan", "nightwish", "hess", "wheaton", "motorized", "subgroup", "chevelle", "vets", "assays", "ramon", "longhorn", "backdrop", "aerobic", "vgroup", "thursdays", "dansk", "tenerife", "mayen", "oldmedline", "dunlop", "caa", "modernization", "xe", "fourier", "businessman", "watersports", "lucent", "commuter", "orthopedic", "hhs", "tyrosine", "shenzhen", "initiating", "grabs", "erickson", "marlin", "casserole", "canoeing", "cca", "ophthalmology", "geile", "clubhouse", "licensees", "evaluates", "svg", "protesters", "fernandez", "mvc", "sleazydream", "patti", "mz", "sennheiser", "sheehan", "maven", "commute", "staged", "transgender", "customizing", "subroutine", "pong", "hertz", "myr", "bridgewater", "firefighter", "propulsion", "westfield", "catastrophic", "fuckers", "blower", "tata", "giclee", "groovy", "reusable", "actuarial", "helpline", "erectile", "timeliness", "obstetrics", "chaired", "agri", "repay", "prognosis", "colombian", "pandemic", "mpc", "fob", "dimage", "fetus", "determinants", "durango", "noncommercial", "opteron", "superannuation", "ifs", "haas", "wimbledon", "documentaries", "mpa", "rao", "remake", "arp", "braille", "physiopathology", "seperate", "econpapers", "arxiv", "pax", "kalamazoo", "taj", "sinus", "maverick", "anabolic", "allegra", "lexar", "videotape", "educ", "amplification", "larsen", "huron", "snippets", "conserv", "dustin", "wsop", "composites", "wolverhampton", "banning", "cpt", "gauteng", "ftc", "watertown", "pathogens", "mft", "uefa", "jacking", "radiohead", "ooh", "subsections", "definately", "bod", "yin", "tiki", "homepages", "handouts", "cpm", "marvelous", "bop", "asnblock", "stretches", "biloxi", "indymedia", "clapton", "beyonce", "smf", "nabble", "intracellular", "infoworld", "boyz", "waltham", "geisha", "dblp", "briefcase", "mcmahon", "cq", "mcgregor", "modal", "marlboro", "grafton", "phishing", "addendum", "foia", "kirsten", "yorker", "memberlistmemberlist", "gam", "intravenous", "ashcroft", "loren", "newsfeed", "carbs", "yakima", "realtones", "xtc", "vdata", "interpro", "engadget", "tracey", "wac", "darfur", "fragmentation", "behavioural", "kiev", "paranormal", "glossaries", "sonyericsson", "dex", "emoticons", "carbohydrates", "hms", "norwood", 
"appetizers", "webmin", "stylesheet", "goldstein", "wnba", "englewood", "asf", "hottie", "stripper", "pfc", "adrenaline", "mammalian", "opted", "meteorology", "analyzes", "pioneering", "ctx", "spreadsheets", "regain", "resize", "medically", "tweak", "mmm", "alicante", "graders", "shrek", "universidad", "tuners", "slider", "cymru", "fprintf", "irq", "dads", "sdl", "ebusiness", "hays", "cyrus", "courtroom", "baht", "relocating", "synth", "filthy", "subchapter", "ttf", "optimizations", "infocus", "bellsouth", "sweeney", "aca", "fpo", "layup", "laundering", "fre", "nazis", "cumfiesta", "newbies", "mds", "piles", "vaginas", "bezel", "avatars", "twiztid", "facilitation", "ncr", "xb", "voc", "rts", "applets", "pdfs", "cac", "teh", "undercover", "substrates", "evansville", "joystick", "knowledgebase", "forrester", "xoops", "rican", "uptime", "dooyoo", "spammers", "nuclei", "gupta", "tummy", "axial", "aest", "topographic", "westport", "majordomo", "wednesdays", "burgers", "rai", "watchlist", "campers", "phenotype", "countrywide", "affirm", "directx", "resistor", "bhd", "audubon", "commentsblog", "snowmobile", "publ", "cpg", "subparagraph", "weighting", "rectal", "mckinney", "hershey", "embryos", "garages", "sds", "urology", "aforementioned", "rihanna", "tackling", "obese", "melvin", "collaborations", "isolates", "velcro", "worksheets", "avaya", "srs", "wigan", "hua", "abba", "qd", "orig", "huskies", "frey", "iz", "loyola", "gartner", "xda", "strapon", "chaser", "astra", "expasy", "overdrive", "ripley", "phosphorylation", "cfo", "depletion", "neonatal", "qr", "mclaren", "rowling", "vhf", "flatbed", "golfers", "lira", "technics", "damien", "clippers", "spirited", "gv", "staa", "recharge", "openid", "sassy", "demux", "ribosomal", "tdk", "filmmakers", "transnational", "paralegal", "spokesperson", "fha", "teamed", "preset", "iptables", "pocketpc", "nox", "jams", "pancreatic", "tran", "manicures", "sca", "tls", "prweb", "holloway", "cdrw", "plz", "nadu", "underwriting", "rulemaking", "valentino", "prolyte", "millenium", "collectable", "stephan", "aries", "ramps", "tackles", "dsa", "walden", "catchment", "targus", "tactic", "ess", "partitioning", "voicemail", "acct", "shimano", "lingere", "parentheses", "contextual", "qwest", "jira", "cerevisiae", "dyson", "toxins", "camaro", "cryptography", "signalling", "daycare", "murakami", "merriam", "scorpio", "attr", "emp", "ultrasonic", "ashford", "intergovernmental", "paranoid", "dino", "xvid", "dmoz", "ivtools", "barron", "snorkeling", "chilean", "avs", "suny", "gifs", "qualifier", "hannover", "fungal", "ligand", "aust", "peoplesoft", "freelists", "coastline", "omit", "flamingo", "deformation", "orf", "pfizer", "assembler", "renovations", "genbank", "broadcasters", "employability", "noodles", "retardation", "supervising", "freeport", "lyme", "corning", "prov", "dishnetwork", "amg", "claremont", "moo", "cpe", "childs", "bizkit", "blogosphere", "endocrine", "resp", "carlsbad", "ammo", "bling", "chars", "mcguire", "utilisation", "rulings", "sst", "geophysics", "slater", "broccoli", "foreach", "oakwood", "mcgee", "kissimmee", "linker", "tetris", "tds", "synchronized", "hsbc", "shellfish", "astoria", "trajectory", "epsilon", "knowles", "astrophysics", "hansard", "lai", "authorisation", "vampires", "relocate", "nerd", "dac", "glazing", "provisioning", "mnt", "expandable", "maserati", "bender", "reliably", "fas", "sendo", "hasbro", "corba", "polski", "multidisciplinary", "ventricular", "petersen", "bans", "macquarie", "pta", "poy", "mao", "transferable", "yummy", 
"momma", "lehigh", "concordance", "greenberg", "trish", "electrodes", "svcd", "cron", "darth", "cramer", "yup", "ching", "melanoma", "thug", "yugoslav", "occ", "cpan", "bizjournalshire", "tco", "shaver", "grammy", "fibrosis", "opel", "hummingbird", "ported", "eeo", "polyethylene", "parametric", "awarding", "dkk", "superbowl", "sse", "haskell", "flatware", "skid", "eyeglasses", "fenton", "polaris", "formulations", "bgp", "parenthood", "latinos", "artworks", "doherty", "dnc", "bci", "allegheny", "arenas", "aaaa", "compressors", "exclusives", "lounges", "consultative", "lst", "ais", "conveyor", "normative", "surg", "rst", "longtime", "ecm", "mckay", "spe", "solver", "ani", "lacie", "solvents", "kudos", "jens", "creams", "poo", "handbooks", "agm", "shawnee", "crowley", "butalbital", "artifact", "mdot", "coldwell", "qs", "depts", "veterinarian", "merseyside", "cso", "krona", "disseminate", "puget", "coasters", "geologic", "fleetwood", "feldman", "endocrinology", "replicas", "polygon", "mcg", "kwazulu", "servo", "riparian", "guelph", "tenuate", "curator", "jaime", "mower", "gamestats", "lvl", "faxing", "meyers", "testsuite", "stressful", "extranet", "remastered", "teac", "neg", "rma", "eastwood", "handspring", "gerber", "duran", "aquarius", "stencil", "srp", "scifi", "redirection", "showcases", "hmv", "refinery", "abort", "drs", "schroeder", "indent", "chardonnay", "removals", "antrim", "accelerating", "guesthouse", "bz", "insiders", "duvet", "decode", "looney", "brigham", "mts", "jewelers", "juneau", "dilution", "veterinarians", "colourful", "grids", "sightings", "binutils", "spacer", "microprocessor", "deloitte", "claiborne", "clie", "cdm", "spills", "assistive", "chronograph", "refunded", "sunnyvale", "spamcop", "lovin", "embracing", "minimise", "salinity", "nbsp", "specialising", "handout", "routledge", "ramirez", "haiku", "paisley", "telemarketing", "cutoff", "visuals", "ccs", "breads", "seg", "martina", "mclaughlin", "headlight", "kemp", "sla", "pipermail", "sonneries", "clinicians", "entertainers", "tripp", "peterthoeny", "blockers", "stash", "jamaican", "semen", "endogenous", "memorex", "showtime", "narcotics", "oceanfront", "flange", "realplayer", "mcc", "mpaa", "gogh", "allentown", "romero", "bnwt", "predefined", "buzznet", "melodic", "isi", "naics", "transgenic", "axim", "brookfield", "endorsements", "viscosity", "cve", "bengals", "estimator", "cls", "concurrently", "leafs", "electrician", "mayfield", "ftse", "samui", "bleach", "unauthorised", "wolverine", "individualized", "ecn", "raffle", "shredder", "embedding", "hydrology", "mascot", "lube", "launcher", "mech", "primers", "caregiver", "lupus", "sachs", "qtek", "oy", "twn", "keane", "gator", "memberlist", "utd", "nordstrom", "roseville", "dishwashers", "walla", "remixes", "cozumel", "replicate", "taped", "mcgrath", "biometric", "incubation", "aggregates", "wrangler", "asymmetric", "cytochrome", "xfm", "sps", "shure", "mcs", "donating", "antec", "giveaway", "cmc", "alyssa", "cnt", "renter", "vmware", "patel", "honeywell", "nightclubs", "barrington", "luxor", "caterers", "capacitors", "rockefeller", "checkbox", "itineraries", "reagents", "christoph", "walkers", "eek", "ensembl", "weekdays", "computations", "wineries", "vdc", "booker", "mattel", "diversification", "wsdl", "matic", "xyz", "antioxidant", "esrb", "archos", "semesters", "naruto", "storyline", "melrose", "streamlined", "analysing", "airway", "iconv", "commas", "vicky", "helvetica", "ssp", "submitter", "cambria", "icp", "manifestation", "subsets", "blazers", "jupitermedia", 
"merritt", "triad", "webpages", "yp", "clinique", "fitch", "charting", "ugm", "fixation", "bsa", "lenovo", "alamos", "leach", "gravitational", "cyrillic", "prevacid", "designee", "sunni", "netflix", "monoxide", "groupee", "hardin", "colorectal", "outage", "chunky", "raptor", "ima", "coulter", "iain", "mtn", "pbx", "quantify", "dmesg", "elfwood", "substitutions", "lancome", "galleria", "inv", "hillsborough", "booklets", "pln", "cin", "msp", "gluten", "spanked", "orthopaedic", "medi", "nrt", "obispo", "minogue", "turbines", "notepad", "crappy", "golfer", "afs", "receivables", "scripps", "livermore", "cirque", "ost", "marxism", "escondido", "diffraction", "aha", "outlining", "subtract", "bosnian", "hydration", "havent", "preferential", "dre", "interns", "quotas", "methodological", "aarp", "gettysburg", "iseries", "menlo", "walkthrough", "bikinis", "aopen", "bookcrossing", "addicts", "epithelial", "drastically", "clarks", "groupware", "matchmaking", "dict", "descriptors", "aeronautics", "radiography", "norsk", "nps", "afr", "expr", "ejb", "refereed", "afi", "toxin", "poynter", "filmmaker", "grounding", "smartphones", "calvert", "fiduciary", "bayesian", "saccharomyces", "cfp", "humps", "osi", "zimmerman", "javier", "romantics", "trimmer", "bookkeeping", "hmo", "hikes", "kickoff", "magick", "hillsboro", "blm", "fractal", "mtg", "guildford", "twill", "therapeutics", "disruptive", "kicker", "protease", "abrams", "moreno", "newsforge", "timex", "duffy", "racers", "cma", "pairing", "kirkland", "gujarat", "dkny", "catfish", "doubletree", "brink", "transex", "tdd", "hotpoint", "anthologies", "retirees", "dcc", "btu", "investigates", "chelmsford", "anonymity", "gotham", "lyle", "pinot", "responsiveness", "gazetteer", "jacobson", "kda", "imitrex", "monash", "binghamton", "connolly", "homology", "rpms", "psychedelic", "gyn", "rhinestone", "ely", "quadratic", "philharmonic", "dynamical", "cantonese", "quran", "turnovr", "keychain", "shakers", "inhibited", "lexical", "openssl", "ugg", "mathematica", "karachi", "missoula", "abilene", "fdid", "snes", "swat", "pune", "trashy", "expended", "webct", "pvr", "handycam", "zn", "strategically", "dms", "anus", "dnr", "deputies", "emergent", "erika", "authenticate", "aligning", "nautilus", "doulton", "rtp", "dracula", "umm", "modding", "eap", "shaman", "letra", "mandriva", "seti", "extracellular", "jaipur", "stockport", "eiffel", "plywood", "dnp", "morbidity", "wimax", "effexor", "binders", "custodial", "combi", "integrator", "sonnerie", "teri", "sectoral", "trombone", "postsecondary", "rbd", "ambulatory", "lookin", "xff", "camouflage", "beckham", "dispensers", "firebird", "qu", "showbiz", "hbox", "waikiki", "lng", "pds", "antiqua", "boxers", "asics", "barbeque", "workouts", "ini", "mrc", "seamlessly", "ncc", "girlfriends", "songbook", "hepatic", "copeland", "swanson", "aquifer", "ldl", "pgs", "xga", "svensk", "stereotypes", "marlins", "shelly", "exiting", "saginaw", "polyurethane", "seks", "textus", "johansson", "spraying", "hamburger", "reactivity", "lieberman", "windchill", "storefront", "eof", "codeine", "tetex", "cheerleading", "wellbeing", "pkwy", "hairdryer", "punitive", "exon", "outsource", "thier", "siebel", "captions", "kf", "chromosomes", "emailing", "manic", "novotel", "ndp", "transmitters", "nicola", "minidv", "collaborating", "tuxedo", "receptus", "michelin", "bicycling", "itt", "blueberry", "schumacher", "socioeconomic", "hamster", "bushnell", "ergonomics", "finalize", "lumens", "sudanese", "softpedia", "iff", "faceplate", "packer", "ibs", "broward", 
"globus", "pir", "reco", "softcore", "referencing", "typ", "guangzhou", "nader", "militants", "resins", "cougar", "montrose", "surreal", "irradiation", "redesigned", "raster", "credential", "checklists", "quirky", "oscillator", "finalists", "encrypt", "mgt", "sneakers", "incontinence", "pajamas", "murdoch", "dali", "lubricant", "quests", "mgr", "outsourced", "jody", "plasmid", "schiavo", "unbeatable", "upstate", "lymphocytes", "repayments", "transsexuals", "fueled", "mex", "xanga", "sverige", "extrait", "pelvic", "monochrome", "activating", "antioxidants", "gynecology", "mythtv", "probabilistic", "cooperating", "calibrated", "phased", "godzilla", "eweek", "airbus", "simplex", "webhome", "aerobics", "sabrina", "condor", "gated", "gaap", "sasha", "ebayer", "hmc", "bitrate", "karnataka", "amish", "ffm", "duh", "hyperlinks", "clitoris", "hse", "cribs", "reliant", "subcontractor", "fendi", "giveaways", "wah", "psych", "hydrochloride", "magnification", "twelfth", "proponents", "priceline", "ecco", "backpackers", "kohler", "irb", "initialized", "ava", "silverado", "amr", "ecu", "psychiatrist", "lauder", "soldering", "phono", "crd", "daryl", "trp", "lehman", "daihatsu", "grantee", "enhancer", "anglers", "rottweiler", "filefront", "visualize", "psd", "adb", "hoses", "bidpay", "ias", "turntable", "screenings", "pivotal", "pai", "heuer", "fic", "nix", "lineno", "fdi", "provo", "checkins", "plating", "lycra", "planck", "yugioh", "reactors", "npc", "kingsley", "careerbuilder", "gillette", "fluoride", "stacking", "cochran", "suomi", "sissy", "trang", "calculates", "thunderstorms", "cip", "transcriptional", "finalized", "referees", "deerfield", "lsc", "cochrane", "eldorado", "esmtp", "conservancy", "otrs", "omim", "dielectric", "anand", "electrophoresis", "sprinkler", "imbalance", "cine", "scarlett", "xen", "novak", "backcountry", "artistdirect", "outboard", "pitches", "scc", "lockheed", "raj", "iana", "elmo", "unmatched", "scranton", "ixus", "pinpoint", "gabbana", "neumann", "outta", "dieting", "andhra", "ralf", "appraisers", "xenon", "hybridization", "anh", "abercrombie", "trax", "otherosfs", "ssc", "danbury", "nofx", "sharma", "rockers", "palliative", "recieve", "cufflinks", "queues", "relisted", "beep", "dunedin", "remanufactured", "staffed", "lightspeed", "grilling", "stalin", "kaye", "bps", "camo", "shoutbox", "toms", "homeschool", "ccg", "lifehouse", "windsurfing", "pattaya", "relocated", "untreated", "mkdir", "riaa", "divisional", "chihuahua", "mcconnell", "resell", "chandigarh", "centrino", "osbourne", "burnout", "classpath", "designations", "spl", "microwaves", "coliseum", "ephedra", "spawning", "endothelial", "citrate", "eduardo", "snowman", "edmonds", "potty", "microbiol", "shooters", "norwalk", "bacillus", "fk", "cla", "spooky", "belleville", "venezuelan", "cbr", "colby", "pab", "hom", "subpoena", "hons", "interpretive", "bareback", "extender", "glucosamine", "proj", "modesto", "designjet", "typhoon", "launchcast", "referrer", "zhejiang", "ricci", "superhero", "tooling", "tomography", "berman", "vocalist", "tidbits", "cystic", "pacifica", "kostenlos", "anniversaries", "infrastructures", "littleton", "commenters", "cali", "fairway", "postdoctoral", "prs", "fairchild", "ssb", "spinner", "evanston", "homeopathic", "ordinarily", "hines", "cpd", "braking", "ece", "platelet", "messageboard", "setback", "recipezaar", "installers", "subcategory", "markov", "factbook", "tuple", "fibromyalgia", "rootsweb", "culver", "bratz", "bucharest", "ntl", "lacoste", "renters", "timberlake", "zack", "markham", 
"gels", "iframes", "thinkgeek", "nafta", "advertisment", "mountaineering", "screwdriver", "hutch", "beckett", "homeschooling", "dealerships", "sakura", "byu", "jupiterweb", "phosphatase", "mahal", "killings", "robyn", "adirondack", "casablanca", "sdp", "pulaski", "mantra", "sourced", "carousel", "mpumalanga", "thermostat", "infarction", "polypropylene", "mailboxes", "southend", "maxell", "tundra", "vars", "youngstown", "farmland", "skater", "iep", "imho", "disrupt", "rampage", "fink", "jurassic", "gpg", "gnupg", "aliasing", "comix", "solves", "hiroshima", "jiang", "oscars", "boosting", "knownsite", "macarthur", "powerhouse", "deodorant", "youre", "compulsive", "perky", "reinforcing", "extensible", "mtb", "catheter", "practicum", "photocopy", "zipcode", "mcpherson", "saharan", "pixma", "hubbell", "lesbienne", "timeframe", "disarmament", "aed", "actin", "interviewer", "vms", "wno", "dbi", "waikato", "syslog", "orr", "gastroenterology", "travelmate", "composting", "mackie", "choi", "uva", "fga", "oceanography", "vastly", "stardust", "radiological", "commando", "bathtub", "urdu", "aedst", "greer", "motorway", "repositories", "freaky", "guangdong", "merlot", "civ", "spielberg", "lesley", "thom", "phoneid", "salinas", "legged", "unilateral", "dsn", "shri", "aegis", "colloquium", "matrox", "vk", "springsteen", "uhf", "fatalities", "supplementation", "embodied", "altec", "mohammad", "verbose", "marbella", "sth", "iterator", "recieved", "slc", "cfl", "deterministic", "nci", "predictor", "salmonella", "nga", "nantucket", "viewable", "subnet", "maximise", "lotr", "isn", "chalets", "reimbursed", "lau", "watermark", "totes", "mohamed", "dyslexia", "hubble", "thugs", "organics", "dearborn", "feds", "yiddish", "dopamine", "multiplier", "winzip", "sacd", "payoff", "spv", "sonar", "monticello", "flasher", "subcontractors", "evangelism", "abortions", "lesion", "akira", "progesterone", "ethyl", "earthlink", "caramel", "immunodeficiency", "washburn", "xtra", "capitalized", "ceos", "maint", "pancreas", "octopus", "xena", "neuro", "ara", "receptionist", "cessna", "tru", "zombies", "cambodian", "interagency", "activision", "synchronize", "jenn", "juegos", "titties", "tay", "hornets", "crossfire", "ankara", "spandex", "hdmi", "tamara", "ctc", "capcom", "cato", "peachtree", "handyman", "aeg", "ethic", "harlan", "taxon", "lcs", "indefinite", "slackware", "cougars", "earch", "ambience", "genet", "photopost", "uo", "infor", "neuronal", "carrollton", "checkers", "torrance", "yuma", "spokeswoman", "baccalaureate", "tripods", "logistic", "middlesbrough", "personalization", "enema", "easement", "goalie", "darkroom", "hydrocarbons", "gpm", "hoh", "hla", "donaldson", "tiscover", "recor", "mori", "adi", "rockland", "uniqueness", "hfs", "cascading", "metros", "hangers", "broadcaster", "musculus", "degraded", "topo", "viewcvs", "eisenhower", "flashlights", "myyahoo", "rosenthal", "affordability", "latham", "jailed", "depp", "grapefruit", "trna", "motorbikes", "verdana", "bonita", "nippon", "decorators", "dwl", "jizz", "pendleton", "psoriasis", "mavericks", "dianne", "earnhardt", "amtrak", "resid", "tostring", "lessee", "goodyear", "utica", "overclocking", "kitchenaid", "cbt", "peacekeeping", "oti", "interferon", "aas", "selectable", "chechnya", "rory", "woodbridge", "jas", "intersections", "sma", "capitalization", "epi", "responder", "qv", "thoracic", "phaser", "forensics", "infiltration", "serine", "bing", "schemas", "orthogonal", "ohms", "boosts", "stabilized", "wordperfect", "msgs", "zhou", "selenium", "grinders", "mpn", 
"cse", "assn", "punches", "masturbate", "parachute", "glider", "chesney", "taos", "tong", "lotions", "adrenal", "sixties", "booting", "cunts", "dri", "ozzy", "elearning", "zx", "valuations", "kidman", "jpn", "postoperative", "cytology", "nye", "biennial", "ifndef", "bq", "circuitry", "cdw", "robb", "kinja", "tweaks", "readership", "northstar", "dif", "worthington", "groundbreaking", "transducer", "serotonin", "complements", "isc", "params", "radiators", "beagle", "cadmium", "bodoni", "speedo", "detachable", "simplifies", "sleeveless", "motorists", "tbsp", "waivers", "forsyth", "ricerca", "agilent", "plumper", "uterine", "apartheid", "bnc", "businessweek", "morphological", "windham", "ellington", "ria", "cdi", "polio", "clp", "sharm", "alvarez", "regatta", "chatroom", "polarity", "overrides", "riff", "widths", "dest", "attenuation", "kluwer", "martins", "italiana", "telford", "shuman", "grapevine", "russo", "daunting", "topples", "futuristic", "autofocus", "chai", "obsessive", "transplants", "referrers", "junkie", "admitting", "alsa", "galactica", "wkh", "rotational", "withdrawals", "pageviews", "hartman", "finalist", "pornographic", "armageddon", "smallville", "selectively", "albans", "fallout", "brownsville", "galeria", "stalker", "kathmandu", "nyu", "kristina", "dps", "icmp", "sophistication", "wrt", "messed", "oceanside", "foxpro", "taiwanese", "officejet", "helens", "ppg", "sym", "combos", "cloned", "fulham", "dahl", "pla", "nfc", "mathews", "bestseller", "enrique", "minidisc", "downside", "malvinas", "honcode", "reissue", "striker", "memos", "tensor", "whitehead", "whoa", "brookings", "accomodations", "integra", "laredo", "nntp", "logiciel", "jaguars", "mga", "tracer", "frist", "lsd", "synthesizer", "ejaculating", "biodiesel", "mcleod", "waldorf", "microfilm", "lear", "subsidized", "simons", "optimizer", "zire", "pituitary", "sow", "repeater", "teamxbox", "bytecode", "mccall", "wiz", "autopsy", "joltsearch", "ym", "itv", "colo", "ying", "bce", "inode", "glenwood", "allstate", "horticultural", "hahaha", "spamming", "ssn", "wartime", "mou", "hpv", "jain", "geriatric", "mayan", "navman", "futon", "grannies", "hairstyles", "nays", "webspace", "rds", "mellitus", "multiples", "cryptographic", "disparate", "boardwalk", "ineligible", "homeopathy", "entrants", "rallies", "simplification", "abb", "insolvency", "roleplaying", "affective", "wilma", "compusa", "histogram", "wheelchairs", "usaf", "pennington", "lesbiana", "liberalization", "insensitive", "greenpeace", "genotype", "contaminant", "informa", "collaborators", "malvern", "proxies", "rewind", "issuers", "sinh", "kerberos", "schoolgirls", "hilo", "stratton", "idx", "astronaut", "instituto", "lowry", "constipation", "aec", "sheryl", "nashua", "ikea", "oswego", "gbr", "koi", "sues", "cba", "mckenna", "eudora", "candida", "sildenafil", "adjusts", "sqft", "pickups", "squaretrade", "chandra", "cheesecake", "oth", "porting", "lubrication", "shootout", "racine", "webserver", "vnu", "fragmented", "chevron", "reinsurance", "slated", "tera", "guantanamo", "reina", "energizer", "clarksville", "vandalism", "acpi", "acetaminophen", "wolfram", "ofthe", "contraceptive", "necrosis", "iva", "bonanza", "lumbar", "disparities", "umass", "flamenco", "osprey", "flammable", "biometrics", "buspar", "wasnt", "nds", "softwares", "dbm", "alchemist", "marr", "ssw", "mcdonalds", "hormonal", "vh", "calender", "distro", "virgo", "rink", "jesolo", "unrealistic", "rhonda", "pov", "pings", "pcp", "inxs", "desy", "teaser", "impairments", "courageous", "rho", "promos", 
"transceiver", "warhammer", "iterative", "catered", "callahan", "neuron", "xlibmesa", "pulsar", "enewsletter", "dav", "pedagogy", "bcc", "afrikaans", "ecb", "cinematic", "ugh", "malik", "tshirts", "fellowes", "illus", "telefon", "maguire", "nlm", "numeracy", "caviar", "popups", "sleepwear", "quads", "grady", "kelsey", "enforceable", "bouncy", "vcrs", "retinal", "sponsorships", "textrm", "screenwriter", "vendio", "otago", "ducati", "allele", "sylvania", "optio", "purifiers", "commuting", "hiphop", "kato", "kama", "bcs", "keating", "eczema", "northland", "icu", "veg", "roadster", "confetti", "fv", "raptors", "irda", "veggie", "dharma", "chameleon", "hooper", "luciano", "grp", "abrasive", "henti", "koruna", "edp", "ensembles", "backpacker", "bainbridge", "scs", "comfy", "assuring", "gettext", "registries", "eradication", "herefordshire", "ectaco", "doh", "jodi", "quintet", "groupwise", "ambiance", "chun", "damian", "bakeries", "dmr", "fucker", "polka", "wiper", "wrappers", "giochi", "iterations", "svs", "ntfs", "namespaces", "mismatch", "fdic", "icd", "vj", "oxides", "qualifiers", "battered", "wellesley", "smokey", "passwd", "vacuums", "falun", "precip", "lagos", "rapper", "hooters", "calligraphy", "advantageous", "mustek", "monique", "fearless", "ortiz", "pref", "morningstar", "recessed", "fmt", "palladium", "totaled", "levitt", "vd", "shipper", "darryl", "hobo", "nys", "merrell", "cra", "sly", "reductase", "raul", "shenandoah", "harnesses", "wtc", "loma", "oshkosh", "multivariate", "geil", "kitchenware", "unigene", "lans", "immunoglobulin", "silverstone", "uniden", "telechargement", "remstats", "unitary", "getnetwise", "hospitalization", "clubbing", "microelectronics", "observational", "waverly", "crashers", "schwab", "deregulation", "vba", "carpentry", "steinberg", "sweetie", "mideast", "hispanics", "podium", "paranoia", "faceted", "sito", "gecko", "fullscreen", "interchangeable", "rollins", "scp", "hst", "starship", "miele", "seeded", "cyclists", "fey", "cmt", "nurturing", "enzymology", "amadeus", "usm", "galapagos", "uconn", "picker", "xls", "mulder", "lesbicas", "dialer", "mooney", "syntactic", "envision", "jetta", "downey", "codex", "lsb", "userid", "cosmology", "noodle", "gromit", "sargent", "bangle", "humping", "donnie", "privatisation", "tofu", "rq", "unhcr", "battlestar", "intuit", "adoptive", "cda", "minimized", "partnered", "twat", "filibuster", "glamorgan", "adwords", "tulane", "usp", "facet", "behaviours", "redneck", "imax", "xpath", "synthesized", "encapsulation", "samsonite", "accordion", "rooney", "minimally", "webpreferences", "skoda", "matchups", "ucc", "mailings", "ono", "beachfront", "cem", "crosswords", "pubchem", "integrative", "kelowna", "embed", "gurus", "allotted", "shutterfly", "gerhard", "watersheds", "trimester", "clickable", "spyder", "electricians", "nexium", "capricorn", "dipped", "perm", "rte", "spectrometry", "snippet", "pha", "permeability", "waukesha", "igg", "scart", "wsu", "normalization", "skillet", "neoprene", "vlc", "offeror", "thermo", "huber", "jarrett", "farechase", "maintainers", "maarten", "ginseng", "blackout", "detergent", "rosetta", "grenade", "occured", "karin", "lana", "fontana", "kang", "crafting", "ivillage", "mowers", "bratislava", "policymakers", "sienna", "watford", "misco", "givenchy", "reimburse", "esperanto", "modalities", "pcc", "lighters", "shutting", "endemic", "spr", "carly", "hydrologic", "stansted", "nep", "huddersfield", "aimee", "davey", "csp", "helpsearchmemberscalendar", "ait", "transduction", "silverman", "clarifying", 
"aortic", "drc", "hoa", "starcraft", "martens", "ficken", "structuring", "konami", "lipids", "jurisdictional", "desi", "cellphones", "cordoba", "xj", "sheppard", "dpkg", "folsom", "triggering", "mapa", "aip", "rackmount", "binocular", "eda", "specialise", "rar", "remortgages", "mckinley", "hanks", "dosing", "strobe", "waffle", "detectable", "pmi", "arrowhead", "nigga", "mcfarlane", "paycheck", "sweeper", "freelancers", "seinfeld", "tdm", "shen", "responders", "keepsake", "birthdate", "gettin", "upbeat", "ayes", "amenity", "donuts", "salty", "interacial", "cuisinart", "nautica", "estradiol", "hanes", "noticias", "gmp", "schaefer", "prototyping", "mth", "zeros", "sporty", "tumour", "fpic", "pdc", "atpase", "pooled", "bora", "shu", "stabilize", "subwoofers", "tcs", "clueless", "sofitel", "woodruff", "southport", "walkthroughs", "radiotherapy", "minifig", "transfusion", "sams", "zend", "newtown", "mcmillan", "csf", "lyn", "witt", "mcd", "unep", "newsflash", "recombination", "messing", "budgeted", "slogans", "flashback", "photometry", "sutter", "inr", "knicks", "ingestion", "mindset", "banda", "adulthood", "inject", "prolog", "dunk", "goofy", "mcintyre", "aga", "guilford", "raglan", "photonics", "cdf", "celtics", "heterosexual", "mappings", "jel", "snip", "fascism", "galerias", "audiovisual", "diagnosing", "neutrino", "wouldnt", "mq", "codecs", "certifying", "dvp", "traduzca", "csb", "subj", "asymptotic", "isotope", "moblog", "locales", "preventative", "brampton", "temperate", "lott", "srv", "meier", "crore", "deserving", "banco", "diagnoses", "thermaltake", "ultracet", "cortical", "itchy", "glaucoma", "homosexuals", "mhc", "estee", "wysiwyg", "oversees", "odp", "categorised", "thelist", "diss", "cta", "diamondbacks", "nzd", "subtype", "psx", "thessaloniki", "dmv", "leafstaff", "literate", "ayp", "bikers", "harcourt", "bubba", "mutt", "orwell", "mietwagen", "bakeware", "cleanser", "lonsdale", "velocities", "renewals", "tsx", "dnl", "mtu", "salford", "ephedrine", "longview", "closeup", "venous", "hereunder", "ouch", "teflon", "cys", "debadmin", "cleans", "fpga", "everton", "rosters", "herbicide", "marlene", "futura", "smd", "cheddar", "ql", "tucows", "regex", "bukake", "chs", "mcclellan", "gopher", "distal", "zar", "frommer", "joss", "shortfall", "harmonica", "geothermal", "texmf", "atlases", "kohl", "lorazepam", "hosp", "lewiston", "stowe", "fluke", "khi", "estes", "hdr", "caches", "stomp", "acidic", "anc", "doin", "tld", "gangster", "deliverables", "censored", "fascist", "lido", "matchbox", "trl", "businessmen", "bpo", "incubator", "experiential", "eraser", "jordanian", "jiwire", "libra", "rtl", "iea", "uniprot", "statystyki", "pkgsrc", "nonprofits", "desnudos", "czk", "ethylene", "slows", "opm", "inhibits", "exploratory", "spectrometer", "outsole", "lista", "tmc", "inset", "polynomials", "elegans", "openers", "shasta", "dob", "inet", "cov", "fallon", "sidekick", "tcb", "dmca", "rewriting", "bahama", "idl", "loretta", "lingvosoft", "dax", "allocating", "newell", "juveniles", "gamermetrics", "lcds", "ortholog", "tasmanian", "hydrocarbon", "lobbyist", "kelvin", "secondhand", "xo", "cheatscodesguides", "mdl", "clientele", "technica", "gratuito", "hts", "arkon", "hort", "bureaucratic", "cooperatives", "raceway", "sopranos", "hotties", "gq", "terrell", "yc", "closings", "registrars", "strlen", "faye", "cto", "lakeview", "ospf", "tunneling", "methamphetamine", "murals", "bangs", "asic", "knockout", "radon", "avantgo", "asl", "obi", "timelines", "roget", "cristina", "visio", "autoimmune", "coder", 
"replicated", "pom", "timetables", "kline", "anorexia", "errno", "workplaces", "harpercollins", "clk", "heartburn", "empathy", "ica", "motivating", "clockwise", "frisco", "mitzvah", "chong", "bashing", "boosters", "cyl", "grupo", "mikhail", "denominator", "changeset", "cec", "jovencitas", "texttt", "islamabad", "freestanding", "resilient", "eyewitness", "spartanburg", "hippo", "trung", "tenancy", "offsite", "realaudio", "clements", "dogsex", "ticketing", "heterogeneity", "bodied", "dudes", "maytag", "norco", "altos", "sleeved", "overs", "watercraft", "scully", "cellulose", "cathode", "monographs", "nra", "digitized", "rotated", "gaia", "motown", "pryor", "sato", "greeley", "ccr", "agro", "ramos", "quizilla", "citibank", "scotty", "pvp", "meridien", "taxa", "brunettes", "bic", "irl", "mfa", "endo", "unhelpful", "microorganisms", "twister", "krakow", "sequoia", "emt", "activator", "incredibles", "familial", "marquee", "resilience", "thermodynamics", "seton", "makita", "subgroups", "catchy", "aia", "tig", "synaptic", "bobcats", "zappa", "eec", "chicas", "swahili", "nlp", "dzwonki", "enrolling", "commercialization", "smt", "cataloging", "snowboards", "sami", "tesla", "elan", "csd", "ingrid", "longman", "unleaded", "mesquite", "kroner", "frm", "javadoc", "hotbot", "denali", "inhibitory", "phonics", "dbs", "refs", "smh", "thaliana", "meningitis", "motivations", "rees", "asteroid", "donegal", "endings", "mwf", "unlisted", "philippians", "conductive", "sooo", "echostar", "microscopes", "kenmore", "reagent", "achievable", "dla", "glamorous", "interacts", "litchfield", "lavoro", "hobbynutten", "chomsky", "venezia", "yamamoto", "zhu", "interleukin", "flashcards", "homologene", "interception", "voltages", "assignee", "kip", "bla", "algarve", "valance", "stc", "pisces", "cpanel", "orc", "hemingway", "gti", "hdl", "rendition", "danmark", "yun", "sourcebook", "hui", "matador", "smut", "nac", "dang", "bradenton", "meetups", "bilbao", "ewan", "cwa", "akai", "deletes", "adjudication", "autoconf", "rasmussen", "bibliographies", "milne", "fsc", "unplugged", "ttc", "currie", "torvalds", "neff", "tailgate", "hollis", "lanier", "overseeing", "escalation", "polymorphism", "semitism", "sevenfold", "colocation", "woodbury", "tshirt", "epidemiological", "medic", "grail", "espana", "horne", "nostalgic", "aldrich", "tabled", "farsi", "excelsior", "rial", "greenspan", "dhabi", "chobe", "tafe", "pz", "andrei", "frazier", "criminology", "jeanette", "constel", "talkin", "dup", "syd", "permittee", "hangover", "capitalize", "fsu", "motocross", "boomers", "wedgwood", "mcdermott", "youngs", "lep", "grossman", "pecan", "freshmeat", "fnal", "benzene", "mcp", "topper", "ittoolbox", "manny", "arse", "osteoarthritis", "westlake", "czechoslovakia", "addictions", "taxonomic", "judo", "mizuno", "palmetto", "telco", "ltc", "microarray", "electrolux", "elephantlist", "sparked", "qualcomm", "whitaker", "opc", "connelly", "conner", "hospitalized", "fec", "opml", "cana", "ation", "entitlements", "wingate", "healey", "jabra", "qmail", "soybeans", "awd", "electrostatic", "topological", "coz", "oversize", "westinghouse", "unk", "reb", "rios", "craftsmanship", "cic", "pyle", "seuss", "cheetah", "ldp", "competed", "fridges", "hatchery", "judgements", "msr", "zr", "corbett", "asx", "curr", "fingerprints", "conv", "cheesy", "ahmedabad", "dimlist", "winfield", "pinto", "gallerys", "jana", "martindale", "webstatistics", "dhl", "mays", "risc", "hcv", "oboe", "tzu", "hurd", "geotrack", "kolkata", "imation", "hematology", "expressway", "steelhead", 
"ahh", "turntables", "lindholm", "clooney", "facilitators", "mcnamara", "shiva", "toners", "kenyan", "wynn", "hsa", "motorbike", "niles", "zippo", "sergei", "upfront", "battlefront", "gosh", "fansite", "colossians", "addicting", "gerd", "copa", "gtp", "zlib", "whitespace", "tektronix", "doesn", "mccullough", "cnr", "microfiber", "mdc", "tsa", "deployments", "stearns", "insurgency", "boyer", "behringer", "akg", "ttm", "perceptual", "fz", "midlothian", "follando", "instr", "ott", "bsn", "rambler", "drywall", "suzy", "dekalb", "sumo", "topsites", "hsc", "tse", "refurbishment", "pfam", "tdi", "grassland", "jeffery", "councilman", "swaps", "unbranded", "astronauts", "lockers", "lookups", "attackers", "actuator", "reston", "sftp", "reinstall", "lander", "coby", "methanol", "miscellany", "simplifying", "slowdown", "bridesmaid", "transistors", "marys", "colgate", "lousy", "pharm", "foreseeable", "nutritionists", "techweb", "berkley", "resistors", "blondie", "drwxr", "cfc", "isu", "stm", "villanova", "iw", "tif", "cbi", "cesar", "heuristic", "archivist", "gallup", "valtrex", "usn", "antimicrobial", "biologist", "cobol", "homolog", "fruity", "stratus", "fips", "urea", "bumpers", "lumix", "wildcard", "rvs", "desnudas", "plextor", "oxidative", "brits", "healy", "pliers", "kayaks", "ibanez", "marxist", "couldnt", "naperville", "diplomas", "fieldwork", "damping", "immunol", "regan", "wwwroot", "bootleg", "intellectuals", "winslow", "minis", "rhs", "leftist", "tequila", "limoges", "wildwood", "oop", "germantown", "bergman", "gmac", "pulitzer", "tapered", "mollige", "toothbrush", "delegations", "plutonium", "factsheet", "squarepants", "subsurface", "guadalupe", "halliburton", "underscore", "borg", "glutamine", "slutty", "mcphee", "doa", "herbicides", "usgenweb", "inscribed", "chainsaw", "tablature", "fertilization", "glitch", "gearbox", "stang", "alejandro", "tensile", "varchar", "intercom", "ase", "osg", "mckee", "envisaged", "splice", "splicing", "campfire", "cardbus", "hubby", "graphing", "biologists", "improv", "hempstead", "exilim", "xlr", "debuts", "esi", "diskette", "ubs", "commend", "contender", "southland", "spie", "globals", "diaspora", "anu", "moratorium", "safes", "goodnight", "alcoholics", "asme", "gatlinburg", "cai", "pharmacol", "swe", "xorg", "newsquest", "wavelengths", "unclaimed", "racquet", "cout", "cytoplasmic", "qaida", "kpmg", "lanarkshire", "steakhouse", "stubs", "solarium", "sedo", "fillmore", "shox", "greenhouses", "spotlights", "perks", "harlow", "morrissey", "igp", "lutz", "capacitance", "birthstone", "primitives", "bong", "lingual", "unframed", "iter", "vibes", "tmdl", "programa", "republication", "zap", "veneto", "zhao", "hippie", "acyclovir", "benoit", "organizes", "unaudited", "rz", "summertime", "airbag", "lal", "sweetwater", "bjc", "cfm", "internationale", "krystal", "expansions", "gms", "correlate", "linkout", "poc", "pittsburg", "bylaw", "kenyon", "trims", "epiphany", "pny", "devin", "viewfinder", "homewood", "mcrae", "hind", "renaming", "plainfield", "maxon", "sprintf", "armagh", "livechat", "pdr", "bhp", "lyman", "notfound", "pho", "pathogen", "zagreb", "gayle", "ust", "overwrite", "revitalization", "camry", "postmodern", "jayne", "hci", "kuhn", "typos", "glutamate", "melton", "oneworld", "realtone", "mikey", "telephoto", "pooling", "jy", "drury", "ctw", "tbs", "sct", "custer", "borderline", "surgeries", "lobbyists", "sfo", "zionist", "gaskets", "photoblog", "cushing", "nonstop", "hummel", "corgi", "ellie", "citigroup", "seasonally", "uci", "bizwomen", "dti", "malkin", 
"adbrite", "psychosocial", "butthole", "ellsworth", "cline", "backlog", "thema", "filmmaking", "wwi", "townhomes", "usf", "instapundit", "mcmaster", "bayside", "thinkcentre", "cea", "biophys", "hodgkin", "vhosts", "laughlin", "congresses", "electrically", "ophthalmic", "yz", "prong", "unreleased", "ipa", "chaplin", "dfw", "histology", "gilman", "klamath", "atrial", "equalizer", "vbscript", "helmut", "lynda", "vax", "yak", "silt", "councilmember", "endorses", "expos", "cherish", "aap", "undead", "pto", "critters", "blob", "kurds", "ela", "ical", "macleod", "devry", "rahman", "fundamentalist", "subtraction", "superstars", "chmod", "leveling", "piggy", "stadiums", "playable", "uz", "sunos", "lancia", "perf", "interconnected", "tunning", "whitepaper", "platt", "lexis", "virology", "csm", "purcell", "vidal", "svcs", "subsystems", "oxfam", "johnstown", "beading", "robustness", "ifn", "interplay", "ayurveda", "mainline", "folic", "vallejo", "ratchet", "cee", "yl", "yee", "wicca", "cygnus", "depiction", "jpl", "tiered", "optima", "seward", "photons", "transactional", "lhc", "doggy", "anodized", "exxon", "hurdle", "donnelly", "metastatic", "encyclopaedia", "errata", "divas", "ong", "trey", "thankyou", "alerting", "insofar", "smileys", "surrogate", "breathable", "differed", "dickies", "gonzo", "programmatic", "trs", "teammates", "barrymore", "ddd", "barracuda", "accesskey", "appellants", "usergroups", "initiates", "pwd", "mation", "aiwa", "whiting", "grizzlies", "okidata", "methadone", "offsets", "tryin", "jodie", "jdk", "tallinn", "descarga", "monterrey", "harrogate", "lotteries", "bozeman", "coauthor", "cybershot", "airflow", "thur", "oper", "stn", "unattached", "maher", "karlsruhe", "yuri", "cheung", "honeymooners", "cheaptickets", "howie", "dieter", "centerpiece", "mplayer", "unwind", "outings", "crotch", "wavelet", "nothin", "pathogenesis", "diodes", "realestate", "reinstatement", "botox", "nge", "dipole", "cleo", "norge", "kata", "tangled", "giga", "walsall", "burnaby", "lilo", "adf", "majorca", "agribusiness", "validator", "jax", "pixie", "proofing", "clits", "keyring", "vehicular", "workbench", "deph", "landscaped", "aziz", "lula", "nucl", "farber", "impala", "commenter", "celsius", "flicks", "hardwear", "prefixes", "racquetball", "endl", "flavours", "pundits", "unset", "murano", "optimised", "bariatric", "hitchhiker", "isotopes", "entrez", "erich", "conduction", "grabber", "orch", "peridot", "produc", "skechers", "pacers", "salvatore", "nts", "rbc", "neurosci", "parton", "apec", "centerville", "mcl", "ebuyer", "dermatitis", "roxio", "nagoya", "sfc", "snowfall", "sss", "fundraisers", "fecal", "vorbis", "hazzard", "lbp", "gorman", "validating", "healthday", "newsstand", "dossier", "psion", "tcc", "corbin", "songwriting", "ecg", "hinton", "nighttime", "fluxes", "kombat", "finders", "dictated", "darlene", "westcott", "dca", "lua", "lpg", "opti", "proximal", "canciones", "irix", "qp", "peroxide", "bryn", "erm", "rfi", "outages", "complemented", "finley", "thanh", "backlash", "gallo", "agence", "zs", "kjv", "jonny", "biblio", "qm", "opacity", "userland", "townsville", "turing", "veggies", "centenary", "barclays", "eid", "drexel", "pedagogical", "lockhart", "fishnet", "combinatorial", "unintended", "raman", "rochdale", "prnewswire", "sthn", "smog", "ucl", "poa", "mics", "punjabi", "prem", "katalog", "kettering", "hayek", "brookline", "montpelier", "titty", "ntt", "fart", "oxidase", "qw", "caterer", "pregnancies", "fiori", "dateline", "stdout", "unassigned", "adriana", "lyndon", "groupings", 
"mems", "midterm", "campsite", "dropdown", "marketer", "huntingdon", "jcpenney", "gelatin", "qvc", "adenosine", "milliseconds", "swatch", "redefine", "backdoor", "jazeera", "envisioned", "pws", "extrem", "automating", "sempron", "cursors", "divert", "phnom", "tbc", "kanji", "vod", "recreate", "smackdown", "dropout", "jrst", "fallujah", "lockout", "moron", "tnf", "townhouses", "horrific", "abacus", "lifeline", "gto", "torquay", "dao", "conjugate", "winch", "elektra", "webtrends", "shes", "sabotage", "blueprints", "limos", "fraunhofer", "warhol", "suppressor", "dogpile", "birt", "rensselaer", "jocks", "unzip", "floss", "sarge", "endnote", "leland", "telugu", "midwifery", "huff", "pornos", "primates", "rmi", "tangerine", "amoxicillin", "graz", "basingstoke", "crawler", "angled", "comin", "longhorns", "doha", "ebsco", "lynchburg", "overriding", "wilshire", "ard", "wachovia", "groff", "ects", "lok", "invicta", "dongle", "ecumenical", "tanaka", "internacional", "kwan", "cdl", "archiv", "placid", "lenin", "marsha", "gradients", "ritalin", "retrieves", "ferrous", "dhaka", "zillion", "chino", "ltr", "caveat", "gangbangs", "toiletries", "bedrock", "clio", "zines", "multipart", "forklift", "repurchase", "orthopedics", "wsw", "vnc", "nfpa", "dnf", "badgers", "chp", "kinh", "appetizer", "disbursement", "weblinks", "telemetry", "consumable", "winn", "depressive", "stabilizer", "ovary", "rune", "accrual", "creatively", "amateure", "abd", "interfaith", "cay", "automata", "northwood", "payers", "gritty", "dewitt", "rect", "ipx", "sebring", "reborn", "bia", "lagrange", "treadmills", "bebop", "streamlining", "trainings", "seeding", "ulysses", "industrialized", "botanic", "bronco", "moodle", "chased", "cti", "intermediaries", "tei", "rotations", "knoppix", "montessori", "biomed", "murine", "entomology", "rodent", "paradigms", "lms", "putter", "fonda", "recursion", "flops", "initiator", "hsu", "pobox", "zeiss", "ferc", "tanf", "sunscreen", "llvm", "antidepressants", "decentralized", "freaking", "whittier", "elmira", "bassist", "oakville", "skaters", "luminosity", "emulators", "toefl", "keychains", "karat", "modis", "ginny", "egan", "posh", "bangles", "stereos", "submittal", "bnib", "moh", "mink", "simulators", "nagar", "zorro", "ecran", "ealing", "ozark", "pfeiffer", "miers", "vickers", "interactivity", "corso", "constructors", "doj", "ipm", "rnd", "jama", "lsi", "malfunction", "magma", "smithfield", "gtr", "canucks", "hammersmith", "sdi", "cricos", "blum", "parkland", "pcbs", "werewolf", "wnw", "midwestern", "ezboard", "charisma", "chilli", "iac", "suspensions", "nss", "smi", "malnutrition", "logcheck", "layton", "gaines", "inbred", "intercultural", "skateboards", "mainboard", "goshen", "functionally", "rabies", "catalysts", "datetime", "readability", "dakar", "dspace", "cappuccino", "modulus", "krause", "cuisines", "maclean", "tuscaloosa", "boosted", "sprayed", "gearing", "glutathione", "adoptions", "tweaking", "angina", "geeky", "rnb", "coupler", "lexapro", "aig", "paisapay", "zanussi", "minimizes", "hillsdale", "balboa", "penh", "wainwright", "agc", "guadalajara", "pinellas", "umts", "zappos", "daimler", "spo", "tadalafil", "everglades", "chipping", "montage", "geelong", "ionization", "broome", "biases", "sprawl", "marantz", "alfredo", "haunt", "hedging", "insulating", "mcclure", "vbr", "qed", "waterfowl", "adress", "reacting", "virtualization", "itat", "collide", "syst", "mankato", "segregated", "ests", "avengers", "technologist", "pigments", "impacting", "lamont", "aquariums", "rigs", "arginine", "moot", 
"pleasanton", "televised", "giftshealth", "acd", "simplistic", "hepa", "amphibians", "encapsulated", "injector", "kessler", "gardenjewelrykids", "leung", "edo", "impl", "grained", "relatos", "newsday", "gmat", "dani", "announcer", "barnsley", "cyclobenzaprine", "polycarbonate", "dvm", "marlow", "thq", "osce", "hackett", "divider", "cortez", "associative", "cmo", "rsync", "minivan", "victorinox", "chimp", "flashcoders", "giraffe", "pia", "stroud", "lefty", "cmg", "westside", "heres", "azimuth", "logistical", "firenze", "okavango", "jansen", "tween", "payback", "hydraulics", "endpoints", "perrin", "quantification", "coolant", "nanaimo", "yahooligans", "prilosec", "hutchison", "parsed", "shamrock", "schmitt", "korg", "warmers", "newt", "frontend", "itanium", "alleles", "weiner", "ola", "halftime", "frye", "albright", "wmf", "clemente", "handwritten", "whsle", "launceston", "wembley", "sandman", "mejores", "scoops", "dwg", "truetype", "eigenvalues", "airbrush", "ppb", "comms", "regexp", "quickstart", "beaverton", "trucker", "willamette", "chiropractors", "tyco", "mirroring", "massively", "aeronautical", "lasalle", "pwr", "wordlet", "hanford", "plac", "exhibitionism", "riser", "redux", "gaim", "audiobook", "compensatory", "couplings", "jeezy", "monsanto", "cleric", "rfq", "contactos", "esri", "equiv", "macrophages", "yao", "npt", "computes", "pickett", "oid", "charismatic", "lda", "teleconference", "mma", "whitepapers", "polycom", "tux", "asymmetry", "xpass", "cfd", "barbour", "tijuana", "niv", "hamiltonian", "cdg", "algebras", "quotient", "wildcat", "inlay", "peta", "paco", "avocado", "octets", "dubuque", "evaluator", "gid", "jumpers", "edmunds", "lerner", "manifolds", "awg", "napoli", "kristy", "variances", "pki", "objectivity", "sistema", "massager", "incubated", "feedster", "federer", "turnovers", "bev", "eai", "changers", "frs", "hereto", "osc", "clinician", "alltel", "gss", "curacao", "rapporteur", "arcserve", "gump", "powerline", "aspell", "avp", "safeguarding", "paxton", "herbie", "yabb", "chromosomal", "hickman", "runescape", "salesperson", "superfamily", "tupac", "cassini", "tobin", "zoos", "activates", "hibernate", "ning", "extremists", "montego", "rohs", "cyclical", "cytokines", "improvisation", "mmorpg", "toured", "tpc", "flatts", "cmf", "archiver", "rainer", "rsc", "covariance", "bobble", "vargas", "gulfport", "airfield", "flipping", "disrupted", "restocking", "lgbt", "extremetech", "citrine", "neoplasm", "rethinking", "xfn", "orientations", "calumet", "pellet", "doggie", "inflow", "msw", "lymphocyte", "weinberg", "saigon", "whiteboard", "wic", "brody", "invertebrates", "elliptic", "ffa", "agonist", "hyperion", "partypoker", "rockingham", "sandler", "schweiz", "grundig", "rethink", "musculoskeletal", "aggies", "prereq", "nikita", "aetna", "truckers", "giro", "laserdisc", "kaspersky", "dor", "determinant", "morpheus", "ayers", "junkies", "ccna", "jacquard", "assesses", "okinawa", "autoscan", "quantified", "pnp", "uppsala", "distortions", "subclasses", "glo", "condolences", "hitter", "livelihoods", "psf", "cala", "telluride", "apnea", "mkt", "floodplain", "valera", "wenger", "crusader", "backlinks", "alphabetic", "delonghi", "tailoring", "shavers", "mcdonnell", "aborted", "blenders", "symphonic", "asker", "huffman", "alistair", "navarro", "modernity", "wep", "uab", "olp", "booties", "cancels", "newsblog", "gangsta", "mgp", "foodservice", "teton", "newline", "prioritize", "clashes", "crohn", "bao", "quicklinks", "ethos", "hauppauge", "solenoid", "stis", "underdog", "fredericton", 
"tep", "bextra", "copywriting", "technol", "mdr", "asteroids", "continous", "hplc", "ovulation", "doggystyle", "quasar", "euthanasia", "schulz", "okanagan", "liters", "tarrant", "blacklist", "clermont", "rooftop", "ebert", "goldfish", "witherspoon", "slimline", "animator", "barbra", "irreversible", "flanagan", "encyclopedias", "csiro", "downtempo", "campsites", "graco", "lighthouses", "xg", "adt", "hemoglobin", "tung", "svga", "postpartum", "condi", "yoda", "jst", "dalai", "xn", "nytimes", "kenzo", "alden", "trampoline", "zi", "restricts", "gees", "intakes", "dogfart", "swearing", "ith", "montel", "ubbcode", "yw", "ninemsn", "lgpl", "jsf", "psychotic", "allyn", "higgs", "pulsed", "ignite", "hornet", "atypical", "contraceptives", "slimming", "dispatcher", "devoid", "jms", "maricopa", "mbs", "northfield", "idf", "elites", "fifo", "correlates", "casters", "heisse", "easygals", "mandalay", "haircare", "climbers", "atty", "madera", "calibex", "mailbag", "smartmedia", "vilnius", "dbl", "doping", "postwar", "strat", "bsp", "barebone", "thrombosis", "smarty", "whitley", "lse", "windermere", "curtin", "dilemmas", "cci", "gwynedd", "edwardian", "hppa", "saunas", "horowitz", "cna", "undergrad", "mocha", "escada", "knockers", "jitter", "supernova", "loughborough", "directtv", "feminization", "extremist", "tuttle", "aoc", "medway", "hobbit", "hetatm", "multipurpose", "dword", "herbalife", "ocala", "cohesive", "bjorn", "dutton", "eich", "tonne", "lifebook", "caster", "critiquer", "glycol", "manicure", "medial", "neopets", "accesories", "faxed", "bloomsbury", "mccabe", "ennis", "colossal", "karting", "mcdaniel", "aci", "brio", "baskerville", "syndromes", "kinney", "northridge", "acr", "emea", "trimble", "webinars", "triples", "boutiques", "freeview", "gro", "screener", "janine", "hanukkah", "caf", "adsorption", "sro", "underwriters", "foxx", "ppi", "noc", "brunton", "mendocino", "pima", "actuators", "internationalization", "wht", "pixies", "pancake", "transmembrane", "photostream", "guerrero", "firth", "hathaway", "emf", "beatty", "andersson", "lunchtime", "miro", "slams", "looping", "crates", "undated", "takahashi", "ramadan", "lowercase", "technologically", "anaerobic", "satelite", "pioneered", "tabloid", "pred", "solubility", "troubleshoot", "etf", "hatcher", "coders", "insecticides", "electrolyte", "watanabe", "firestone", "writeshield", "sph", "descargar", "letterhead", "polypeptide", "velour", "bachelorette", "nurs", "geospatial", "zoned", "pubic", "pizzeria", "mirc", "henning", "acf", "bae", "nitrous", "airspace", "santorini", "vdr", "tms", "convertor", "brahms", "genomes", "workable", "ordinate", "seminal", "rodents", "ytd", "xin", "precursors", "relevancy", "koala", "discus", "giftware", "realistically", "hol", "polska", "loci", "nanotech", "subunits", "awsome", "hula", "laramie", "toothpaste", "maxine", "mennonite", "subtitled", "qms", "maidstone", "abr", "sda", "jcb", "wpa", "fastener", "ctf", "foxy", "sexiest", "jupiterimages", "categorization", "inclusions", "fosters", "conc", "transsexuel", "limbaugh", "cassie", "altman", "lethbridge", "peng", "fillers", "symposia", "nia", "templeton", "stds", "hav", "typography", "ebitda", "eliminator", "accu", "saf", "gardenjewelrykidsmore", "gazebo", "preprint", "htc", "naxos", "bobbi", "cocker", "steph", "protonix", "systemax", "retry", "radford", "implantation", "telex", "humberside", "globalspec", "gsi", "kofi", "musharraf", "detoxification", "ree", "mcnally", "pma", "aureus", "informationweek", "chm", "bonneville", "hpc", "beltway", "epicor", "arrl", 
"iscsi", "grosse", "dfi", "penang", "zippered", "simi", "brownies", "lessor", "kinases", "panelists", "charlene", "autistic", "riu", "equalization", "corvallis", "reused", "volokh", "vari", "fordham", "hydroxy", "technologists", "snd", "dempsey", "httpdocs", "speakerphone", "reissues", "shalom", "khmer", "recordable", "dlt", "dredging", "dtv", "extrusion", "rtn", "preggo", "defamation", "theron", "proteomics", "spawned", "cep", "phendimetrazine", "wiener", "theorems", "samplers", "rfa", "pasco", "hilbert", "tamworth", "itmj", "msd", "etfs", "cde", "praha", "zona", "landry", "crackdown", "lifespan", "maybach", "cysteine", "responsibly", "slideshows", "aceh", "techtarget", "geotechnical", "fantasia", "camisole", "atoll", "shredders", "gags", "rips", "futurama", "hari", "ironman", "ducts", "marmot", "remand", "hawkes", "spoof", "spammer", "presets", "separations", "penicillin", "amman", "davos", "maturation", "internals", "bungalows", "beckinsale", "refractive", "grader", "ecd", "transducers", "ctxt", "doxygen", "rtd", "akc", "cgc", "intercollegiate", "zithromax", "onkyo", "niosh", "rainier", "furman", "newsfeeds", "larkin", "biztalk", "snapper", "hefty", "ipr", "valdosta", "ulead", "delaney", "hairless", "lactation", "innsbruck", "offbeat", "teenie", "protons", "machined", "holman", "eviction", "dic", "pio", "regionally", "thurman", "canaria", "showcasing", "afa", "certifies", "primes", "renton", "lambeth", "frappr", "liturgical", "easements", "aida", "openafs", "assword", "rving", "exogenous", "sram", "sault", "trolls", "flor", "rfe", "oleg", "smo", "analyzers", "scorer", "swami", "oilers", "nik", "mandela", "listers", "ordinated", "arlene", "dividers", "recoverable", "gators", "intraday", "cruces", "hollister", "enews", "lactose", "gifford", "competitively", "rockstar", "hampstead", "chrono", "nahum", "raja", "nextlast", "xinhua", "ltl", "lofts", "feral", "neurosurgery", "ringgit", "ukranian", "parmesan", "kiosks", "pnt", "hooking", "wip", "rawlings", "physiotherapy", "wrexham", "billabong", "prepayment", "jonesboro", "bangers", "handgun", "miscategorized", "itp", "desoto", "innovator", "mitochondria", "mewn", "sername", "usmc", "amicus", "vijay", "redirecting", "gma", "shih", "cervix", "biblia", "cosby", "lufthansa", "msnshopping", "sewerage", "ele", "mantis", "alerted", "lsp", "intron", "bri", "remodel", "carpal", "natalia", "cjk", "specialises", "condiments", "adventist", "eggplant", "coun", "ctv", "wycombe", "monaghan", "blogarama", "undocumented", "esb", "vaccinations", "gutierrez", "bernd", "needham", "inuit", "wordnet", "wedi", "keyes", "photocopying", "tca", "avn", "dressage", "cafepress", "phylogenetic", "kurtz", "morbid", "inno", "refresher", "freakonomics", "impreza", "cheeky", "arco", "proponent", "brasileiro", "kar", "rojo", "perscription", "aic", "streisand", "eastside", "bioethics", "redo", "piranha", "rps", "cmu", "uncompressed", "vps", "pseudomonas", "sotheby", "avionics", "minimization", "ascot", "linearly", "dolan", "titleist", "genesee", "grays", "fdc", "psychiatrists", "bom", "multiplex", "srt", "bradbury", "babysitting", "asd", "beehive", "aeon", "livin", "leblanc", "shorty", "injecting", "discontinuity", "littlewoods", "enquirer", "downturn", "fission", "modulator", "spybot", "hrc", "worldview", "choreography", "sfx", "nth", "buffering", "denison", "killarney", "scoping", "srm", "mammography", "epc", "nepalese", "communicable", "enzymatic", "melanogaster", "extravaganza", "kamloops", "spss", "tftp", "rotherham", "underestimate", "hana", "mycareer", "pra", "cooley", 
"gratuitement", "eriksson", "schaumburg", "exponentially", "chechen", "carribean", "bunnies", "choppers", "psyc", "pedersen", "earphones", "outflow", "scarab", "toasters", "skiers", "eax", "jamal", "raunchy", "biologically", "nbr", "ptc", "qe", "zyrtec", "riyadh", "pell", "quicksearch", "coates", "octane", "mtl", "krabi", "funders", "apj", "kal", "fai", "ccp", "environmentalists", "fatah", "ifa", "ackerman", "gbc", "soooo", "soapbox", "newberry", "deanna", "bestellen", "elongation", "webcrawler", "wanking", "ofsted", "yb", "dortmund", "boardroom", "nico", "taping", "mro", "atleast", "somatic", "fcs", "niki", "malloc", "lanzarote", "slump", "nerds", "laude", "mec", "simulating", "enrol", "bts", "cflags", "xps", "datafieldname", "wycliffe", "dda", "apts", "aikido", "slo", "batches", "dap", "ssr", "kournikova", "moshe", "fsbo", "shippers", "mtc", "cav", "rrr", "wildflowers", "polygons", "delimited", "noncompliance", "upi", "sna", "vidsvidsvids", "herts", "bellagio", "webapp", "haryana", "eeg", "dlls", "babysitter", "linotype", "produkte", "lesbica", "pes", "mediators", "hone", "riggs", "jockeys", "seater", "brightstor", "deliverable", "sanding", "buffered", "orton", "indesign", "lakeshore", "ctl", "aland", "clarins", "pelham", "huf", "ronin", "comps", "mgi", "greco", "kontakte", "edema", "leaderboard", "mce", "hsv", "geocities", "argc", "palos", "ori", "carotid", "citi", "squish", "cny", "gorham", "calphalon", "blasen", "midwives", "nara", "nab", "netbeans", "cyclones", "tapety", "snowflake", "blackhawk", "weinstein", "sterilization", "assessors", "chenille", "dehydration", "haircut", "fhwa", "misconceptions", "alternet", "undeclared", "bari", "songwriters", "tolerances", "incarceration", "hierarchies", "redondo", "lactating", "aquamarine", "yg", "edm", "sedimentation", "optometry", "mobilize", "attendee", "bmd", "dialogs", "rpt", "viktor", "trajectories", "federico", "openvms", "ppo", "pag", "precio", "leapfrog", "thermoplastic", "sexchat", "kingman", "deterrent", "ghraib", "duplicating", "tuba", "encodes", "garamond", "cirrus", "alanis", "kilometer", "ballarat", "wacom", "nsta", "actionscript", "ivf", "modifiers", "hijack", "thomasville", "accorded", "fryer", "namco", "xmms", "dammit", "produkter", "motorhome", "ade", "mfrs", "editable", "greats", "milosevic", "marcy", "boron", "creighton", "wolfenstein", "bolivian", "rowbox", "pauls", "phobia", "superfund", "vcc", "sadler", "piercings", "riffs", "briana", "geronimo", "tetra", "freakin", "alb", "retrofit", "cytokine", "stylesheets", "coalitions", "tactile", "cinematography", "vivitar", "wannabe", "blogwise", "amador", "skier", "storyteller", "bpa", "pelicula", "ischemia", "fms", "comput", "wristbands", "livecams", "hibiscus", "rheumatology", "edn", "somers", "cray", "iol", "waterbury", "selectivity", "carlow", "maxx", "haggai", "demonstrators", "raiser", "sanger", "mullen", "periphery", "predictors", "woodwind", "snl", "modblog", "repo", "burnley", "antispyware", "sumter", "rcd", "woodside", "tylenol", "megabytes", "backlight", "naturist", "zephaniah", "airbags", "plethora", "cabriolet", "yh", "retiree", "atol", "sonet", "anthropological", "mikasa", "iverson", "cae", "buckeye", "dollhouse", "stereotype", "uship", "ubisoft", "escalade", "breakaway", "produkt", "sealants", "montclair", "dinghy", "gnus", "melia", "feedbacks", "concurrency", "healthgrades", "hoya", "revista", "lrc", "flied", "tvr", "joliet", "ped", "chappell", "wollongong", "peo", "blowers", "doubleday", "guidant", "remodeled", "eea", "bcp", "situational", "nasd", "chakra", 
"dfa", "jammu", "wetsuits", "edc", "birkenstock", "vivendi", "emulsion", "fielder", "sorta", "courseware", "biosphere", "skb", "plumpers", "muschi", "qcd", "ollie", "gurgaon", "rwxr", "federalism", "gizmodo", "laminating", "coltrane", "colitis", "unincorporated", "liang", "blogged", "cryogenic", "antispam", "homologous", "hassles", "symptomatic", "rtc", "trademanager", "bipartisan", "rhodium", "exchanger", "preseason", "januar", "bumble", "intimidating", "randi", "placenta", "abbotsford", "upn", "dulles", "brainstorming", "wea", "dougherty", "sarcoma", "sniffer", "rotorua", "bahasa", "iona", "bioscience", "tricia", "residuals", "gforge", "copd", "homie", "leesburg", "afm", "xref", "flashpoint", "mobygames", "cortland", "mailers", "tented", "nicholls", "skew", "mahoney", "infoplease", "budd", "acn", "hollands", "muni", "modernism", "elizabethtown", "dunhill", "eee", "didn", "guidebooks", "scotts", "wye", "wsj", "biosciences", "macgregor", "atms", "habakkuk", "depaul", "binge", "cyst", "hexadecimal", "scissor", "progra", "smyth", "mott", "jazzy", "headboard", "diflucan", "bronson", "standardised", "cations", "cics", "ecole", "centos", "hysterectomy", "housings", "wrc", "movado", "mcdonough", "krista", "pharmacokinetics", "chantal", "morristown", "riverview", "loopback", "torsion", "ultrastructure", "lucida", "leftover", "sykes", "anecdotal", "rheims", "integrators", "unlv", "arboretum", "sharealike", "lowepro", "erc", "ischemic", "illustrators", "plugging", "macbook", "bjp", "arent", "vignette", "qf", "homebrew", "altoona", "pheromone", "fireball", "decorator", "franken", "netpbm", "antalya", "harmonious", "nne", "recordkeeping", "modernisation", "myx", "sdr", "muskegon", "daley", "modality", "liberalisation", "utilise", "arturo", "appellee", "granules", "multidimensional", "rollout", "homegrown", "datamonitor", "reinforces", "dirham", "leahy", "myc", "esophageal", "kira", "approximations", "forzieri", "intermediates", "kgs", "albumin", "grantees", "loveland", "maloney", "sativa", "paramedic", "trademarked", "edgewood", "stressing", "potable", "limpopo", "intensities", "oncogene", "antidepressant", "ballpark", "powys", "orca", "mascara", "proline", "molina", "nema", "wipers", "snoopy", "informationen", "esf", "riverdale", "unleash", "juelz", "bls", "noarch", "koss", "captioned", "paq", "summarizing", "ucsd", "gleason", "baritone", "independant", "chlamydia", "relativistic", "rotors", "driscoll", "andalucia", "mulher", "bagels", "subliminal", "insecticide", "segal", "spline", "undisclosed", "noni", "letterman", "almeria", "bryson", "wtb", "towson", "htaccess", "malayalam", "crue", "loo", "pinoy", "pallets", "uplink", "sheboygan", "terrence", "ghc", "gateshead", "probationary", "abducted", "warlock", "breakup", "fiche", "juror", "bowden", "goggle", "metabolites", "brainstorm", "smu", "ahl", "bateman", "egcs", "chirac", "museo", "coffeehouse", "scitech", "gcn", "trolling", "elmore", "grads", "lz", "andi", "localpref", "kayla", "ccl", "smeg", "donut", "libido", "fuselage", "diabetics", "ballerina", "crp", "morgantown", "paseo", "ptsd", "redheads", "curran", "diam", "ragnarok", "hkd", "summarised", "jx", "caitlin", "conscientious", "bandai", "hobs", "eft", "endometriosis", "cushioning", "mcneil", "belvedere", "nar", "acetyl", "boomer", "perinatal", "idm", "automake", "multichannel", "petr", "daredevil", "corcoran", "mrp", "holliday", "daimlerchrysler", "bowes", "mcgowan", "agfa", "mep", "goss", "mulch", "jvm", "harwood", "ranma", "marinas", "mobipocket", "streptococcus", "murcia", "landfills", 
"mcknight", "edd", "baud", "mcfarland", "designline", "undies", "prepay", "kodiak", "printout", "nonresident", "marysville", "curso", "palmos", "dorsey", "roo", "soulful", "websearch", "infotrac", "mpgs", "fouls", "openssh", "bravenet", "etsi", "serendipity", "tq", "sequentially", "yogi", "landslide", "howtos", "skool", "evolves", "iberia", "anakin", "duffel", "goodrich", "subfamily", "perennials", "ary", "matchmaker", "sagittarius", "locates", "dysfunctional", "maastricht", "bulletproof", "mcr", "uga", "stenosis", "chg", "recentchanges", "abrasion", "eindhoven", "opportunistic", "pcl", "analogs", "bba", "hillcrest", "cantor", "econometric", "trafford", "opie", "cro", "elkhart", "ringers", "diced", "fairgrounds", "cuyahoga", "plt", "cartons", "mustangs", "enc", "addons", "wstrict", "gow", "pharmacological", "headwear", "paediatric", "genitals", "hendricks", "ivr", "telemedicine", "judi", "icom", "academically", "chilton", "cbo", "amaya", "flickrblog", "fulbright", "foaf", "cllr", "xh", "fulltext", "centrum", "tecra", "kinks", "unisys", "preschools", "mcallen", "contoured", "aberdeenshire", "icm", "schenectady", "schematics", "dojo", "eserver", "nin", "interfacing", "borrowings", "hrt", "heparin", "universiteit", "hardcopy", "connective", "nihon", "oso", "adkins", "dunlap", "nsc", "irr", "clonazepam", "wikiname", "gaithersburg", "biophysics", "chromatin", "mathis", "bulova", "roxanne", "fca", "drg", "refurb", "wasteland", "plotter", "findlay", "cymraeg", "alc", "meek", "phonebook", "doodle", "arb", "wabash", "chronologically", "wms", "whitfield", "mchenry", "eide", "assy", "dusseldorf", "mmol", "shabbat", "nclb", "accommodates", "cmi", "stacker", "msf", "touchdowns", "plasmas", "barbell", "awk", "bibs", "sneaky", "smarts", "lankan", "synthetase", "lightwave", "alignments", "coached", "jac", "framingham", "opensource", "restroom", "videography", "lcr", "spatially", "doanh", "preprocessor", "cohn", "aon", "marginally", "ocs", "bak", "cavalli", "ddc", "grunge", "invoicing", "bigtits", "carney", "braintree", "southside", "vca", "flipped", "cabrera", "mindy", "surfaced", "glam", "cowgirl", "loginlogin", "mtr", "nakamura", "layoffs", "matures", "cty", "apm", "iggy", "margarine", "sneaker", "glycoprotein", "gcs", "queued", "sab", "hydroxide", "hanley", "cellulite", "hwang", "mtd", "mcqueen", "passat", "fluff", "shifter", "cartography", "firstprevious", "vito", "predicates", "bcl", "douay", "zeitgeist", "nickelodeon", "dru", "apar", "tending", "hernia", "preisvergleich", "britton", "stabilizing", "socom", "wsis", "anil", "midsize", "pullover", "lpn", "hoodwinked", "photoes", "beastie", "yucca", "harvester", "emmett", "shay", "obstructive", "pacman", "retroactive", "briefed", "bebe", "krusell", "clickz", "kermit", "gizmo", "atherosclerosis", "demography", "migraines", "wallingford", "newborns", "ljubljana", "restarted", "rnc", "meow", "thayer", "kilograms", "packager", "populate", "pembrokeshire", "arcane", "impractical", "tcg", "decentralization", "honeymoons", "authoritarian", "alu", "judaica", "tropicana", "tyan", "cardholder", "peavey", "gothenburg", "geocaching", "ident", "fluoxetine", "tipton", "teva", "lsa", "effortlessly", "failover", "cysts", "primetime", "kenosha", "kokomo", "penney", "snorkel", "amin", "iridium", "dwyer", "conserving", "toppers", "cfg", "tvc", "alternator", "nysgrc", "underwriter", "springhill", "panhandle", "joann", "isoform", "borden", "bombed", "elt", "halton", "guaranteeing", "fasta", "gonzaga", "boobies", "nadine", "breitling", "nutr", "ingersoll", "sandia", "pacs", 
"azur", "helms", "beos", "srcdir", "sherpa", "tuff", "ligands", "smalltalk", "sorghum", "nucleotides", "mmv", "ebi", "sbd", "lmao", "enhancers", "collaborated", "produ", "lila", "slotted", "nnw", "fila", "decking", "boz", "accelerators", "howstuffworks", "neighbourhoods", "michal", "rab", "hideaway", "dwayne", "coda", "cyanide", "kostenlose", "grotesk", "marek", "interlibrary", "provenance", "sra", "sog", "zinkle", "fanfare", "mapper", "boyce", "mlk", "dystrophy", "infomation", "footballs", "emailemail", "bathurst", "fof", "duracell", "feinstein", "magnavox", "evra", "servlets", "tss", "neill", "epithelium", "thc", "webbing", "bef", "jaya", "mame", "ppe", "emusic", "tso", "epp", "glencoe", "untested", "overviews", "affleck", "flinders", "informationhide", "hearst", "verifies", "reverb", "kays", "commuters", "rcp", "welivetogether", "crit", "sdm", "durbin", "riken", "canceling", "brookhaven", "gauss", "artistry", "phpnuke", "falkirk", "pitts", "dtp", "kwon", "rubric", "headlamp", "operand", "kristi", "yasmin", "gnl", "acdbvertex", "illini", "macho", "ningbo", "staphylococcus", "busting", "foss", "gfp", "yhoo", "sloane", "wooster", "delong", "mdi", "nilsson", "substring", "gac", "smelly", "gallatin", "hangar", "ephemera", "heli", "choo", "testicular", "miramar", "wearable", "carling", "buildup", "weaponry", "swann", "lian", "landline", "entrees", "corpora", "priv", "geeklog", "antiviral", "profiler", "lodi", "minimalist", "wolverines", "bbcode", "protagonist", "rata", "freephone", "plm", "raytheon", "refseq", "kingfisher", "numark", "moline", "esac", "takers", "gts", "amana", "worldcom", "hiroyuki", "procter", "pragma", "winkler", "walleye", "icf", "bagel", "asbury", "alpharetta", "syncmaster", "wists", "xfx", "wicklow", "tsr", "baer", "yf", "cmr", "chil", "leftfield", "lettings", "walkway", "coos", "petrochemical", "fia", "chula", "zalman", "carer", "humankind", "cmms", "hawley", "inverters", "mccormack", "pdu", "faceplates", "yeats", "motorhomes", "cie", "icts", "mcmurray", "zucchini", "lanai", "pwc", "chiral", "fermi", "newsreader", "multiculturalism", "cuddly", "listinfo", "shp", "primedia", "chl", "estrada", "pricey", "shekel", "apn", "diocesan", "readout", "clarifies", "klm", "dimes", "revlon", "dtr", "cranky", "paparazzi", "zheng", "merida", "bambi", "interceptor", "rox", "jamster", "noritake", "banding", "nonstick", "origami", "marketwatch", "yeti", "arf", "umbilical", "linz", "donates", "foursome", "lawrenceville", "azul", "springdale", "moisturizing", "loeb", "isr", "huston", "gatos", "disqualification", "suunto", "angiotensin", "spitfire", "wfp", "realnetworks", "summation", "plame", "querying", "gpc", "autonomic", "fq", "pathname", "novartis", "ufos", "manatee", "qh", "restructure", "larval", "zeu", "socal", "resettlement", "mistakenly", "radiative", "drapes", "intimately", "koreans", "realy", "womans", "groin", "greenway", "spamassassin", "mata", "gigagalleries", "algerian", "frat", "egullet", "electrics", "joni", "stencils", "reinventing", "reqs", "latte", "shaolin", "shopped", "beattie", "hrm", "hypnotherapy", "muppet", "abp", "checkpoints", "tpa", "derechos", "pieter", "timesselect", "viacom", "strcmp", "kardon", "sideshow", "classifier", "westbrook", "repro", "moser", "studi", "sdf", "colonialism", "supermicro", "scorers", "sitcom", "pastries", "aldo", "azim", "authorizations", "holsters", "neuropathy", "backorder", "humphreys", "metroid", "vcs", "nikkor", "mcf", "jacobsen", "conjugated", "lcc", "unethical", "vacances", "whos", "asr", "alphanumeric", "grumpy", "fixedhf", 
"holm", "sirens", "lfs", "benelux", "caters", "slp", "prasad", "kirkpatrick", "jamahiriya", "tol", "coagulation", "girly", "bnp", "archdiocese", "orbiter", "edgewater", "lem", "keyless", "repatriation", "tortilla", "dissociation", "industrie", "watercolour", "ucb", "waite", "madsen", "mnh", "opticians", "nop", "newmap", "mse", "bottleneck", "regressions", "linton", "sio", "buckeyes", "bodywork", "applique", "jewell", "gef", "hornby", "redefined", "empowers", "informix", "tots", "goalkeeper", "startseite", "blurb", "feedburner", "dominatrix", "norcross", "compiles", "bancorp", "encoders", "pmp", "boomerang", "temecula", "ghg", "structurally", "caveats", "homeownership", "birdie", "disseminating", "lanyard", "horst", "interlock", "pagers", "esophagus", "ocz", "sexshow", "jackpots", "optometrists", "zak", "krueger", "hickey", "erode", "unlicensed", "termite", "ibuprofen", "drugstore", "audiology", "gannon", "integrals", "fremantle", "lysine", "sizzling", "macroeconomics", "tors", "thule", "gtx", "eeprom", "kaleidoscope", "dmitry", "thawte", "busters", "officemax", "absorber", "nessus", "imager", "cebu", "kannada", "sailboat", "hectare", "netball", "furl", "holographic", "defra", "salaam", "respirator", "countertop", "gla", "installments", "hogg", "partying", "weatherford", "sav", "exited", "crispy", "coffees", "knowhere", "sequin", "bendigo", "unis", "bandwagon", "janssen", "myst", "polymerization", "byval", "nozzles", "labview", "snitz", "rpi", "hcc", "unbelievably", "pasting", "butyl", "ppd", "forested", "unrivaled", "roadways", "varna", "maidenhead", "almanacs", "gfx", "randomness", "middlebury", "muon", "ringo", "svr", "caliper", "lmb", "woolf", "innovators", "anode", "microprocessors", "tps", "stk", "siting", "misinformation", "aneurysm", "closeups", "kinsey", "prp", "cnbc", "eroded", "tris", "lonnie", "hartlepool", "bol", "alastair", "agr", "fafsa", "javac", "uclibc", "fodor", "afrikaanse", "colognes", "contestant", "snell", "prescreened", "believable", "anesthesiology", "elmhurst", "misha", "melatonin", "bongo", "rmb", "mdf", "terr", "xw", "bloke", "avc", "oxnard", "cess", "cedex", "electrochemical", "brevard", "brw", "brenner", "slalom", "waterhouse", "calif", "acces", "aquatics", "cari", "lurker", "buffett", "chews", "hoodies", "phony", "vila", "fsf", "gmake", "nikko", "grasslands", "monolithic", "polifoniczne", "bugtraq", "cpage", "engr", "subcontract", "prophylaxis", "texinfo", "ings", "cotswold", "guillermo", "unstructured", "boop", "hitman", "tla", "mercier", "restated", "nukes", "duplicator", "mehta", "macomb", "fundamentalism", "australasian", "isk", "rerun", "moda", "segmented", "cranberries", "leas", "pleated", "handshake", "digests", "innovate", "goode", "erisa", "jeb", "dismantling", "ferrell", "hellometro", "leavenworth", "snowmobiling", "fora", "fdr", "gaba", "vfs", "dlc", "byers", "codon", "webnotify", "sfr", "pylori", "loomis", "acidity", "gershwin", "formaldehyde", "welder", "cyp", "kendra", "switcher", "ocaml", "goldie", "mab", "gooshing", "mockingbird", "ponte", "xlt", "hogwarts", "juicer", "lloyds", "echelon", "gabba", "arranger", "umbro", "metallurgy", "baa", "neq", "liteon", "queuing", "vsize", "shiite", "valuing", "argon", "coheed", "hooray", "flightplan", "carefree", "souza", "kershaw", "millar", "biotin", "salter", "testicles", "morph", "econometrics", "remo", "msec", "marconi", "ote", "receiverdvb", "expatriate", "tantra", "codified", "ncs", "overlays", "thingy", "comforters", "conservatories", "ruskin", "dpf", "cyndi", "germination", "lipoprotein", 
"ayurvedic", "planetarium", "tribeca", "bihar", "keenan", "discos", "eastbourne", "robles", "gianni", "dxf", "homebuyers", "nogroup", "freescale", "wiccan", "sess", "merrimack", "groton", "billboards", "searcher", "uttar", "mailinglist", "metacrawler", "priser", "osceola", "bioterrorism", "tourmaline", "leatherman", "microns", "unifying", "anaesthesia", "videogame", "aws", "dtc", "chc", "intranets", "escalating", "bluebird", "iucn", "gls", "mahjong", "interstellar", "kenton", "underestimated", "groupsex", "loudspeakers", "flexi", "vst", "junctions", "redman", "transferase", "bvlgari", "hampden", "nls", "selby", "wausau", "stoppers", "snowshoeing", "uppercase", "cirrhosis", "publib", "metrology", "connexion", "stoneware", "moncton", "traci", "krumble", "pathogenic", "rasmus", "raritan", "riverfront", "humanist", "usefull", "pompano", "skewed", "cleary", "nepa", "ludacris", "sequenced", "xiao", "teaming", "flatshare", "aromas", "positional", "alesis", "glycine", "vee", "breakthroughs", "cashback", "throwback", "charlestown", "nexrad", "gestation", "powering", "magee", "osnews", "logins", "sadism", "emb", "muncie", "panoramas", "plenum", "ato", "aotearoa", "foro", "hydrolysis", "flac", "labia", "immunizations", "existential", "umc", "sweaty", "segond", "addis", "beasley", "breached", "rounder", "rectum", "nha", "perched", "jah", "dsr", "lta", "videoconferencing", "cytoplasm", "makin", "sedimentary", "laurier", "aachen", "wnd", "olney", "massimo", "chlorophyll", "scop", "shipyard", "centering", "manley", "sunroof", "dvorak", "etch", "answerer", "briefcases", "gwent", "bogart", "amit", "kaufen", "untranslated", "raffles", "reconnect", "teeny", "benthic", "mcmanus", "infotech", "carlin", "lithograph", "ure", "stoner", "repost", "iras", "resurfacing", "kelli", "spitzer", "jae", "dunne", "hyperbolic", "pstn", "bisque", "anzeigen", "standoff", "westbury", "solano", "kailua", "acoustical", "photovoltaic", "orchestras", "redline", "reggaeton", "qstring", "declan", "tama", "wank", "virol", "iy", "solvers", "linuxworld", "canadiens", "rockabilly", "smokin", "tumours", "loudspeaker", "handicapping", "tatu", "evangelion", "excretion", "breakage", "negra", "horsham", "jing", "petro", "notations", "midgets", "comprar", "homemaker", "neverwinter", "ddt", "categorize", "geophys", "loa", "tga", "foreskin", "jornada", "inetpub", "premierguide", "reflexology", "sophos", "helphelp", "foundries", "registrants", "sweats", "atvs", "capstone", "adecco", "sensei", "publicized", "transessuale", "federalist", "objectweb", "portrays", "postgres", "fesseln", "hidalgo", "prosthetic", "kristine", "microfiche", "dce", "watergate", "setbacks", "karan", "cdata", "kfc", "grandview", "amerisuites", "aural", "gatekeeper", "heinemann", "decommissioning", "nq", "gestion", "thermodynamic", "patrice", "profiled", "disambiguation", "mmmm", "bittersweet", "mul", "gustavo", "isolating", "xine", "bigfoot", "nrw", "mycobacterium", "yamada", "coldwater", "whitehouse", "cultivars", "santorum", "mugabe", "margo", "rundown", "carbondale", "gizmos", "effingham", "beastility", "agus", "ucd", "dowling", "mitac", "steels", "oakdale", "nda", "mystique", "cortislim", "oes", "disp", "loaders", "trouser", "oai", "hoboken", "sepia", "differentials", "sabi", "dancehall", "sarajevo", "brava", "underscores", "roadshow", "fbo", "sabah", "russel", "nephrology", "squamous", "mvn", "wz", "malden", "mita", "orissa", "ise", "vfr", "chianti", "minsk", "coffey", "domestically", "qantas", "brandi", "artefacts", "solihull", "tation", "tchaikovsky", "refineries", 
"ronan", "pricewaterhousecoopers", "swimsuits", "automates", "wylie", "whomever", "sidelines", "shaffer", "toolbars", "preservatives", "wagga", "kenai", "bobs", "mortensen", "unplanned", "characterisation", "ppa", "mip", "peering", "fopen", "vgn", "wmissing", "csn", "rudd", "bourke", "pelvis", "goodmans", "potluck", "ioffer", "cial", "davidoff", "creamer", "tsc", "gfs", "contax", "columbine", "portables", "fledged", "aquinas", "kidz", "edonkey", "hourglass", "pagetop", "paloma", "gunmen", "disables", "ssangyong", "antiretroviral", "moschino", "hoyt", "okc", "lockport", "pittsfield", "pollack", "hoyle", "arousal", "inhibiting", "reo", "mammary", "trampolines", "hillman", "trimmers", "bridgestone", "muvo", "wcities", "boi", "diddy", "conveyancing", "apl", "echinacea", "rok", "phish", "frigidaire", "oxo", "hah", "halibut", "penrith", "brno", "silverware", "teoma", "rcra", "mlo", "ideologies", "feminists", "fff", "sculpted", "uq", "rta", "embo", "rollin", "contraindications", "einai", "ssrn", "oup", "rebuttal", "underside", "alumnus", "archeology", "preise", "ontologies", "fenders", "frisbee", "hmmmm", "tipo", "hyperactivity", "seagull", "nanotubes", "polos", "bonaire", "hehehe", "fim", "reece", "elsif", "spinners", "annealing", "maximizes", "pld", "ctp", "eurasia", "dickey", "ako", "carpeting", "yorkers", "ltte", "eukaryotic", "bexley", "sions", "bremer", "marisa", "frustrations", "delgado", "resection", "dioxin", "islamist", "brant", "hss", "kubrick", "fft", "touchscreen", "layoff", "facelift", "decoded", "gry", "shitty", "dodger", "ihs", "lessig", "zaf", "revell", "sched", "rpgs", "euphoria", "acuity", "popper", "lockdown", "nsp", "transmittal", "heatsink", "assholes", "hayman", "novi", "equilibria", "requester", "allrecipes", "serialized", "hangzhou", "bjork", "stringer", "nanjing", "milligrams", "jab", "snohomish", "strathclyde", "yoko", "intramural", "curated", "finalised", "tania", "cdd", "gund", "tascam", "noam", "hardstyle", "arun", "cga", "waistband", "fibroblasts", "leandro", "metastasis", "userpics", "greenbelt", "leuven", "printk", "reachable", "pss", "radioactivity", "caine", "gyfer", "boch", "howdy", "cocksucking", "marlon", "timmy", "liga", "gregorian", "reorder", "aerosols", "archeological", "logarithmic", "sexape", "robby", "completions", "yearning", "transporters", "sandalwood", "megs", "idp", "rapidshare", "tsb", "omnibook", "gamepro", "bca", "decontamination", "tamiya", "euclidean", "salina", "woodford", "formalism", "aching", "nbs", "audigy", "libexec", "eyepiece", "bibl", "bobcat", "freehand", "guo", "ltsn", "itil", "nugent", "esr", "sce", "killeen", "jamming", "applicator", "icrc", "mezzanine", "meghan", "cupertino", "logfile", "zed", "humidifier", "padilla", "susanne", "collapses", "yung", "longwood", "krw", "mainstay", "descr", "dtm", "atcc", "tasman", "accessoires", "mucosa", "dachshund", "zf", "syringes", "breakpoint", "telus", "stoney", "nepali", "regimens", "wok", "canola", "slicing", "reproducible", "experi", "skydiving", "sof", "bogota", "discogs", "datagram", "videographers", "cag", "nicks", "platelets", "trannies", "pamper", "nineties", "bracknell", "disinfection", "perfusion", "postseason", "tigerdirect", "smoothie", "punisher", "tabbed", "tcu", "alene", "lismore", "coquitlam", "auctioneers", "somethin", "daniela", "dials", "enhydra", "kyrgyz", "iia", "bianchi", "iata", "zim", "buscador", "roadrunner", "blackhawks", "jsr", "misfits", "quiksilver", "nwn", "sqlite", "siu", "tarantino", "addi", "jkt", "buyout", "replays", "wcs", "adrenergic", "bottling", 
"caldera", "baseman", "botanicals", "techie", "farr", "vtech", "donde", "beyer", "versiontracker", "pse", "hashcode", "tradeshow", "lewisville", "aster", "transparencies", "bloomingdale", "northrop", "revo", "overkill", "nlrb", "lazio", "enr", "diag", "chiapas", "freedict", "disponible", "morissette", "effortless", "hydroelectric", "cranial", "hindsight", "orientated", "abrasives", "fpc", "brl", "vpns", "feingold", "thunderbirds", "dha", "wot", "geog", "harrah", "wxga", "nmfs", "boynton", "cashing", "spousal", "abusers", "twinlab", "vick", "aml", "sodimm", "copley", "mallard", "twikipreferences", "airman", "configurator", "clc", "neurobiology", "diamante", "dreamworks", "corsets", "dowd", "escrituras", "bureaucrats", "songtext", "wham", "phpgroupware", "cyclin", "conyers", "youll", "kowloon", "fairytale", "pickens", "bybel", "mln", "wres", "barm", "amplitudes", "nmap", "nvq", "ocd", "ryu", "microcontroller", "premiered", "institutionalized", "hamm", "gyno", "bhopal", "circulatory", "centerline", "chairmen", "guerlain", "pedo", "hussain", "portlet", "proscar", "histone", "opioid", "totalling", "pyobject", "translational", "lehmann", "keaton", "elkins", "jamison", "interstitial", "inest", "tanzanite", "helical", "redlands", "sagradas", "fondue", "windscreen", "adderall", "othello", "supersonic", "pocatello", "maniacs", "sysadmin", "foothill", "earmarked", "highspeed", "uncheck", "rapes", "vlad", "cif", "photosynthesis", "junit", "remotes", "epo", "mcm", "ucf", "nacl", "sfa", "empirically", "dfes", "addon", "pon", "feelin", "callmanager", "deteriorating", "statenvertaling", "cypriot", "entert", "fascia", "woburn", "jalan", "fryers", "cally", "layering", "geriatrics", "picky", "conley", "boces", "barth", "lvm", "mooring", "mcdonell", "expats", "bizarr", "loadavg", "perla", "micheal", "bok", "friendster", "endoscopy", "msx", "buzzwords", "lumen", "airwaves", "jagger", "setups", "inman", "schindler", "limewire", "drawstring", "midrange", "frodo", "superpower", "recliner", "trisha", "trium", "utm", "grimsby", "wyeth", "urs", "kds", "adjuster", "impeccable", "shari", "marketplaces", "tefl", "sudo", "technische", "characterizing", "gawker", "gagging", "cyclist", "atg", "generics", "richey", "magneto", "crunchy", "teletext", "drwxrwxr", "crabtree", "underfull", "hemscott", "webmasterworld", "objc", "musicmatch", "sealant", "timberwolves", "harriers", "shangri", "robo", "roto", "mnem", "nnn", "aidan", "fidel", "executables", "concertos", "vob", "extracurricular", "haverhill", "squirters", "hbp", "tonal", "atr", "ashtray", "gpu", "payton", "psychoanalysis", "hesitant", "poco", "nedstat", "rcmp", "microchip", "eroticos", "fea", "kors", "susquehanna", "userinfo", "modulo", "antler", "bangladeshi", "desking", "nikolai", "nuys", "ludhiana", "rdr", "spankings", "chatrooms", "pretreatment", "brittney", "jer", "tianjin", "qj", "winnebago", "mcfadden", "notecards", "tix", "murfreesboro", "quaternary", "subtracted", "tropez", "mcgovern", "olivetti", "hikers", "vivaldi", "cuties", "lnb", "gilchrist", "preheat", "bernadette", "microdrive", "rookies", "overton", "potpourri", "neiman", "seb", "sigs", "jarhead", "momo", "uzbek", "ttt", "dubya", "signatory", "cim", "energized", "brite", "shs", "minimums", "needlepoint", "deng", "camargo", "oems", "bolle", "webrings", "ehrlich", "azz", "firefighting", "icalendar", "disallow", "exch", "mclachlan", "zaragoza", "brixton", "efi", "kilo", "tcmseq", "moisturizer", "suonerie", "remanded", "empresa", "shoebox", "disagrees", "lowdown", "trove", "filer", "apologetics", 
"englisch", "texarkana", "threonine", "metart", "siti", "encephalitis", "tomatometer", "arias", "kenner", "anamorphic", "subspace", "cleats", "ifp", "circ", "pressured", "peppermill", "sml", "clarifications", "zionism", "pti", "retin", "klicken", "disjoint", "ema", "openldap", "koenig", "carats", "hijacked", "tch", "burlingame", "checkbook", "candice", "coworkers", "eno", "karla", "cus", "gio", "statm", "haifa", "reincarnation", "budweiser", "heuristics", "tunisian", "hologram", "macular", "eral", "refinishing", "chia", "celestron", "leyland", "reloading", "hombre", "munch", "basf", "rolleyes", "bidirectional", "ahhh", "chica", "starfish", "kurdistan", "boro", "heartbreak", "preps", "irina", "mylar", "congestive", "dmd", "schilling", "twikivariables", "battleground", "tectonic", "equate", "corbis", "inflatables", "naacp", "pathologist", "minnetonka", "langston", "memoriam", "underserved", "rectifi", "elmwood", "fukuoka", "glbt", "rsi", "parr", "pob", "ods", "welles", "gujarati", "sportsline", "leno", "healthwise", "vrml", "sida", "azres", "sapporo", "jscript", "predictability", "pajama", "paddlesports", "adenocarcinoma", "toning", "gestational", "kravitz", "ptcldy", "snowball", "adl", "travelogues", "crl", "zocor", "ecotourism", "leadtek", "hkcu", "morehead", "niro", "fueling", "orthopaedics", "crayons", "tikes", "revamped", "olap", "curfew", "hamlin", "brandeis", "bree", "stylistic", "corneal", "beckman", "crusher", "riva", "prefs", "militaria", "marshfield", "elo", "swank", "matisse", "villeroy", "proactively", "mccarty", "zas", "acdbcircle", "horney", "modeler", "progressives", "grosvenor", "linger", "creationism", "dork", "claritin", "psychosis", "fei", "firsthand", "gigi", "cranston", "hayley", "ags", "muted", "turbidity", "mountable", "kiki", "vz", "avondale", "oceanographic", "zzz", "tsg", "epl", "nonzero", "iwork", "scavenger", "touted", "candace", "kava", "kronos", "adjuvant", "tyneside", "travolta", "sari", "preventable", "bumpy", "aleph", "lga", "conroy", "mastermind", "vaccinated", "coburn", "rawk", "acceptability", "stryker", "surcharges", "noticeboard", "chapin", "permutation", "colpo", "ucsc", "mulligan", "fod", "ketchup", "alimony", "tng", "viscous", "skk", "cmm", "unambiguous", "emphysema", "epistemology", "grantham", "avila", "solana", "toolkits", "soloist", "rejuvenation", "chn", "jse", "anaconda", "bsnl", "carfax", "leveraged", "wega", "scanjet", "ibc", "meng", "burley", "efa", "freesex", "plasmids", "steffen", "xz", "woofer", "lada", "hinckley", "millimeter", "snape", "rollercoaster", "tdc", "connery", "newswatch", "roundups", "keylogger", "parka", "scouse", "unists", "timo", "hea", "spock", "ffs", "bmj", "farrar", "decompression", "draco", "mika", "galena", "msft", "inactivation", "metafilter", "mbna", "lymphatic", "ofc", "gian", "berks", "hdv", "wirral", "boxset", "ashrae", "ilford", "allman", "kroon", "gmo", "sdc", "builtin", "lisboa", "coc", "rollback", "westgate", "thd", "bobo", "crockpot", "weaning", "snowshoe", "hijackthis", "backside", "fetchmail", "candlewood", "angelfire", "ucsf", "painkiller", "nutty", "fenway", "restrooms", "myeloma", "scallops", "osteopathic", "vividly", "rmit", "countermeasures", "ofertas", "gwinnett", "dirs", "duvall", "wildflower", "stackable", "greensburg", "barebones", "merino", "stooges", "chatsworth", "jello", "mtime", "barium", "toric", "looting", "kiefer", "agg", "mauro", "shearer", "decca", "hydrophobic", "unsw", "millard", "btn", "terraserver", "returnable", "ohs", "resuscitation", "cancelling", "rns", "nrg", "stratification", 
"oliveira", "cahill", "grumman", "webdav", "adagio", "sunburst", "ayumi", "sev", "zt", "bela", "swt", "startups", "ranting", "udaipur", "tonya", "erupted", "ghostscript", "meltdown", "rainwater", "gellar", "alm", "vy", "cnrs", "redefining", "shar", "vesicles", "piccolo", "scalia", "resizing", "showrooms", "verifiable", "lobo", "nunn", "boyds", "havens", "bacterium", "zb", "sideline", "bushing", "ligament", "penpals", "translocation", "costco", "serialization", "wst", "playgrounds", "universidade", "fong", "hbs", "zips", "ntot", "eigenvalue", "conductance", "albemarle", "mudd", "dvs", "niels", "explodes", "lindy", "coimbatore", "panzer", "audioscrobbler", "keri", "soviets", "tweeter", "poncho", "sids", "faerie", "oooh", "oceana", "ayn", "wakeboarding", "stinger", "yuba", "chipsets", "anastacia", "collapsing", "yaoi", "gwyneth", "kuwaiti", "jalbum", "storageworks", "duplicators", "cubicle", "rana", "winfrey", "avanti", "iop", "blige", "papaya", "auger", "macclesfield", "mongoose", "crossfade", "instrumentals", "iconic", "sulfide", "dawg", "mahler", "maurer", "auschwitz", "gambit", "accom", "stb", "uxbridge", "baan", "baumatic", "slt", "landis", "fredrick", "jogger", "occlusion", "jz", "charlize", "covent", "reinvestment", "ssdasdas", "chatterbox", "neutrons", "fss", "silo", "polystyrene", "amon", "jodhpur", "intelligencer", "dundas", "netmag", "molokai", "pluralism", "kobayashi", "tetanus", "bcd", "neuromuscular", "fkq", "caribe", "iit", "nphase", "multifamily", "timres", "nrcs", "farnham", "coors", "execs", "hauser", "citeseer", "hiker", "manuf", "strategist", "electroclash", "outlays", "ktm", "zloty", "osmosis", "mojave", "renova", "hsp", "soothe", "mariposa", "bir", "advancements", "franck", "bock", "fsm", "leary", "slurry", "ker", "dte", "soulmates", "marissa", "sga", "beretta", "chiropractor", "vibrational", "sandusky", "obsidian", "dressers", "winger", "endeavours", "argonne", "runnin", "bfi", "gaye", "colfax", "logics", "camedia", "ctd", "optimise", "ernesto", "voeg", "adamson", "coeds", "subdirectories", "asain", "guilder", "comparator", "sealer", "sleazy", "onstage", "todas", "waterproofing", "devlin", "riel", "pinky", "lewisham", "mints", "wdm", "avocent", "invertebrate", "brea", "rebellious", "carnitine", "trib", "webex", "pairings", "guesthouses", "yikes", "exorcism", "grilles", "mim", "cultivar", "orson", "teammate", "idn", "hrvatska", "sequencer", "grandparent", "demonic", "wonka", "prezzo", "opto", "collaboratively", "oberlin", "nrl", "gorda", "newburgh", "alcoa", "mums", "facs", "lossless", "mmp", "beasteality", "imbalances", "andean", "superconducting", "spectroscopic", "armpit", "dect", "mew", "worsening", "symp", "igf", "metalworking", "groundhog", "clomid", "ginkgo", "decedent", "dimethyl", "retval", "openurl", "baku", "telescopic", "vespa", "phasing", "lactate", "poughkeepsie", "dodson", "monorail", "bookworm", "enero", "sabbatical", "ced", "skeptic", "backlit", "smr", "kentech", "lamette", "gita", "itm", "ath", "hennepin", "foucault", "onshore", "acls", "pwm", "florals", "millimeters", "krauss", "asca", "wicks", "pathologists", "fanfiction", "pathol", "toxics", "ipcc", "kinesiology", "potions", "tern", "squirts", "delmar", "storybook", "grenades", "rls", "etrex", "contrasted", "opting", "hauled", "taupe", "renta", "grd", "odeo", "jiangsu", "osd", "hookup", "myron", "atb", "ctg", "doreen", "altima", "keepsakes", "seawater", "ecko", "zarqawi", "contenders", "conveyors", "accenture", "iagora", "haier", "crutchfield", "fulfills", "rota", "kelso", "petaluma", "ifrs", 
"servicios", "printmaking", "miata", "julianne", "dotnet", "reconstructive", "metcalf", "vicksburg", "gri", "bookshelves", "supermodels", "glycerol", "wiseman", "sliders", "carhartt", "redford", "itemized", "rsp", "defamatory", "eir", "matheson", "amalfi", "currentversion", "renminbi", "yap", "mangas", "bottlenecks", "pyrex", "huffington", "sculpting", "sedans", "dpt", "hoobastank", "launchers", "finishers", "psychologically", "ssm", "schaeffer", "northside", "interdependence", "microfinance", "droplets", "inducted", "fos", "uninitialized", "conor", "repercussions", "woking", "longmont", "medion", "monika", "hydrological", "runes", "hobbyhuren", "ents", "ortega", "breweries", "landon", "burrell", "forecaster", "quickie", "stephane", "parabolic", "boreal", "bankroll", "bioassay", "martinsville", "ldem", "interventional", "teensex", "tabulation", "joop", "creampies", "trier", "arbitrage", "dogwood", "convergent", "enviar", "hutt", "majoring", "techwr", "glitches", "dugg", "qwerty", "equivalency", "rela", "sedation", "quik", "rosemont", "xk", "harmonics", "devi", "highschool", "orvis", "centimeters", "lavatory", "destructor", "accelerates", "opts", "relocations", "wilco", "tricare", "beckley", "ryde", "januari", "kee", "blacksburg", "anova", "midfielder", "tornadoes", "nand", "ladd", "docklands", "mgs", "tanzanian", "padi", "msl", "clamav", "megastore", "xander", "eon", "winelands", "syllabi", "elif", "lorne", "noida", "visalia", "mykonos", "wcc", "krieger", "safeway", "sheri", "prosite", "wikis", "mozzarella", "glenda", "uta", "dqg", "waterville", "yonkers", "republish", "endoscopic", "dilbert", "vfd", "transen", "konqueror", "feliz", "biscayne", "sexocean", "debconf", "disproportionately", "taskbar", "libero", "synchrotron", "tet", "memorize", "marquez", "williston", "muppets", "volumetric", "umpires", "shuttles", "jumpstart", "motogp", "hyperplasia", "nber", "donahue", "parodies", "prado", "legit", "humax", "scrapped", "ingo", "dillard", "orphanage", "disruptions", "erasure", "preamp", "pde", "mcallister", "ziegler", "loewe", "dowload", "msb", "iptv", "bondi", "freelancer", "felton", "dpp", "umax", "radars", "dmg", "materiel", "megadeth", "cooperstown", "sdh", "staffers", "mawr", "daw", "comptia", "teddies", "upsilon", "sizable", "coenzyme", "enzo", "afterlife", "mather", "ncurses", "harddrive", "cml", "counterpoint", "batesville", "skywalker", "franke", "takashi", "wristband", "jimenez", "esque", "chiller", "barra", "ales", "worthing", "zna", "jonathon", "psr", "sump", "breadcrumb", "sucrose", "amro", "portege", "neogeo", "renewables", "filipina", "sgs", "mbas", "ihop", "cortisol", "banshee", "supersedes", "bullseye", "prezzi", "rbs", "pacino", "cajon", "downloader", "seabrook", "leif", "jrr", "iwc", "taranaki", "chronically", "merkel", "megaman", "setq", "preschoolers", "vcl", "unenforceable", "lto", "busi", "noone", "rotc", "fisheye", "oaxaca", "gerontology", "microsano", "predation", "gaas", "kilimanjaro", "exacerbated", "emr", "infestation", "yarra", "volker", "linearity", "huey", "aerials", "stylist", "porosity", "schofield", "alam", "sprayer", "tirol", "sfu", "gliders", "corby", "wenatchee", "prognostic", "unregulated", "mult", "pittman", "bbl", "hadith", "ots", "kdelibs", "jayhawks", "teesside", "rav", "lobos", "reportable", "dickerson", "carotene", "filesystems", "enrollees", "cena", "sanjay", "compaction", "juicers", "gemm", "methionine", "lala", "toplist", "holyoke", "dewpoint", "rdiff", "osp", "delimiter", "forsaken", "richfield", "hangout", "striptease", "jhi", "amf", 
"sonicwall", "burgeoning", "unicast", "amnesia", "cipro", "cherie", "klip", "libxt", "menswear", "inthevip", "wrenches", "actuate", "capote", "cvd", "flexeril", "molar", "databank", "montevideo", "sunglass", "lhs", "kassel", "followings", "shipley", "accretion", "asha", "bullpen", "mamas", "schreiber", "gnc", "dysplasia", "freeroll", "efl", "igs", "utopian", "kota", "iden", "dil", "wia", "sosa", "negril", "hyped", "epidermal", "autopilot", "garza", "decrypt", "batik", "crain", "subd", "utilising", "dsu", "fermanagh", "idr", "interoperable", "mam", "delano", "sonja", "plex", "compat", "replaceable", "forint", "nudism", "netcom", "formulary", "irvin", "galery", "hounslow", "fosamax", "striping", "excavating", "recoveries", "mrsa", "mainstreaming", "awt", "hola", "hoody", "dci", "geri", "seasonings", "marcelo", "pantech", "fcp", "scaricare", "roxbury", "clamping", "whiplash", "dildoes", "takeoff", "wiggle", "truely", "henna", "cartesian", "gamezone", "yank", "llewellyn", "shag", "asymmetrical", "universitat", "williamstown", "trolleys", "interlocking", "doped", "headband", "internetweek", "outperform", "ncp", "harmonization", "hamid", "differentiating", "hitters", "konrad", "wickets", "restarting", "bcm", "xilinx", "wideband", "tmobile", "rocha", "pbox", "aea", "stevenage", "moorhead", "directorio", "restructured", "aerodynamic", "hopewell", "evaluative", "zuma", "annuaire", "subtracting", "bram", "kuna", "logbook", "xor", "louth", "pict", "truetones", "gabor", "rotates", "ezcontentobjecttreenode", "leanne", "bgcolor", "rescues", "wim", "corsa", "causality", "tiling", "ethnographic", "waffles", "doubly", "fandango", "powermac", "catalysis", "annexes", "lisle", "pushj", "naylor", "wrongdoing", "paducah", "gunter", "iranians", "aat", "commandos", "abcd", "repeatable", "deh", "epiphone", "scf", "weekender", "milner", "schott", "welders", "semifinals", "quantization", "surfacing", "vegetarians", "hagerstown", "polyclonal", "transponder", "gottlieb", "withdrawl", "geneid", "tierney", "glock", "guatemalan", "iguana", "glaring", "cifras", "salman", "choker", "ecologically", "scoreboards", "mohr", "dpa", "spaceship", "digimax", "moremi", "btc", "technologie", "tunica", "powerbuilder", "aorta", "unconfirmed", "dimitri", "degenerative", "delve", "torrey", "celica", "beloit", "nir", "substr", "lowrance", "ballantine", "crimp", "bss", "mousepad", "umbria", "oregano", "rashid", "microtek", "geary", "boaters", "soyo", "visualisation", "brianna", "handlebars", "weightloss", "interconnects", "playtime", "enrollments", "gyllenhaal", "criticality", "geoscience", "mhonarc", "golive", "deville", "meh", "moseley", "spacers", "unido", "deferral", "hersh", "hilliard", "vlsi", "keegan", "feces", "uy", "bute", "activewear", "transcriptions", "metered", "bugfixes", "cami", "interna", "quintessential", "babycenter", "gardena", "cultura", "stockpile", "psychics", "pediatr", "williamsport", "westlaw", "hetero", "meteorite", "extruded", "lakh", "starware", "phage", "laszlo", "hernando", "vogt", "wolfpack", "lags", "eldridge", "wray", "hajj", "edirectory", "longstanding", "knitwear", "apocalyptic", "fatties", "darmstadt", "mco", "ucsb", "fillings", "marti", "aberystwyth", "infineon", "fdd", "inflows", "tmpl", "estuarine", "lita", "nubuck", "socialization", "estock", "mbit", "valign", "caving", "vec", "alkyl", "artichoke", "leasehold", "directgov", "ubiquitin", "fuerteventura", "hairdressing", "dhhs", "fecha", "nio", "wsi", "quigley", "yellowpages", "pretec", "biomechanics", "microcomputer", "discipleship", "hella", 
"womack", "magnifier", "acdbtext", "pitney", "esters", "haan", "ofcom", "ablation", "nutcracker", "dosages", "prn", "zm", "dfs", "multiplexing", "indentation", "hazmat", "eac", "dalhousie", "ahem", "retardant", "shankar", "overheads", "southfield", "iee", "gnustep", "spm", "azkaban", "dermal", "metar", "sizeable", "aftershave", "lahaina", "earners", "tenderloin", "dji", "ipp", "chee", "hamburgers", "oliva", "gaultier", "cios", "margie", "nms", "wandsworth", "caltech", "stapleton", "gsc", "francophone", "sqm", "xoxo", "coord", "mocking", "nri", "serengeti", "raccoon", "shrinkage", "prd", "uris", "hamsters", "codphentermine", "thrashers", "calibrate", "gilmour", "rambo", "cleburne", "serrano", "niacin", "strawberrynet", "wesson", "ormond", "oxycontin", "bibliographical", "wynne", "glyph", "nagios", "marinated", "marko", "sfas", "genotypes", "conde", "alford", "madurai", "evacuees", "urbanization", "kilgore", "unwired", "elseif", "pneumoniae", "skyscraper", "ebags", "gnn", "tooled", "intermec", "charlottetown", "submersible", "condensate", "matchup", "undefeated", "krs", "movin", "kino", "vidio", "photographing", "pocono", "footjobs", "trackers", "kinkade", "unify", "dissident", "sperry", "iframe", "tur", "commu", "xterm", "swapped", "stent", "vermillion", "angiography", "areaconnect", "brockton", "daz", "abcdefghijklmnopqrstuvwxyz", "dunst", "livonia", "specialisation", "nsi", "walgreens", "plasticity", "crux", "nhra", "armband", "leamington", "mosley", "iga", "stemmed", "appleby", "grayscale", "labonte", "lek", "cartoonist", "flotation", "geol", "deterrence", "cardin", "aardvark", "cosmological", "dothan", "isotopic", "hadleionov", "langford", "ssg", "understated", "obit", "unt", "randomised", "amphetamine", "shia", "grout", "reba", "wrx", "rsgi", "bharat", "sls", "slg", "kilometre", "tristar", "gippsland", "pastels", "stallions", "paramedics", "fishbase", "rolla", "curie", "bootable", "skit", "sourcewatch", "decimals", "boe", "catania", "countertops", "paola", "elwood", "hocking", "prerelease", "seqtype", "femoral", "anz", "visceral", "fructose", "edta", "silverstein", "broderick", "zooming", "hamasaki", "keswick", "extinguisher", "subpoenas", "spiele", "rincon", "pll", "donny", "vitale", "fledgling", "boinc", "traversal", "bagder", "erick", "kcal", "midfield", "hypersensitivity", "redshift", "glaser", "sado", "cusco", "imagemagick", "uic", "fernandes", "prosthesis", "jsc", "omron", "alberghi", "electricals", "kelp", "taker", "placeholder", "moulton", "yall", "npdes", "massages", "catalist", "metarating", "tupelo", "syriana", "batt", "dbms", "asb", "videotapes", "backseat", "kauffman", "manipulations", "accomodate", "tioga", "aylesbury", "submenu", "kwacha", "chondroitin", "sandpiper", "vamp", "overarching", "janes", "selectors", "condoleezza", "internationals", "estuaries", "schulze", "osti", "paleontology", "emporio", "stepper", "reykjavik", "waterskiing", "renfrewshire", "superheroes", "marg", "leftovers", "mariano", "bangboat", "guestrooms", "urethane", "stoughton", "paphos", "sprinklers", "accum", "bms", "datsun", "sainsbury", "chefmoz", "helo", "yvette", "procmail", "midsole", "ayuda", "geochemistry", "reflectivity", "moog", "anth", "durand", "linea", "butterworth", "datagrid", "metetra", "rodrigues", "apprenticeships", "oncol", "dop", "asymptomatic", "retails", "offroad", "simpletech", "gandalf", "minot", "evidentiary", "kpa", "whelan", "synthesize", "doan", "localisation", "laparoscopic", "pem", "hotelguide", "bayview", "overridden", "sorensen", "hinds", "managment", "racially", 
"stinky", "riverton", "expertly", "mgc", "langkawi", "ftpd", "colloidal", "guarantor", "imperialist", "suc", "veneers", "reaffirmed", "zambezi", "tibia", "raquel", "wpt", "kiddie", "tulare", "venturi", "sundries", "linebacker", "danzig", "neurol", "beanies", "irreducible", "trixie", "ridgeway", "henckels", "srb", "verifier", "dimensionname", "eurasian", "galbraith", "pesky", "underwire", "salvia", "aep", "radioshack", "sportstar", "alana", "upd", "duma", "osh", "ddbj", "stah", "scripted", "ated", "mutagenesis", "posada", "vocalists", "tiburon", "lpc", "geiger", "cmyk", "everlast", "obits", "jekyll", "sportsbooks", "andaman", "hallam", "spoofing", "rockhampton", "reauthorization", "poolside", "xiamen", "trc", "pita", "chopard", "skeptics", "nast", "motorist", "kwik", "peritoneal", "jaffe", "freebie", "harare", "tunbridge", "spycam", "lowes", "lineto", "ncaab", "publicize", "neohapsis", "sanibel", "bulimia", "newquay", "intros", "ladybug", "analyser", "armando", "conwy", "algorithmic", "rectifier", "banknotes", "aem", "bookshot", "bassoon", "scrapbooks", "hydropower", "clearances", "denominational", "dominguez", "meas", "tamron", "dfid", "vlans", "spreader", "deu", "otolaryngology", "ezines", "vbseo", "snowmobiles", "oca", "phen", "educa", "lagrangian", "dubrovnik", "idt", "eases", "hippocampus", "crim", "repeaters", "longoria", "matsushita", "reimbursements", "kotor", "encodings", "yuen", "eqs", "eca", "actionable", "gangbangsquad", "cornea", "overfull", "southgate", "minibar", "kitchenette", "ols", "liberian", "tuc", "hth", "repairers", "liczniki", "rcc", "numerology", "armitage", "brac", "barware", "corsi", "normalize", "gsp", "bcr", "krt", "buffs", "tamoxifen", "phenotypes", "kinross", "kieran", "informatie", "mccallum", "triplet", "geosciences", "sonics", "timmins", "django", "pllc", "lotta", "upg", "nhtsa", "swissprot", "archaeologists", "voss", "pussys", "moveto", "tentacle", "stx", "iaudio", "prednisone", "salespeople", "motility", "dengue", "gaiman", "incineration", "dumont", "shanks", "bissell", "organza", "centralised", "unbreakable", "supersized", "depictions", "wml", "sexcams", "kaffe", "karim", "aww", "gtc", "pbl", "cael", "separators", "informatique", "resetting", "indepth", "funnies", "cumin", "chicagoland", "keystrokes", "setters", "inertial", "payless", "ona", "pec", "payee", "cinematographer", "preorder", "oig", "teenies", "ppv", "ventilator", "annonces", "camelbak", "klear", "micrograms", "pediatrician", "cymbal", "convective", "haymarket", "nosed", "bre", "shogun", "rescheduled", "bala", "sidestep", "readline", "preemption", "microbiological", "corticosteroids", "pseudoephedrine", "stockholder", "engnet", "quanta", "sturgis", "synapse", "cwd", "innostream", "airplay", "uppers", "sib", "pitman", "bodrum", "leathers", "embossing", "redirects", "fuzz", "roscommon", "meryl", "izmir", "meticulous", "multiplexer", "menorca", "dendritic", "minima", "wstnsand", "naproxen", "operands", "mikael", "conceptually", "crichton", "cct", "nics", "hardwoods", "clarita", "xfs", "capping", "parisian", "humanism", "hiroshi", "hipster", "accel", "annualized", "sandi", "npa", "becca", "basildon", "khoa", "testis", "uclinux", "unusable", "tigger", "approximated", "dhea", "consulates", "wonkette", "versioning", "breakdowns", "dbh", "periodontal", "macmall", "iphoto", "uncredited", "recordi", "lacroix", "rupiah", "bullish", "hippy", "klik", "northerner", "xsd", "mackintosh", "kenney", "fabricators", "mutated", "layne", "moonstone", "scilly", "sheng", "fsp", "yk", "strep", "offical", "hps", 
"tampere", "testo", "synergies", "fundamentalists", "amyloid", "emachines", "understandably", "icarus", "appletalk", "goff", "dialed", "geoxtrack", "bemidji", "harcore", "intermodal", "spx", "catalunya", "baymont", "niall", "mitts", "rik", "nappy", "diario", "khalid", "fuchsia", "chowhound", "muscat", "ffff", "kmart", "handover", "knott", "butterfield", "hialeah", "finney", "salamander", "driveways", "ummm", "ayres", "lukas", "cavan", "aswell", "skippy", "marginalized", "sooners", "cityguide", "maritimes", "permanente", "texaco", "bookmakers", "speci", "hgtv", "contacto", "mbc", "marston", "newsline", "coverages", "bap", "specialities", "loca", "systematics", "renderer", "matsui", "rework", "snowmass", "deq", "rosh", "coffs", "cleansers", "acu", "webby", "footbed", "inicio", "moretrade", "apogee", "allergens", "worsen", "mlc", "applica", "tankers", "whopping", "issey", "rtr", "bes", "cust", "brookes", "anim", "tull", "informatica", "computeractive", "finline", "permissionrole", "quickcam", "shunt", "rodeway", "scrollbar", "breen", "voyuerweb", "mbe", "kenshin", "dpm", "clackamas", "synch", "patten", "leppard", "allis", "estimators", "functionalities", "rmt", "downes", "koffice", "evidences", "mux", "dbx", "fetishes", "isaacs", "outrigger", "enclave", "fibrillation", "licorice", "statically", "ipl", "dixons", "goldmine", "lhasa", "developmentally", "ziggy", "ingles", "senders", "steamy", "atf", "madhya", "marinade", "passwort", "extinguishers", "stratosphere", "tbilisi", "updater", "geico", "fld", "cabos", "companys", "tinputimage", "ggg", "nicaraguan", "icn", "wanganui", "sconces", "insulator", "endometrial", "mohan", "hegemony", "focussing", "gallerie", "bioperl", "eprint", "tennant", "ebp", "tryptophan", "checkin", "gilroy", "extensibility", "aei", "qg", "mcculloch", "thang", "lorem", "seng", "bianco", "salma", "consortia", "asimov", "renato", "bungee", "murdock", "hokkaido", "alternates", "brdrs", "configures", "multilevel", "mvs", "pce", "albertson", "renoir", "getclass", "perthshire", "mucus", "suspenders", "realtek", "morons", "dismantle", "pharos", "obp", "zovirax", "twikiguest", "reimplemented", "eavesdropping", "orgs", "numerator", "gds", "nme", "resurgence", "metastases", "gino", "timings", "mecha", "carburetor", "merges", "lightboxes", "icra", "jeopardize", "ltp", "loews", "fanlisting", "flet", "bds", "hyland", "experian", "screenwriting", "svp", "keyrings", "hca", "hdc", "hydrolase", "koa", "mobilized", "accutane", "zonealarm", "sexkontakte", "canaveral", "flagler", "someplace", "vcard", "antibacterial", "rund", "extremism", "edgy", "fluctuate", "tasked", "nagpur", "funroll", "tema", "flips", "petsmart", "libuclibc", "chaney", "aventis", "macrophage", "palmas", "useable", "ferndale", "saipan", "councilor", "tcr", "myinfo", "jellyfish", "newington", "reissued", "mpv", "noa", "airconditioning", "wiggles", "bho", "synths", "kennesaw", "rubbermaid", "spector", "medica", "ayer", "incumbents", "ashok", "vern", "writable", "usepa", "reflectance", "mobo", "bunn", "chiba", "uint", "tgb", "yj", "coliform", "selena", "olmsted", "broomfield", "darpa", "nonpoint", "realignment", "undermines", "ferreira", "sasl", "defibrillators", "kraus", "certs", "nwa", "jstor", "aarhus", "supercomputer", "bouncer", "phenol", "jigs", "loudoun", "lifetimes", "grundy", "histamine", "byline", "mbox", "mustafa", "bedlam", "ioexception", "abdel", "bothell", "synergistic", "aur", "lippincott", "maplewood", "tillman", "maints", "rhp", "handball", "shandong", "cch", "stylized", "folate", "lenoir", "manitou", 
"cytometry", "goofs", "wokingham", "connors", "musc", "ripon", "nypd", "plexus", "systolic", "hyman", "unreachable", "deepak", "desarrollo", "tian", "jisc", "merc", "covina", "noonan", "ufc", "modernist", "waring", "janie", "fams", "yasser", "weathering", "totalitarian", "putters", "waypoint", "prx", "interrelated", "delray", "lifedrive", "santander", "southbound", "solidworks", "cronin", "averatec", "huren", "patios", "firebox", "synopses", "venta", "sadr", "tuples", "brdrnone", "diarrhoea", "sonatas", "barbecues", "walther", "deadwood", "mancini", "rpmlib", "milpitas", "commonsense", "bsi", "piii", "romford", "emporia", "digidesign", "violators", "phrasebook", "reconfiguration", "sledding", "lakefront", "excision", "traceability", "yangon", "booktitle", "lemony", "recursively", "ney", "kilda", "auctioned", "hennessy", "basset", "antwerpen", "paltrow", "rda", "limiter", "imtoo", "jmp", "cornwell", "dah", "blueberries", "notting", "comprehensively", "amar", "deftones", "apg", "zyxel", "kno", "limelight", "schmid", "alg", "bme", "solis", "cdx", "mju", "hoosiers", "criss", "glynn", "aerotek", "unmet", "toa", "competes", "olathe", "ciw", "compositional", "sez", "trig", "taylormade", "catawba", "mbytes", "ordinal", "tth", "inglewood", "gila", "magnitudes", "downed", "firstname", "metairie", "polluting", "wellcome", "pedicure", "duplexes", "edgewall", "webchanges", "backplane", "daschle", "transceivers", "disrupting", "biodegradable", "spore", "meps", "phpmyadmin", "bloodrayne", "tessa", "unrealized", "hei", "artistas", "roomate", "acetone", "alanine", "elko", "dvdrw", "spt", "ries", "inthe", "blitzkrieg", "nickels", "banbury", "igm", "snf", "optra", "choctaw", "issaquah", "interactively", "fredrik", "aventura", "ewa", "dpic", "mufflers", "quarks", "refactoring", "monrovia", "forman", "marrakech", "optoma", "walkways", "heineken", "shelbyville", "oxidized", "bugfix", "sharif", "bloodstream", "yx", "underpinning", "resistivity", "hollinger", "conformal", "racquets", "sherri", "dbd", "nevermind", "moa", "tenchi", "potters", "detergents", "cheri", "bombardier", "subsp", "cytotoxic", "frag", "eseminars", "colophon", "morin", "ico", "tatum", "unforgiven", "thesauri", "gaffney", "harrell", "toowoomba", "friendfinder", "uts", "bootsnall", "relais", "allocates", "freecom", "yoo", "kabbalah", "dgs", "punks", "chorley", "ivanov", "unannotated", "endian", "dari", "patchy", "haters", "mutex", "worldnow", "giuliani", "hina", "millennia", "pathophysiology", "frith", "pao", "doran", "remixed", "hypoxia", "newyork", "penile", "hemi", "positron", "metallurgical", "ordinating", "caregiving", "molybdenum", "easley", "plo", "psn", "hexagonal", "throated", "contravention", "bacteriol", "healers", "superbike", "biosafety", "binomial", "engels", "staybridge", "mullet", "canfield", "hardball", "orem", "scholl", "renovate", "dvdr", "phenterminebuy", "metformin", "actuary", "addressbook", "xquery", "csl", "purdy", "rattus", "xian", "latches", "ardmore", "cosmetology", "emitter", "wif", "grils", "yom", "ralston", "estados", "begining", "apartamentos", "sassoon", "tna", "hotlog", "duquesne", "oclug", "formatter", "rhinestones", "shootings", "splitters", "gdm", "pizzas", "contig", "whittaker", "trafic", "winders", "walkie", "adorama", "uucp", "postmarked", "devolution", "avion", "innes", "reunification", "izumi", "caenorhabditis", "moderating", "gadsden", "cthulhu", "eurostar", "dooley", "diebold", "unsaturated", "hotsync", "ryerson", "bfd", "nonexistent", "liquidated", "decoders", "validates", "dae", "jackman", 
"biophysical", "mendes", "lasagna", "landers", "belton", "qing", "docu", "tapas", "calla", "curriculums", "supermodel", "rezoning", "schumer", "exclusivity", "motivates", "debuted", "lifeguard", "chrissy", "havasu", "kei", "danforth", "kilmarnock", "bignaturals", "hendersonville", "poweredge", "sequels", "licensor", "pantone", "granby", "laboratoire", "headteacher", "viajes", "etosha", "ndc", "coexistence", "leona", "dpr", "brownfield", "aguilar", "supervises", "orthologs", "pataki", "redistricting", "jil", "amritsar", "lpi", "pram", "acqua", "mekong", "anesthetic", "dsi", "maduras", "pfi", "paperless", "perc", "fansites", "sherbrooke", "egyptienne", "hyn", "anisotropy", "heaton", "rennie", "sno", "redox", "cladding", "seaworld", "hotlist", "trumbull", "retransmission", "luau", "tiscali", "overlaps", "meticulously", "sitka", "ucs", "lsr", "hellboy", "jakub", "hanselman", "rangemaster", "interceptions", "rrc", "dyna", "appt", "nonviolent", "evangelicals", "cunny", "goddamn", "wolfowitz", "epping", "accra", "bimbo", "jamboree", "multicolor", "tritium", "ptfe", "leaching", "sauer", "cricinfo", "isomorphism", "lsat", "estab", "stockbridge", "invariants", "jillian", "islip", "egp", "didier", "capistrano", "yardage", "neve", "enviro", "gte", "bodybuilders", "ranchers", "bremerton", "wbc", "radii", "schwinn", "expander", "regt", "referer", "electrolysis", "signatories", "wetsuit", "flatrate", "vendita", "nazionale", "peroxidase", "folkestone", "angkor", "delcampe", "taylors", "rahul", "mmr", "zp", "vserver", "neurologic", "chd", "opac", "cmv", "macabre", "neurontin", "popeye", "gruber", "excerpted", "spotter", "pyongyang", "hmos", "beltonen", "chamonix", "recycler", "declarative", "semaphore", "dprk", "carmarthenshire", "tristate", "standardize", "recyclable", "knickers", "overloading", "angioplasty", "fanboy", "sharapova", "moen", "irin", "deseret", "eastbay", "bfa", "androgen", "parkes", "kilogram", "pacemaker", "duarte", "evaluators", "tarball", "nears", "kapoor", "pah", "allard", "mog", "tures", "standout", "lll", "holley", "ogs", "ptt", "sfs", "transamerica", "bdrm", "comparability", "buckhead", "industrialization", "cabana", "mbr", "yoshi", "skokie", "catwalk", "homesite", "pecos", "stinson", "blurry", "etrust", "minibus", "coty", "denby", "openbook", "unfunded", "jobsite", "dls", "levinson", "kasey", "disbursed", "cristian", "ballooning", "nats", "antineoplastic", "amplify", "shitting", "coden", "congressmen", "dft", "xsp", "strapless", "qualitatively", "struc", "whitefish", "flourished", "ejection", "puyallup", "bonham", "miu", "cosplay", "gazduire", "dodgy", "parasitology", "thymus", "handlebar", "sanborn", "beale", "lesbianism", "locators", "belive", "mnogosearch", "aoa", "childress", "pppoe", "phytoplankton", "wireline", "handpainted", "suprise", "neath", "casseroles", "generational", "coppola", "burrito", "sandton", "spylog", "biltmore", "coriander", "edtv", "chopra", "streamflow", "montoya", "lesbien", "manipulative", "hypnotize", "liaisons", "backers", "evocative", "mcclelland", "centerfold", "burch", "chesterton", "warlord", "guage", "powerball", "snider", "creuset", "wildland", "oster", "conti", "sichuan", "wrigley", "bollinger", "sensitivities", "offshoring", "uiq", "bayes", "vipix", "amphibian", "substation", "optically", "ceasefire", "haag", "alj", "swartz", "nanoparticles", "affine", "sitios", "woot", "obo", "uname", "employmentnew", "sepa", "asrock", "hijacking", "blurbs", "downsizing", "subcutaneous", "creatinine", "factorization", "netbios", "fleshlight", "reliever", 
"ender", "indenture", "arlen", "trailblazer", "coney", "avenida", "ern", "shocker", "barnstable", "ioctl", "bronte", "refrigerant", "caterham", "bajar", "movei", "barkley", "datacenter", "presidio", "transfection", "fung", "legg", "moyer", "roux", "rectangles", "caseload", "catharines", "pdx", "wget", "collaborator", "cruzer", "eeoc", "tnc", "cnw", "sausalito", "clas", "xenopus", "reflectors", "endorsing", "qingdao", "kiwanis", "onlinephentermine", "replicator", "assertive", "aldershot", "weirdness", "oblast", "townhall", "sunnyside", "datos", "pham", "glycogen", "tain", "selangor", "detainee", "brd", "hoosier", "balearic", "toluene", "jini", "tubal", "longford", "johansen", "photocopies", "haccp", "narconon", "dyno", "blakely", "klonopin", "photonic", "kyiv", "tami", "hijackers", "buell", "informazioni", "mccracken", "ultrasonography", "cale", "alyson", "taupo", "possum", "milligan", "rosacea", "transgendered", "thos", "toxicological", "mackey", "ristorante", "obama", "dvc", "jermaine", "platypus", "breakbeat", "karina", "jang", "thereunder", "kink", "winton", "holla", "multilayer", "strcpy", "xzibit", "mohair", "chore", "agb", "prt", "abm", "kgb", "preemptive", "guzman", "subcontracting", "counterterrorism", "communicators", "embodiments", "sociedad", "taskforce", "gatineau", "pertussis", "concentrator", "astrophysical", "apap", "pairwise", "nagy", "hofstra", "kbs", "filmstrip", "shortcake", "hsm", "chilliwack", "bidorbuy", "tetracycline", "lovett", "motorhead", "salam", "hofmann", "paramilitary", "flipper", "eyeball", "outfitter", "rsl", "minden", "hardwick", "immunological", "wifes", "phenyl", "telefax", "giao", "famously", "hattiesburg", "telematics", "tsai", "maier", "lca", "bossier", "franchisees", "falco", "armin", "ique", "controllable", "surfactant", "telecommuting", "culvert", "prescriptive", "wcag", "hott", "spanner", "mchugh", "firehouse", "currys", "diadora", "laporte", "wgbh", "telekom", "puri", "factsheets", "karts", "orthodontic", "visors", "leste", "lithography", "bonobo", "hamptons", "proofreading", "rmx", "evokes", "jdm", "dehydrated", "whyte", "interop", "initializing", "manfrotto", "waveguide", "pnc", "aussies", "murtha", "reinhard", "permaculture", "suburbia", "kamal", "catwoman", "optimally", "darko", "windstar", "polymorphisms", "sexist", "mdm", "embryology", "styrene", "alumnae", "inducible", "riesling", "triage", "ees", "krugman", "mrt", "mazatlan", "silencer", "foreclosed", "chernobyl", "rigby", "allergen", "crystallography", "frosting", "gallbladder", "photogallery", "nightwear", "sconce", "vgc", "drivetrain", "skelton", "ovaries", "mamob", "phenterminecheap", "daddies", "impressionist", "tourisme", "hpi", "clif", "fairways", "watercolors", "klipsch", "tekken", "lactic", "bydd", "katana", "ameriquest", "boson", "culo", "milled", "mcarthur", "analgesic", "mya", "btec", "geez", "crocheted", "acetylcholine", "modblogs", "pud", "firsts", "ferrets", "enlight", "wop", "twas", "menzies", "agonists", "eisner", "staroffice", "acg", "photometric", "fokus", "ntc", "buzzer", "tok", "trams", "vickie", "tinnitus", "vectra", "benidorm", "gerrard", "marketworks", "libertarians", "downers", "kevlar", "sequestration", "yoshida", "inositol", "praia", "follicle", "itemsshow", "brunner", "indore", "inspectorate", "ultralight", "toutputimage", "saudis", "octal", "debilitating", "twd", "keypress", "notifyall", "hdf", "corrs", "turku", "centrifuge", "curators", "multipoint", "quang", "marla", "mths", "caffe", "projective", "fandom", "cws", "kao", "debacle", "argh", "tts", "plantings", 
"landmines", "kes", "sdd", "khaled", "kimmel", "famc", "tva", "arbitrators", "deakin", "instock", "gilligan", "unh", "unpossible", "waldron", "kihei", "daq", "bronchial", "emg", "nanoscale", "hmong", "brownfields", "emmylou", "antcn", "unilaterally", "hypoglycemia", "sodomy", "bukakke", "bigpond", "famosas", "nsync", "zd", "revaluation", "conditionally", "moira", "tenured", "padd", "amato", "debentures", "rfcs", "acyl", "rehoboth", "lmc", "dht", "drucker", "lmi", "tham", "cigna", "dlr", "nifl", "sealy", "axa", "carrey", "ige", "dde", "foy", "evesham", "mcneill", "manitowoc", "baguette", "haves", "erections", "overpriced", "grantor", "sux", "orbiting", "soares", "gsl", "ihep", "resubmit", "bader", "gymboree", "kyo", "yunnan", "miyake", "rah", "saggy", "subtypes", "moultrie", "vasquez", "iogear", "merch", "uplinked", "cognos", "northbound", "cardigans", "ket", "rasa", "taglines", "usernames", "gpsmap", "ngn", "midweek", "pirelli", "rialto", "tvw", "durations", "bustle", "trawl", "shredding", "reiner", "risers", "taekwondo", "ebxml", "unedited", "inhaler", "granularity", "albatross", "pez", "formalized", "retraining", "naa", "nervosa", "jit", "catv", "certificated", "spicer", "karsten", "surfboard", "scl", "garfunkel", "handguns", "ideograph", "papillon", "dmn", "citywide", "stingray", "bmo", "toscana", "analsex", "larsson", "franchisee", "puente", "epr", "twikiusers", "tustin", "physik", "savute", "slinky", "cubase", "weatherproof", "parkplatz", "roadsidethoughts", "oxy", "pthread", "postmenopausal", "mixtape", "tuxedos", "fujian", "batters", "gogo", "nca", "minivans", "yerevan", "duffle", "scraper", "posner", "bwv", "technet", "sdsu", "decl", "lombardi", "musi", "unger", "gophers", "brando", "ksc", "multifunctional", "noes", "relist", "webjay", "vtr", "haworth", "transfected", "dockers", "swg", "screwdrivers", "tir", "guitarists", "manta", "christa", "sff", "moffat", "surfboards", "deteriorate", "compo", "roos", "eesti", "caulfield", "midpoint", "orland", "malagasy", "shoplocal", "standardisation", "matlock", "nair", "polymorphic", "emd", "phenomenology", "substantiated", "slk", "phong", "bandera", "cred", "lorry", "recaps", "fet", "resolver", "kagan", "chiu", "anthropologist", "opcode", "jugg", "revamp", "herbarium", "grb", "readonly", "arista", "barcelo", "unknowns", "kean", "coq", "cpo", "brosnan", "chamomile", "tgf", "mobilizing", "anya", "allo", "geddes", "wayland", "cerro", "methylation", "ecol", "clanlib", "jayson", "prostatic", "uj", "metcalfe", "oppenheimer", "mcclintock", "android", "primaries", "converges", "lation", "anisotropic", "voorraad", "ucr", "mxn", "ambrosia", "springboard", "rubella", "eisenberg", "bif", "constitutive", "vesa", "signoff", "guggenheim", "sapphic", "killington", "otr", "intec", "xem", "instawares", "kearns", "showcased", "summerfield", "cooperatively", "oshawa", "targa", "triplets", "hec", "billionaire", "leucine", "jobless", "slingshot", "cutout", "disgruntled", "coker", "selinux", "crosslinks", "resurrected", "skyscrapers", "spamalot", "sfp", "noob", "crb", "moviefone", "beecher", "goog", "mdgs", "democratization", "biostatistics", "sakaiproject", "cilantro", "equ", "xilisoft", "zc", "terracotta", "garvey", "harford", "pcie", "dartford", "dicaprio", "rosso", "onlinebuy", "gilliam", "certiorari", "walkin", "contributory", "applescript", "esol", "giggles", "suture", "jacobi", "fark", "autoblog", "glaxosmithkline", "dof", "sextoys", "tice", "accor", "buford", "uspto", "balfour", "calipers", "penalized", "pyruvate", "loggers", "envi", "kissinger", "rmc", 
"whew", "orchestrated", "conformational", "choreographer", "mcsa", "impressionism", "bucknell", "martino", "cranbrook", "taz", "ocp", "subdomain", "precios", "simcoe", "abnormality", "varicose", "newtonian", "genova", "libor", "infomatics", "hyannis", "howland", "federations", "syed", "urination", "bewertung", "broadcom", "cautionary", "escalate", "spotters", "kucinich", "noosa", "sider", "mitral", "dafa", "verdes", "inproceedings", "crestwood", "takingitglobal", "dmz", "antisocial", "baz", "gangsters", "daemons", "foundational", "probs", "huntley", "kanpur", "uah", "elven", "isotropic", "adodb", "enlaces", "edelman", "rubinstein", "flier", "griswold", "ome", "carcinogenic", "micr", "rrna", "goverment", "mercado", "lum", "dekker", "supercharged", "magicyellow", "primavera", "timescale", "fico", "overwritten", "marcinho", "kor", "erb", "keanu", "edina", "perle", "lebron", "terminally", "bundaberg", "lbo", "breyer", "kochi", "pirated", "leavers", "vpl", "pubsulike", "aquifers", "nittany", "dakine", "rescuers", "amsoil", "revitalize", "messageboards", "lakeville", "apotheon", "eukaryota", "permeable", "rsm", "lastname", "pxi", "faxless", "napalm", "annuncio", "usmle", "racetrack", "atenolol", "riveting", "cbbc", "absorbers", "xseries", "biweekly", "parkside", "rez", "hows", "posi", "derailed", "shoebuy", "ashworth", "keira", "meadville", "skynyrd", "threechannel", "fid", "rua", "monologues", "subroutines", "subspecies", "penton", "eoc", "figleaves", "bab", "ketchikan", "immagini", "shafer", "qca", "broiler", "ctn", "lickers", "akbar", "cbl", "skimpy", "fisa", "reflexive", "drool", "godin", "exchangers", "interbase", "sepsis", "appli", "boxdata", "laing", "oscillators", "choline", "doolittle", "trikes", "pdm", "joerg", "removers", "grisham", "diffuser", "indesit", "rouble", "kamasutra", "camila", "belo", "zac", "postnatal", "koizumi", "tallied", "ikezoe", "niggas", "lorain", "tko", "keying", "ballpoint", "kq", "lupin", "eidos", "computerised", "maf", "rsv", "munson", "ftm", "munoz", "hbv", "jeffersonville", "willfully", "orienteering", "eoe", "cavs", "humphries", "puss", "ngs", "podiatry", "truffle", "taka", "beal", "kalahari", "blockage", "hallo", "abo", "recv", "obstet", "bulma", "chicos", "cliche", "sadc", "tolar", "screenname", "chlorinated", "hypothesized", "upbringing", "fmc", "newry", "zonal", "defun", "unsustainable", "maas", "ghostbusters", "interdependent", "rockwood", "dbe", "asda", "civics", "literals", "unanticipated", "seminoles", "plist", "tabulated", "workloads", "chemo", "vhdl", "pretrial", "fermilab", "hotplug", "rotator", "krups", "myosin", "mtx", "carpool", "honky", "matsumoto", "armpits", "clug", "gasolina", "caruso", "fsh", "joysticks", "visualized", "bosworth", "soic", "clitoral", "bers", "carsten", "riverwalk", "convertibles", "literotica", "pgm", "ringetoner", "tpm", "floorplan", "oscilloscope", "getz", "mgd", "dictators", "levees", "annandale", "hillel", "jeffries", "pacheco", "slacker", "miva", "sns", "gca", "xchange", "kraftwerk", "bandana", "pentecostal", "extrapolation", "fennel", "telemark", "spg", "quy", "datasheets", "smit", "flywheel", "futons", "interviewees", "mosfet", "maryville", "oskar", "ital", "quarkxpress", "nondiscrimination", "republika", "icici", "fixings", "leith", "kickboxing", "deming", "deactivated", "caliente", "oligonucleotide", "crtc", "golgi", "channeling", "stopwatch", "maroc", "lemieux", "subscript", "starfleet", "odi", "substandard", "phenterminephentermine", "phoned", "ncl", "gmtime", "convener", "becuase", "dailies", "dansguardian", 
"miramax", "busta", "maury", "cng", "jizzshot", "moya", "nackt", "commercialisation", "cunni", "cardinality", "machado", "insurances", "qn", "tinting", "epidemiologic", "isset", "burnie", "bushings", "radionuclide", "typeface", "changeover", "jian", "termites", "dotnetnuke", "decryption", "etnies", "subsec", "cxx", "grinnell", "alexei", "helly", "protestors", "signings", "parnell", "gretna", "guida", "abl", "farscape", "hdtvs", "sde", "cyborg", "yanks", "hematopoietic", "clot", "imprints", "opensolaris", "inflationary", "elie", "traceroute", "fgm", "cuddle", "workbooks", "fallback", "permutations", "downer", "abelian", "cabela", "transferee", "quantitatively", "sheepdog", "cameraman", "pinochet", "replicating", "tci", "slashes", "streetpilot", "renovating", "paralympic", "dwarves", "cakewalk", "pyro", "phenterminediscount", "tye", "bna", "uwa", "stinks", "trx", "behav", "blackfoot", "kuo", "schaffer", "kemper", "glycemic", "plesk", "slicer", "joshi", "realtytrac", "sandburg", "dnb", "nwi", "reza", "operable", "wargames", "guerrillas", "saito", "tce", "fullsize", "auc", "anzac", "kulkarni", "rabbis", "mendelssohn", "investigational", "photojournalism", "anaal", "christiansen", "centaur", "rubio", "transando", "rapist", "ert", "pratchett", "climatology", "baise", "labtec", "prioritization", "pinhole", "hdpe", "bioengineering", "dirac", "mcu", "alveolar", "westmeath", "lewinsky", "webx", "acco", "soya", "moz", "exorcist", "biofeedback", "atrios", "honduran", "seaview", "douche", "rsh", "soundcard", "resistive", "sylvain", "chubb", "snooper", "atn", "dbase", "katja", "icr", "firepower", "agu", "ges", "cissp", "mangalore", "laois", "ime", "unmodified", "keystroke", "zell", "parkersburg", "yoon", "gillmor", "joyner", "vinnie", "ccf", "grocers", "simulates", "flathead", "castellano", "sigia", "vesting", "misspelled", "prono", "headcount", "panache", "inu", "hallelujah", "joes", "cayuga", "nob", "tpb", "glug", "zodb", "gubernatorial", "goran", "bauhaus", "sarawak", "sparky", "sebastien", "wirelessly", "wpi", "sysop", "factored", "eula", "ohh", "bsb", "polymeric", "salivary", "mfi", "ftaa", "async", "dnd", "kristian", "circadian", "analgesics", "flintshire", "prakash", "productos", "phenotypic", "pelagic", "agronomy", "vss", "aironet", "weightlifting", "yugo", "audiophile", "unidos", "motorcycling", "raine", "testbed", "pediatricians", "fingerprinting", "bunbury", "tasking", "gmd", "emulated", "tweaked", "phonological", "barco", "gomes", "osf", "faridabad", "aprs", "snappy", "opa", "colonic", "jeroen", "qin", "zircon", "svt", "dansko", "caspase", "encinitas", "tuo", "remoting", "ploy", "achat", "freefind", "spellings", "canopus", "dme", "gaulle", "maplin", "dutchess", "wattage", "puke", "distinfo", "leia", "expeditionary", "amortized", "truckee", "albury", "humanistic", "travelogue", "triglycerides", "gstreamer", "leavitt", "shotguns", "discounting", "etoys", "thirties", "swipe", "dionne", "ebscohost", "tns", "geoquote", "upkeep", "truncation", "gdi", "bausch", "pomeroy", "harrods", "downgrade", "roomates", "biliary", "dumpster", "universalist", "acdbarc", "ywca", "oceanview", "fazendo", "shayne", "tomy", "resized", "yorkie", "qx", "matteo", "shanahan", "japonica", "froogle", "rehnquist", "megabyte", "ginsberg", "vivienne", "penticton", "inseam", "csh", "pressurized", "sld", "faves", "edf", "massagers", "ente", "timesheet", "anniston", "sigur", "toughbook", "histological", "clays", "pcx", "suzie", "honeycomb", "denier", "udo", "etcetera", "reopening", "herrmann", "ifr", "quantifying", "qigong", 
"cbn", "kurzweil", "chanukah", "programas", "fumbles", "jobseekers", "nitrite", "catchers", "mouser", "rrs", "knysna", "arti", "andrey", "textarea", "weis", "pesto", "ilm", "ponderosa", "kroatien", "transitioning", "whoops", "catamaran", "preoperative", "cbe", "verilog", "helios", "qz", "wheelbase", "narayan", "voyforums", "csg", "unctad", "monomer", "refueling", "ilife", "biennium", "coho", "pellepennan", "quartile", "anwar", "infobank", "hexagon", "ceu", "geodetic", "anda", "emporis", "ahmadinejad", "lubes", "consensual", "altimeter", "nmi", "psm", "lawler", "sharpener", "stellenbosch", "soundex", "setenv", "mpt", "goldfinger", "asahi", "ascorbic", "himachal", "dichotomy", "communigate", "covalent", "cantrell", "tarpon", "bluffton", "radix", "orthologous", "taichi", "borealis", "nerf", "rosedale", "policyholders", "nst", "racecourse", "extraterrestrial", "kok", "servicemen", "starwood", "asco", "nui", "phylogeny", "jis", "tiesto", "ameri", "plankton", "pkt", "seamus", "sublets", "unthreaded", "microstrategy", "cleanups", "fitchburg", "flowchart", "tacky", "sauk", "supercomputing", "antiwar", "illawarra", "benetton", "menopausal", "workgroups", "relive", "ketchum", "nieuws", "mirago", "reproducibility", "abalone", "ashmore", "ssx", "eachother", "gsx", "juggs", "ded", "geometries", "petzl", "edie", "quirks", "sbe", "bundy", "pina", "crayola", "acceptor", "iri", "precondition", "padova", "indica", "roddick", "teasers", "beveled", "consumerism", "flr", "yeovil", "boneless", "intracranial", "kbd", "tatoo", "gameday", "solute", "tupperware", "ridgefield", "gce", "quadro", "mumps", "trucos", "mopar", "haggis", "electromechanical", "styli", "whipple", "fpm", "arcata", "perego", "guwahati", "loudon", "legolas", "rockaway", "exhibitionist", "woolley", "msps", "toolset", "ferragamo", "bott", "godiva", "nsn", "vfw", "masculinity", "schrader", "bld", "lightfoot", "capitalizing", "rucker", "browsed", "hcg", "freenet", "bundling", "cannondale", "mcat", "blt", "mencken", "commerical", "dagenham", "codename", "nesgc", "profess", "rearrange", "warfarin", "stdin", "rohan", "overheating", "condon", "inflate", "npd", "gunnison", "hhh", "sfmt", "devonport", "copywriter", "bodybuilder", "poss", "psigate", "ecp", "airforce", "fleischer", "atmel", "rasta", "ravel", "jupiterresearch", "flycatcher", "cusack", "jenni", "gbps", "bombshell", "llbean", "arnie", "subdomains", "kale", "pcd", "shemp", "findtech", "huck", "vouyer", "horrendous", "complainants", "addy", "ehs", "fabricating", "mmo", "verdate", "cyberpunk", "enotes", "pecans", "ababa", "whitehorse", "barak", "juke", "schnauzer", "hairdressers", "prioritized", "rainforests", "exo", "rabin", "workday", "eared", "earphone", "passaic", "vme", "hypermedia", "udb", "jinx", "illiteracy", "carcinogens", "offres", "addressee", "thefreedictionary", "informants", "tics", "sublimation", "harnessing", "extenders", "fishman", "hmi", "tsk", "inj", "wvu", "zimmermann", "dupage", "belarusian", "maia", "lynyrd", "messianic", "mexicana", "generalist", "gastronomy", "ugs", "huckleberry", "ridgewood", "pii", "dua", "phan", "lightsaber", "vivanco", "catheters", "azerbaijani", "whitmore", "footy", "joinery", "wasatch", "octagon", "equates", "sorenson", "eames", "tacos", "misspellings", "trivandrum", "kingsville", "magnetics", "rce", "halide", "metabolite", "clo", "genders", "headgear", "gretzky", "harming", "insole", "colvin", "kano", "thurrock", "cardstock", "journaling", "univers", "aragorn", "principled", "namibian", "slacks", "mcsd", "wmp", "fairmount", "physica", 
"subtropical", "sager", "trk", "bowflex", "subcommittees", "jia", "ramesh", "sitepoint", "prawn", "phylum", "mephisto", "prf", "mundial", "waveforms", "algal", "schafer", "riddell", "gimmicks", "reparations", "injectable", "sher", "trondheim", "mhs", "libwww", "phenix", "tlv", "rena", "tcpdump", "quinlan", "ecampus", "kaya", "ethically", "sity", "fkk", "freeradius", "nmh", "puffin", "freeride", "ahern", "shaper", "locksmiths", "lichfield", "cheater", "tora", "hsi", "bootcamp", "torus", "mondeo", "cotta", "oac", "evi", "jre", "vignettes", "aculaser", "waxman", "raping", "oryza", "leashes", "babydoll", "srgb", "practicality", "winer", "thon", "battelle", "inp", "europcar", "pancreatitis", "americus", "immunohistochemistry", "woodlawn", "filigree", "forecasted", "bypassing", "chock", "chocolat", "messier", "gravis", "edson", "nathalie", "calendario", "blenheim", "clarksburg", "trigonometry", "virusscan", "flanges", "bowlers", "tsi", "ipos", "harlingen", "keypads", "sosui", "campanile", "vassar", "regress", "ghosh", "iab", "hao", "ntu", "ivey", "techdirt", "pmt", "minutemen", "pias", "celiac", "hough", "ingested", "hypothyroidism", "boyfriends", "jeong", "equifax", "baroda", "cybernetics", "tissot", "daf", "prefered", "rappers", "discontinuation", "mpe", "elgar", "cumulus", "brltty", "klan", "goku", "offsetting", "airmen", "halliwell", "ionizing", "angebote", "morphy", "bookmaker", "curio", "hookers", "amalgam", "notional", "webactive", "bechtel", "zambian", "reinhardt", "bridgend", "bendix", "dists", "magnetometer", "populist", "mimo", "bsu", "renfrew", "hesperia", "chautauqua", "mnemonic", "interviewers", "garageband", "invariance", "meriden", "aspartate", "aramis", "pleural", "tsu", "mediating", "gabriele", "resonator", "provincetown", "afx", "surpluses", "ertl", "holger", "castlevania", "vaniqa", "finisher", "ead", "quartets", "heber", "muschis", "anthropogenic", "thermos", "macroscopic", "torrington", "gillingham", "geopolitical", "flaherty", "varietal", "assfucked", "engle", "gorillas", "ihc", "shatner", "euc", "juarez", "helicobacter", "epidural", "luisa", "teardrop", "anion", "glosspost", "numeral", "mdx", "orthodontics", "tabby", "cyngor", "onl", "claddagh", "abf", "therm", "myeloid", "pugs", "sprocket", "roh", "unilever", "ctu", "genomebrowser", "sima", "hants", "maclaren", "chairmans", "yim", "workflows", "adn", "ansel", "dragostea", "hrvatski", "ayala", "bfg", "tonawanda", "imovie", "regionals", "kami", "jansport", "fanfic", "tasha", "nikkei", "snm", "lynnwood", "glucophage", "bicentennial", "arl", "radiologic", "kts", "agosto", "mineralogy", "corsicana", "harrier", "sciencedirect", "krugerpark", "oireachtas", "esposito", "adjusters", "olympiad", "fname", "iar", "allende", "ldc", "sited", "surry", "strainer", "paragliding", "whitetail", "pagemaker", "astrid", "tripled", "gwar", "atwater", "overpayment", "faeroe", "wisenut", "nagel", "blatantly", "chicano", "chongqing", "corporates", "applicators", "erasing", "svetlana", "fleer", "bossa", "deuces", "fud", "dalian", "anycom", "gunfire", "mcnair", "subtilis", "hdi", "percutaneous", "cursos", "cols", "urth", "northbrook", "rmk", "mgf", "voli", "leann", "pixmaps", "gigablast", "metronome", "blackman", "fliers", "rdbms", "imprimir", "grouper", "negate", "roessler", "intrastate", "manawatu", "blass", "ainsworth", "denzel", "tfl", "moped", "appointees", "bunkers", "refrigerate", "ligase", "otp", "beleive", "warlords", "hatteras", "symlink", "almeida", "blogcritics", "cochlear", "janelle", "alphabets", "atta", "foldable", "hydroponics", 
"precast", "univer", "purest", "fatboy", "cei", "westerners", "camarillo", "kelty", "volunteerism", "pdq", "openacs", "hor", "newham", "energie", "radiographic", "kinematics", "errol", "otabletest", "isobaric", "hba", "gratuitos", "innd", "eads", "personalise", "tbl", "fso", "patenting", "reciprocating", "rto", "subcellular", "crosbie", "harmonisation", "dunfermline", "janesville", "egroupware", "caritas", "tsm", "egf", "roa", "debhelper", "nsaids", "milt", "burleson", "pba", "ragtime", "adopters", "impor", "philo", "backseatbangers", "rushville", "saitek", "synthesizers", "vulva", "arapahoe", "posey", "minuteman", "zinfandel", "mayoral", "fortis", "medicina", "gallary", "honeys", "pinus", "interlink", "greening", "tesol", "artnet", "crw", "bansko", "brien", "silvery", "guevara", "thinkin", "sedu", "automakers", "igmp", "overtake", "semicolon", "bubbly", "edwardsville", "ques", "homebuyer", "nodal", "mpo", "unbeaten", "rawls", "ocx", "ork", "sheeting", "hallways", "alzheimers", "snooze", "kestrel", "nadh", "americorps", "prawns", "nonpartisan", "naps", "domina", "eldon", "palomar", "riedel", "hoppers", "onscreen", "gdk", "distillers", "uploader", "caltrans", "tyra", "cocksuckers", "mtbe", "hypertensive", "xie", "chinchilla", "bucs", "transformational", "sailboats", "heisman", "grn", "jct", "exemplifies", "arrhythmia", "astrometric", "workwear", "tolstoy", "asperger", "koop", "newydd", "transpose", "lpr", "xray", "ferrer", "microeconomics", "kafka", "telly", "grandstand", "toyo", "slurp", "allocator", "islas", "ila", "westland", "instantiated", "lewisburg", "stylists", "blackwater", "vivi", "hippies", "pul", "larkspur", "kea", "lesben", "motherwell", "ahs", "cappella", "neocon", "getname", "coyle", "rudi", "departamento", "winrar", "mussel", "britax", "diwali", "raines", "dso", "wyse", "geourl", "etheridge", "docomo", "webindex", "accrediting", "stapler", "pheromones", "woodson", "imm", "volcom", "telewest", "lcp", "bisexuals", "ozzie", "kitsap", "oic", "cutest", "hoon", "mpp", "cte", "dymo", "yolo", "quinton", "jorgensen", "printouts", "tempt", "credentialing", "scalloped", "sealey", "galvin", "etudes", "gurney", "bluefly", "schweitzer", "jawa", "geochemical", "allegany", "aldridge", "digitizing", "aki", "organically", "chatboard", "lomb", "uddi", "yng", "roleplay", "pavillion", "barstow", "patna", "rootkit", "spearhead", "leonid", "sunnis", "reticulum", "dulcimer", "unl", "kalman", "npl", "coronal", "rendell", "transparently", "mfs", "freeform", "gianfranco", "tantric", "reif", "woodhouse", "lifter", "seymore", "ogle", "sayin", "cpas", "videographer", "gpe", "stallone", "uams", "pula", "trudeau", "buss", "ouest", "korner", "fatherhood", "debussy", "qsl", "reflexes", "hlth", "wyman", "kingsport", "gauthier", "vadim", "magnetization", "trd", "aitken", "millers", "titted", "clerics", "busses", "trai", "underpin", "ajc", "dumbledore", "vinny", "delicately", "webroot", "yip", "producti", "teksty", "pullout", "dmi", "yellowcard", "sbi", "dmt", "nce", "birdhouse", "bnd", "neko", "chillicothe", "peacekeepers", "schmitz", "rimming", "solent", "propylene", "supercross", "zsh", "multnomah", "foxconn", "fuelled", "biohazard", "horrifying", "parque", "toffee", "fpl", "riemann", "horsesex", "mahatma", "mubarak", "bachmann", "caswell", "chiron", "hailey", "pippin", "nbp", "ramallah", "isoforms", "dictyostelium", "tauranga", "hawkeyes", "maxxum", "eire", "knowit", "topanga", "geller", "parliamentarians", "inadvertent", "utes", "boardman", "denham", "rofl", "homophobia", "winches", "uptodate", 
"centralia", "eschaton", "hoaxes", "hillingdon", "buble", "hairspray", "acdsee", "offerte", "urb", "intellicast", "minn", "frc", "antisense", "pelosi", "shader", "gisborne", "grafts", "hillbilly", "intifada", "carina", "fon", "ehow", "vpi", "brunel", "rtx", "roald", "externalities", "metzger", "balsamic", "classically", "calorimeter", "necked", "idiopathic", "lileks", "tahoma", "ogc", "unidirectional", "westbound", "layla", "galeries", "cabinetry", "suarez", "stipulates", "towertalk", "optimizes", "serializable", "universite", "ald", "ringsurf", "toques", "rayleigh", "dropouts", "fws", "gamecocks", "gazprom", "braden", "amet", "sinusitis", "rusk", "fractals", "depressants", "clec", "tryouts", "rushmore", "shel", "adapts", "farlex", "emac", "phl", "remax", "wizbang", "endnotes", "rodman", "dissidents", "iterate", "conair", "ember", "vsa", "neolithic", "mgx", "acuvue", "vetoed", "uruguayan", "corrigan", "libxml", "etronics", "simian", "atmos", "msk", "iib", "multimode", "teensforcash", "annu", "sunbury", "girardeau", "dbg", "morrisville", "netmeeting", "asso", "estore", "universes", "ganglia", "ghanaian", "resonances", "subjectivity", "microarrays", "easypic", "abbeville", "newsre", "cobble", "flightgear", "spode", "berea", "mckinnon", "bucky", "plunger", "xing", "siggraph", "bookends", "klingon", "moreland", "lowery", "histograms", "moll", "floorplans", "netherland", "frasier", "rossignol", "polyline", "laroche", "cytosol", "disposals", "xforms", "mosul", "motu", "amersham", "chordata", "crafters", "kingsbury", "yoox", "hyphen", "dermalogica", "moreton", "glycoproteins", "aristide", "unsorted", "rambus", "ptf", "scorsese", "patricks", "microwarehouse", "bch", "blyth", "grampian", "livedaily", "nces", "alizee", "detain", "andrzej", "optimus", "alfie", "immunisation", "pfaltzgraff", "eyelets", "swordfish", "legals", "hendry", "homogeneity", "hartland", "recreated", "leaded", "hunan", "supersonics", "amstrad", "vinaigrette", "scd", "mch", "nintendogs", "dvx", "unreadable", "plattsburgh", "balsa", "aya", "brasserie", "gcl", "salton", "paulson", "dvdplayer", "silverton", "enduro", "peepshow", "givens", "bristow", "pecuniary", "vintages", "ozarks", "johor", "zia", "mucosal", "prehistory", "histidine", "mti", "drape", "tectonics", "lorentz", "distributive", "sharps", "seguridad", "ghd", "gilberto", "doomsday", "otters", "gervais", "mews", "scarring", "daydream", "gooding", "snicket", "bicarbonate", "boggs", "wps", "dietitian", "itf", "harriman", "paprika", "haviland", "novato", "dyn", "hornsby", "biden", "disallowed", "zahn", "jordi", "correo", "frida", "chappelle", "resourcing", "methuen", "zoneinfo", "adelphi", "orbison", "geffen", "informatik", "novella", "brie", "galeon", "silos", "lrwxrwxrwx", "shortstop", "cua", "dordrecht", "permissive", "creston", "prec", "nco", "nehru", "bromwich", "disposables", "estrogens", "mulholland", "rui", "haz", "eol", "odometer", "tooltip", "ibb", "mosby", "druids", "aggregators", "herfirstbigcock", "rti", "arvada", "fixme", "rodger", "tively", "gizmondo", "cucina", "ivo", "griddle", "pricelist", "juventus", "conroe", "multipliers", "aparthotel", "kitesurfing", "couplers", "aftershaves", "rehabilitate", "patina", "scansoft", "quadra", "sousa", "phonology", "dunkin", "deat", "plasmodium", "bums", "undersea", "aretha", "lts", "boxster", "staf", "bcg", "overexpression", "vanadium", "wilkerson", "riverboat", "voa", "kohn", "bgl", "jiu", "ipi", "contl", "ottumwa", "gynecologic", "unstoppable", "pedometer", "shortfalls", "ksa", "bookmarking", "ingham", "yoder", "esu", 
"vbs", "barbershop", "drinkware", "idiosyncratic", "googlebot", "floppies", "tashkent", "foxboro", "allstar", "hervey", "fes", "kilowatt", "evga", "nikos", "tance", "varian", "mops", "coughlin", "commutative", "lansdowne", "bcbg", "syrah", "affx", "angiogenesis", "nicosia", "nematode", "kegg", "pkr", "enso", "administratively", "tma", "capa", "ronaldo", "leverages", "cco", "cancerous", "banderas", "gmane", "vq", "gabriela", "secretory", "mmx", "pinehurst", "nro", "reassessment", "ippp", "chillers", "elbert", "sunil", "yuki", "periodicity", "trypsin", "bursary", "dependability", "overdraft", "deirdre", "colonia", "mycoplasma", "lesbains", "adelphia", "scribner", "aro", "activites", "uaw", "frankel", "cacti", "bugaboo", "palmdale", "aeration", "kita", "muscletech", "watersport", "paf", "nxt", "uscg", "yitp", "gibb", "gener", "nak", "unm", "zhong", "chowder", "expatriates", "centerpieces", "freaked", "curbs", "tdp", "gruppensex", "triphosphate", "acronis", "wcw", "prostaglandin", "completo", "darwinports", "abiword", "hippocampal", "atlassian", "technik", "vineland", "commentaires", "ters", "stuttering", "forcefully", "depo", "edinburg", "kwanzaa", "kzsu", "mascots", "harrisonburg", "cadbury", "scoble", "aor", "conundrum", "bullard", "aiff", "comedic", "apical", "synoptic", "miyazaki", "beryllium", "disinfectant", "sentra", "joi", "jokers", "wci", "piglet", "wildcards", "tresor", "sketchbook", "bbd", "halliday", "manolo", "tifton", "repre", "hendrickson", "windhoek", "lomond", "atapi", "hbh", "eccles", "ofa", "dcu", "spatula", "intergenerational", "epub", "cates", "featurette", "gotcha", "kindersley", "drifter", "cvsnt", "ogy", "lagerfeld", "lewin", "youve", "unaids", "larue", "stardom", "assad", "glenview", "brantford", "kelis", "nola", "lxr", "toastmasters", "appr", "recs", "ranchi", "exotics", "articulating", "jiffy", "goodall", "gconf", "verkaufen", "scalextric", "ryobi", "qname", "immerse", "farris", "joinwelcome", "cce", "wittenberg", "capone", "mtp", "busines", "rebounding", "usborne", "hirsute", "prelim", "prepress", "rop", "militias", "ttd", "commodores", "ecnext", "dbf", "goldsboro", "ashburn", "roslyn", "neverland", "coolio", "lindbergh", "freeciv", "indice", "vertebral", "ectopic", "abcs", "lge", "bnl", "coulomb", "minton", "oban", "restatement", "wakeboard", "unscheduled", "dbc", "visser", "clipland", "thermocouple", "masala", "clt", "drw", "rosas", "rdram", "mcclain", "maki", "rosenbaum", "eagan", "slv", "sunburn", "pleistocene", "nips", "sfi", "canisters", "kas", "waddell", "solvency", "lynette", "plainview", "fielded", "blowfish", "zyprexa", "altrincham", "workin", "afton", "topologies", "touts", "pino", "xelibri", "lora", "mendez", "undelete", "samuels", "rajesh", "soros", "unjustified", "nfo", "crf", "digitale", "sitcoms", "analogues", "leukaemia", "ukulele", "paperboard", "fied", "cobain", "trillian", "offaly", "girlie", "ilcs", "friggin", "wq", "davinci", "oxon", "expressionengine", "bains", "rse", "callbacks", "cdv", "hannity", "replicates", "sidewinder", "queueing", "slugger", "humidifiers", "desai", "watermarks", "hingis", "vacanze", "onenote", "montebello", "streetcar", "stoker", "fulcrum", "sadistic", "cassiopeia", "corwin", "qut", "martingale", "saucony", "winslet", "criticizes", "baytown", "synchronizing", "reclassification", "woohoo", "htl", "caithness", "takeaway", "timeouts", "reit", "dietz", "devo", "morgage", "koo", "ducky", "bola", "mdb", "multimodal", "recenter", "hematite", "hensley", "asterix", "hokies", "blumenthal", "multinationals", "aag", "debs", 
"playin", "emeril", "mcalester", "adria", "shipman", "burzi", "incinerator", "muenchen", "convening", "unorthodox", "fibroblast", "gloryholes", "carrick", "immersive", "darmowe", "catagory", "glob", "cisplatin", "rpa", "fertiliser", "nuova", "halstead", "voids", "vig", "reinvent", "pender", "bellied", "oilfield", "afrique", "ream", "mila", "roundtrip", "mpl", "kickin", "hiatt", "droid", "addenda", "restorations", "boll", "knightley", "worksite", "lcg", "typename", "aris", "isv", "doctype", "balinese", "sportster", "dence", "lesbi", "saversoftware", "bursaries", "cuny", "cardiopulmonary", "biologic", "wanadoo", "shiatsu", "homewares", "dpc", "qk", "schizophrenic", "unplug", "albergo", "pressroom", "gingrich", "basra", "greenbrier", "superoxide", "porcine", "oldfield", "wxdxh", "luder", "shim", "manx", "understatement", "geda", "tormented", "immanuel", "whistleblower", "hopi", "idd", "gol", "bayswater", "lyne", "epox", "kennewick", "subtree", "inshore", "ibd", "hepnames", "benn", "kettler", "clots", "reducer", "naturists", "lvd", "flonase", "sympa", "hinsdale", "trav", "spina", "meatballs", "underrepresented", "bpl", "etb", "brane", "tightness", "tracklisting", "horizonte", "rgd", "concatenation", "suffixes", "kilmer", "cloverdale", "barbera", "seascape", "amdt", "linings", "horseradish", "telepharmacy", "itasca", "varbusiness", "paulsen", "cortina", "ides", "hazelnut", "ashfield", "chaco", "reintegration", "pampering", "boland", "airtime", "surrealism", "imi", "eit", "clamshell", "tonk", "luminance", "ixtapa", "gryphon", "ecos", "cair", "rochas", "farnsworth", "synchronisation", "suresh", "minnow", "bloor", "gumbo", "faqforum", "kunal", "jossey", "rci", "upa", "melamine", "wonwinglo", "episodic", "xcel", "jurys", "descendents", "ezmlm", "twikiaccesscontrol", "tonos", "lated", "montero", "divisive", "soci", "guia", "gastonia", "inappropriately", "valentina", "lubricating", "itworld", "deca", "branford", "kody", "accruals", "epitope", "jdj", "crenshaw", "perlman", "medallions", "rokr", "usg", "microtel", "rsx", "graff", "jcsg", "fds", "cooney", "whittle", "gmthttp", "rayburn", "etat", "suppressant", "hecht", "sportsnation", "sso", "ccnp", "reworked", "etl", "catapult", "vries", "procurve", "cbot", "elitist", "convoluted", "iberian", "optoelectronics", "mailscanner", "kazakh", "stimulator", "schoolchildren", "commweb", "thornhill", "tweezers", "lani", "ouvir", "filetype", "bearcats", "fanclub", "boehringer", "brasileira", "webservices", "kinematic", "chemie", "inoue", "unsupervised", "norvegicus", "copycat", "orrin", "snooping", "hashem", "telesyn", "mcb", "imple", "dorms", "elist", "laminates", "ingalls", "checksums", "tandberg", "iirc", "mackinnon", "roddy", "margolis", "erotaste", "pimps", "mcdougall", "smg", "mpx", "fhm", "travelzoo", "thermally", "teleconferencing", "albino", "cargill", "hyd", "visualizing", "mothercare", "sprinter", "isomorphic", "pepperdine", "cvc", "mahon", "conjugation", "macally", "anklets", "impasse", "disinformation", "beavis", "delicatessens", "intensively", "echocardiography", "pav", "amok", "riddick", "sexism", "ordinates", "gallaries", "baldur", "elon", "beasty", "arty", "leukocyte", "chau", "cotter", "peptidase", "fsi", "postmodernism", "osm", "squeaky", "silicate", "alcohols", "zydeco", "testi", "trujillo", "predictably", "weider", "shareholding", "giordano", "cardiomyopathy", "aprilia", "mcnabb", "lenz", "homeencarta", "disconnection", "scada", "spacetime", "trb", "awol", "espa", "bionic", "batista", "bookshops", "feynman", "captioning", "sibelius", 
"obstetric", "marigold", "ostsee", "martel", "hcfa", "ino", "ctm", "whi", "typesetting", "ervin", "chroma", "steinbeck", "pusy", "biblioteca", "neutrophils", "dunbartonshire", "lollipop", "brash", "avl", "opi", "declaratory", "corus", "elph", "naf", "htp", "hydrate", "ubb", "littlefield", "neutrinos", "aso", "bric", "subways", "tui", "leominster", "ncsa", "snipsnap", "negativity", "arcview", "picasa", "tortillas", "awww", "dara", "ragga", "innova", "doorbell", "ebc", "sgl", "unsettling", "snps", "explicito", "phila", "bugger", "persson", "embolism", "iip", "silverplate", "lats", "ovc", "roebuck", "sbp", "lipton", "starling", "coreldraw", "haney", "globemedia", "adrenalin", "murphys", "nicklaus", "yardley", "afghani", "tst", "hrd", "haulers", "energize", "prohibitive", "sydd", "nida", "barcodes", "dlink", "includ", "orgie", "macnn", "danni", "imaged", "sprayers", "lindberg", "filesharing", "calibrations", "atorvastatin", "teague", "vantec", "lattices", "cucamonga", "warne", "derwent", "hospitls", "flintstones", "rotisserie", "orcs", "scallop", "biostar", "computationally", "jobseeker", "siem", "sunbathing", "ronda", "npg", "cerritos", "kaz", "chard", "pershing", "clotting", "zhi", "programm", "singlet", "morningside", "simm", "egr", "hackensack", "taf", "kinshasa", "availablity", "lrd", "lugs", "kiddies", "cpsc", "hebert", "asta", "gato", "cimarron", "crowell", "fanart", "nagin", "gfi", "collapsible", "helsing", "haringey", "phu", "stes", "prophylactic", "rosenfeld", "cityscape", "tradeoff", "sask", "instill", "ypsilanti", "lifes", "imate", "firestorm", "homestay", "inept", "peet", "shiseido", "steves", "sascha", "reconstructing", "okt", "droplet", "dhe", "lakota", "revises", "ipt", "macrae", "parlay", "bdt", "woodville", "xlarge", "proform", "gothamist", "coexist", "advisement", "fulltime", "macosx", "metra", "cyg", "turtleneck", "aquos", "hcs", "tsar", "isbl", "gigabytes", "triangulation", "burleigh", "anarchism", "stabilizers", "gbic", "ciba", "activa", "cgt", "terrance", "smoothies", "orsay", "belling", "bnsf", "opps", "representational", "kagome", "snark", "woodard", "malignancy", "makati", "cbm", "bwi", "farah", "sitewide", "newfound", "collider", "candi", "lgf", "boylston", "swi", "rizzo", "wristwatch", "owensboro", "papas", "subscribes", "lah", "wining", "cies", "ganesh", "castleton", "zippers", "decaf", "emphasises", "cbp", "crx", "shakur", "rso", "euroffice", "roush", "caloric", "plaintext", "ofm", "daniele", "nucleoside", "xsi", "buttercup", "oakes", "searle", "shuppan", "lanyards", "cushman", "admissibility", "courtenay", "aspartame", "sleuth", "trudy", "neem", "magix", "cosh", "aurangabad", "golding", "ethnography", "yamaguchi", "bhs", "bulkhead", "kain", "abta", "herzegowina", "minas", "paradiso", "cityscapes", "oit", "replenishment", "autobytel", "kroger", "dexamethasone", "strunk", "yoghurt", "nationalists", "tfs", "definable", "bruin", "psychoanalytic", "reserva", "nasser", "simp", "zmailer", "birthing", "collinsville", "dimer", "powells", "abebooks", "stemware", "landsat", "peebles", "dewar", "docked", "burp", "radioisotopes", "obstetricians", "vinson", "efx", "naia", "idb", "fahey", "multisync", "worley", "oms", "kerri", "arith", "democratically", "datasource", "mcelroy", "cze", "shopgenie", "udev", "nicol", "camara", "degas", "benassi", "prefabricated", "gastro", "accessor", "meteorites", "notts", "lipoproteins", "attleboro", "parenteral", "biosystems", "cerebrovascular", "fsn", "bahraini", "actuaries", "delicatessen", "rng", "marianna", "creatas", "kidderminster", 
"waukegan", "antifungal", "promulgate", "mvr", "socorro", "maximized", "bde", "dlx", "erythromycin", "dtg", "nady", "leibniz", "flix", "cusp", "homers", "crandall", "holcomb", "beaulieu", "tct", "abington", "pointy", "hamradio", "meso", "monmouthshire", "danvers", "tpl", "baptisms", "backprevious", "carnaval", "recompile", "mainboards", "fclose", "melodias", "cliquez", "doberman", "installshield", "fasb", "estas", "htpc", "stover", "cerruti", "brainerd", "oxycodone", "istituto", "revs", "maha", "compressive", "wombat", "antenne", "patek", "zippy", "neteller", "odeon", "sbir", "backslash", "townhome", "victorville", "amityville", "arpa", "trannys", "goers", "chipper", "gulfstream", "modulate", "xserver", "infosec", "agt", "underwired", "ambiguities", "khai", "norepinephrine", "kundalini", "elkton", "carcassonne", "saygrace", "appending", "marathi", "songbooks", "islamists", "recursos", "newcomb", "stampa", "newscast", "vtp", "stockwell", "nederlandse", "outtakes", "boos", "lavie", "fina", "retinopathy", "deportes", "tremont", "barrio", "buggies", "zacks", "exercisable", "speedup", "holl", "efc", "cibc", "ontological", "thinkstock", "flashbacks", "kennett", "dentures", "eckerd", "xetra", "stg", "reimbursable", "informit", "cdbg", "yeltsin", "nitrates", "aeruginosa", "rpath", "archaeologist", "mitotic", "generalised", "outliers", "sug", "frac", "cowon", "semifinal", "deactivate", "studie", "kazakstan", "sva", "citesummary", "kubota", "chroot", "falciparum", "shifters", "undetected", "mepis", "caries", "microstructure", "ringwood", "pleaser", "compuserve", "disassembly", "miter", "propositional", "javaworld", "ssd", "writeups", "hoskins", "buytop", "frome", "talkie", "loy", "exxonmobil", "emeryville", "gamepad", "metazoa", "kml", "maul", "taoiseach", "siskiyou", "censuses", "offseason", "scienze", "shelved", "etd", "carryover", "fagan", "jada", "wholeheartedly", "polyps", "avast", "northport", "inelastic", "puebla", "idps", "warrenton", "traffickers", "neckline", "aerodynamics", "eto", "satcodx", "leviathan", "dfg", "classico", "harvmac", "wrinkled", "minimising", "bifurcation", "kimi", "npcs", "astrazeneca", "poetics", "jef", "miniseries", "yesterdays", "dcm", "issa", "toxicol", "libdir", "angolan", "waynesboro", "relayed", "fcst", "ulcerative", "bgs", "airlift", "downlink", "endothelium", "suppresses", "weinberger", "appointee", "darcs", "hashes", "nuff", "anza", "borehole", "flt", "htdig", "hain", "nodules", "bowdoin", "tunable", "memcpy", "ucp", "panelist", "opr", "transsexuelle", "mailroom", "nijmegen", "medalist", "ryman", "gmos", "recessive", "putas", "abou", "encrypting", "enola", "rippers", "steyn", "redefinition", "infield", "reformat", "atchison", "yangtze", "zw", "peels", "preterm", "mindfulness", "hwnd", "stances", "synapses", "hashing", "gere", "lrg", "unmounted", "armoires", "archetypes", "behemoth", "stereophonics", "obsessions", "piosenek", "mhp", "thrower", "prana", "trike", "bmps", "distillery", "estudios", "ceredigion", "funnier", "rickard", "disengagement", "gratuita", "gifting", "lpga", "esse", "maglite", "iodide", "bakker", "hariri", "digitization", "fistula", "campaigners", "kel", "acca", "lauri", "rockwall", "kellysearch", "crawfish", "tigi", "symbolizes", "liverishome", "thay", "ecuadorian", "injectors", "natick", "mornington", "booklist", "centrist", "inria", "torbay", "femur", "methotrexate", "landslides", "separatist", "jelinek", "darwen", "aung", "outlooks", "matrimonials", "busybox", "openview", "lifeboat", "hara", "tuskegee", "aly", "ciprofloxacin", "gul", 
"reconfigure", "ahn", "instantiation", "trw", "spambayes", "shelburne", "programma", "lbl", "escalated", "lucasarts", "eastbound", "grits", "apoptotic", "pulldown", "redditch", "trendnet", "iupui", "nsr", "treehouse", "payson", "jaz", "hedrick", "lineman", "streamlines", "reengineering", "cleaver", "prodotti", "inflight", "tracksuit", "polyphonics", "skidmore", "catia", "overuse", "mge", "newsprint", "visakhapatnam", "miko", "hemorrhoids", "haulage", "torrie", "usergroup", "poms", "mostrar", "convolution", "endtime", "maura", "hefce", "abbie", "mfp", "galician", "golem", "conifer", "phenylalanine", "wareham", "nonpublic", "henk", "inversely", "beebe", "dancefloor", "eyelet", "immunologic", "chengdu", "beeswax", "lanham", "crosswalk", "lecken", "kitsch", "scand", "sweeteners", "farnborough", "jalandhar", "publi", "visioneer", "sprints", "reinhold", "emptive", "compa", "hrk", "faked", "manilow", "burnsville", "banyan", "opinionated", "quirk", "hnl", "caterina", "blinks", "fiore", "rationing", "tellers", "jrnl", "waterborne", "astron", "nity", "gree", "tradeoffs", "goldeneye", "occuring", "calientes", "recomend", "functor", "trowbridge", "niu", "mmvi", "obe", "gyro", "technews", "shampoos", "unfiltered", "sabha", "bundesliga", "enix", "communique", "cantina", "cafta", "polyamide", "selectmen", "lncs", "luge", "necromancer", "carcinomas", "subcontinent", "dodds", "seaton", "transcriptase", "balmoral", "specifier", "subsidize", "icl", "galaxie", "ldflags", "hiya", "nappies", "crippling", "xul", "nti", "aspherical", "misheard", "ecw", "sundial", "odom", "flaky", "schlesinger", "kryptonite", "typology", "hydrangea", "preamps", "aesthetically", "vrs", "alvaro", "htg", "heston", "ghia", "sophomores", "binh", "allrefer", "dcf", "scarica", "chorale", "ooc", "fredonia", "tiaras", "sdio", "distr", "dscp", "cogeneration", "flite", "harddisk", "kennedys", "telefono", "saleen", "bosco", "cyclase", "dreamcatcher", "csw", "braddock", "ethnically", "wbt", "morro", "smurf", "yeager", "gelding", "blurring", "deva", "fom", "mastectomy", "cassell", "sarnia", "jaundice", "lastest", "asterisks", "nympho", "jeffers", "hyun", "cooktop", "fddi", "aspergillus", "agric", "kdc", "medics", "mwh", "photosite", "gip", "affirmations", "variational", "socializing", "crankshaft", "isls", "mensaje", "tagline", "airframe", "beater", "preowned", "dietetic", "storedge", "redacted", "rittenhouse", "stereotypical", "klass", "fpa", "treks", "victimization", "parallax", "zante", "splices", "imagenes", "rete", "akita", "nonresidential", "hellman", "durex", "robison", "tof", "lpd", "seri", "freetype", "nexis", "ldv", "collegefuckfest", "aiu", "molloy", "carcinogen", "brs", "catalyzed", "heatwave", "yv", "spindles", "herron", "sita", "watchtower", "fabrizio", "unmanaged", "gtg", "preteens", "heme", "renumbered", "omr", "cowell", "hyip", "crossbow", "speciation", "tfc", "whidbey", "betta", "imt", "emmet", "jewelery", "lumina", "statistician", "symmetries", "observatories", "bupropion", "telligent", "fungicide", "aiptek", "crosstalk", "mello", "deepsand", "litas", "haart", "worx", "coyne", "adenovirus", "hakim", "countywide", "gnucash", "puree", "stott", "sdg", "mandeville", "portugese", "maurizio", "tachycardia", "aja", "eaa", "warrick", "cosine", "veb", "patong", "ballina", "summarise", "accrington", "rnas", "haddon", "xpc", "swath", "azeri", "wta", "ulf", "kleen", "cvm", "meehan", "jenifer", "infiltrate", "mapinfo", "knightsbridge", "renounce", "jesper", "blairsville", "copilot", "koontz", "fma", "northgate", "phobias", "metaframe", 
"nutritionist", "effector", "bumsen", "rcm", "hairstyle", "nesbitt", "diuretics", "cemetary", "iap", "discards", "basie", "discontinuous", "iqbal", "uncorrected", "stillman", "chloro", "bighorn", "heartbreaking", "xxxvogue", "leitrim", "prg", "justifications", "gimmick", "brasilia", "recordin", "abra", "trn", "zg", "acrylics", "recensione", "fouled", "wiretap", "dvrs", "vocs", "moniker", "scholes", "sharpeners", "calida", "nse", "calloway", "tpicd", "prods", "hfc", "ltda", "snk", "waypoints", "nrm", "underscored", "herrick", "starwars", "smbs", "unreported", "phelan", "guarani", "tampon", "easels", "sxga", "webform", "artista", "elkhorn", "ventana", "sublet", "chiltern", "antares", "peaking", "stichting", "forall", "menuitem", "marshmallow", "hawai", "nfa", "cals", "seltzer", "utep", "homeostasis", "swp", "akamai", "goodie", "milkshake", "thrasher", "switchers", "brussel", "hartwell", "aup", "electrolytes", "machu", "unshaved", "gor", "ilya", "maneuvering", "gaby", "softwood", "ajay", "croupier", "hausa", "compacts", "similiar", "elev", "egos", "rhinitis", "dreamhack", "aop", "beastialty", "whedon", "microcontrollers", "dreamhost", "overcrowding", "retractions", "pinging", "catheterization", "holton", "smears", "jmd", "melo", "exons", "mariachi", "igi", "bday", "reseal", "compositing", "oskaloosa", "coopers", "psone", "versione", "storys", "escher", "hotfix", "rmp", "gaynor", "biota", "dossiers", "arpt", "winsor", "hairdryers", "axon", "morrowind", "puter", "chubbyland", "deflation", "pdo", "dreyfus", "worsened", "darlin", "treme", "reconstituted", "aveda", "legge", "kasper", "mugler", "yorks", "ddi", "badlands", "deploys", "pols", "internets", "backstroke", "resultados", "spooner", "musicmoz", "toothbrushes", "bugatti", "abrahams", "comentarios", "brandywine", "callaghan", "diskettes", "resonate", "intellivision", "castelle", "advertises", "fives", "titusville", "plas", "royston", "nace", "digitaladvisor", "adesso", "geekbuddy", "lipoic", "hazelwood", "gravatar", "outfield", "carcinogenesis", "gdr", "phenolic", "incrementally", "pqi", "lenght", "acompanhante", "orm", "terrapins", "daria", "vander", "ccie", "mathml", "legalization", "allendale", "modernize", "orl", "gert", "restarts", "juris", "brookside", "streamer", "rollei", "accumulator", "picchu", "abril", "crocus", "zl", "citizenry", "accountemps", "swenson", "unfpa", "ewido", "centreville", "alisa", "kingsway", "erlangen", "offtopic", "laundromat", "redeemable", "maxillofacial", "slutsfree", "glp", "baumann", "revolutionaries", "chillin", "cardomain", "creamed", "tarp", "schering", "aten", "bikaner", "chimpanzee", "petco", "flurries", "rau", "miki", "meson", "parathyroid", "cmb", "analgesia", "nqa", "theyre", "elp", "altera", "jeddah", "nannies", "pawtucket", "bimonthly", "senna", "wardrobes", "surgically", "nongovernmental", "inge", "rmdir", "miso", "itx", "hydrostatic", "attrib", "cheaters", "hagan", "canlii", "leong", "koehler", "clostridium", "nerdy", "mcnulty", "megastores", "imperatives", "bpd", "archetype", "kkk", "oren", "halsey", "artic", "techworld", "vnd", "shamanism", "numara", "csx", "reiserfs", "roussillon", "cheadle", "crea", "alcorn", "ences", "bowser", "fizz", "rationalize", "karoo", "unearth", "biopsies", "inconclusive", "hookups", "herrin", "thermostats", "canoscan", "moldovan", "jamiroquai", "xerces", "subclause", "classname", "makefiles", "bettie", "sheesh", "birdwatching", "speakeasy", "harpers", "hayashi", "epitopes", "drivel", "blandford", "foci", "toppings", "cantilever", "biloba", "pth", "tweety", 
"initializes", "keck", "fisica", "macromolecular", "eic", "skagit", "kimura", "baca", "pareto", "lymphoid", "apacer", "forklifts", "pvs", "refuges", "jal", "habana", "stateless", "virtua", "cerebellum", "vtk", "breville", "statehood", "dct", "palgrave", "bledsoe", "insanely", "inglese", "aidable", "bubblegum", "aphex", "wroclaw", "rajkot", "taxidermy", "esubscribe", "cartagena", "juergen", "itravel", "pashmina", "gustafson", "jacqui", "salim", "barnum", "anthropologists", "glues", "undercut", "eci", "cstv", "watsonville", "roaster", "redbridge", "hypertrophy", "raza", "duron", "xserve", "wobble", "fergie", "bohr", "boilermakers", "counterstrike", "hinterland", "sufi", "milfcruiser", "afdc", "niggaz", "housewarming", "regenerative", "corre", "liquidators", "clegg", "bagless", "bleachers", "deodorants", "bacteriophage", "sheena", "prez", "brasileiros", "transect", "thumbshots", "soloists", "borges", "sinusoidal", "manpage", "lazer", "babys", "crossovers", "parsers", "lsl", "chuan", "hauler", "cataloguing", "oralsex", "storia", "fotosearch", "usfs", "leappad", "interesdting", "headroom", "fortnightly", "yerba", "kuta", "clearfield", "huggins", "washoe", "srg", "stabilisation", "sayers", "publis", "intangibles", "tameside", "summerville", "uvm", "whalen", "kusadasi", "hcp", "flak", "ual", "cubed", "yuck", "concacaf", "textbox", "erythrocytes", "dinky", "divo", "injunctive", "honed", "coincidentally", "kolb", "kruse", "microm", "portugues", "pil", "tht", "deathmatch", "publica", "mde", "pollination", "ews", "synchro", "etobicoke", "midori", "chutney", "jrs", "naturopathic", "dermatologist", "thumbnailpost", "casein", "chillout", "stefanie", "chewable", "direc", "quintana", "normals", "villeneuve", "scrum", "everyman", "lopes", "eastland", "footballers", "xviewg", "metropole", "swarthmore", "multicenter", "fett", "sagebrush", "convenor", "pco", "proteome", "warheads", "radiologist", "liao", "westview", "optus", "medicinenet", "hitches", "britten", "palettes", "vma", "depauw", "gunman", "agassi", "panoz", "uwb", "movi", "scanlon", "nutri", "mitra", "guilders", "filmpje", "indexer", "ofdm", "ullman", "coachella", "localised", "recom", "downgraded", "ncep", "lalique", "weill", "jeez", "varadero", "chicco", "athabasca", "redd", "azusa", "unbuffered", "phoning", "rtty", "spacey", "fmla", "albatron", "breakpoints", "sperma", "aran", "ciencias", "mortage", "legato", "agarose", "avoca", "reservados", "russellville", "oneonta", "badass", "cfi", "pesca", "carvalho", "nass", "mainpage", "mccord", "kellie", "allstars", "darwinism", "tariq", "workarounds", "omia", "flannery", "rediff", "lecithin", "okmulgee", "lates", "recertification", "phosphorylated", "fusing", "nerc", "avermedia", "abuser", "sevens", "mukherjee", "anatomic", "watercooler", "gatsby", "litho", "mischa", "bangla", "menard", "rattling", "artes", "vacaville", "teo", "enermax", "hypo", "hadron", "gosford", "legalize", "millbrook", "epinephrine", "transom", "liebherr", "mwc", "biel", "vcu", "mils", "oreal", "picayune", "rabanne", "gorbachev", "norelco", "playset", "massacration", "frontman", "garvin", "autologous", "wiretaps", "duggan", "jrc", "chantelle", "liddell", "enraged", "gir", "adrien", "blotter", "jq", "menubar", "gagnon", "sitters", "rdc", "jod", "meteo", "cept", "bih", "programing", "humpback", "fournier", "alquiler", "reprocessing", "chaz", "bartending", "sshd", "opodo", "patiala", "jaques", "glc", "fantastico", "schiffer", "preclinical", "sfn", "conklin", "wheelers", "deductive", "cunard", "pygmy", "jewett", "environnement", 
"biddle", "basu", "tachometer", "bks", "nonproliferation", "cacharel", "elysees", "orchestration", "adipose", "usu", "freeservers", "potting", "uncomplicated", "piaa", "progs", "ues", "tobey", "sife", "wenzel", "debi", "baez", "tana", "gedcom", "uvc", "puccini", "seca", "ligation", "deconstruction", "inductance", "topicparent", "zanaflex", "medicus", "dmitri", "reallocation", "kalispell", "haight", "teleport", "skylights", "rehabilitative", "swab", "latimer", "boombox", "prorated", "bbr", "pansy", "reassignment", "hydrodynamic", "confirmations", "postulated", "unlabeled", "tosca", "brentford", "integrin", "ranlib", "differentiates", "skelaxin", "velo", "multiprocessor", "tabla", "celluloid", "identically", "saddlery", "whiteside", "eurail", "endicott", "dingo", "sessional", "pagination", "webtopiclist", "infopop", "accc", "iie", "burl", "truncate", "hightower", "polygraph", "allianz", "digress", "overseen", "scg", "thotlib", "bluetake", "cowes", "mailorder", "fetuses", "lowndes", "shr", "childbearing", "aaj", "crayfish", "minotaur", "heist", "mayne", "repaint", "asq", "contr", "zool", "spastic", "suprised", "illuminati", "piezoelectric", "rfps", "cutouts", "ilc", "vinton", "enw", "meir", "tanita", "tpr", "subsidised", "arcsec", "wrestlemania", "fhs", "getter", "mimics", "watermarking", "aftercare", "coombs", "wolfson", "sefton", "compu", "bonaventure", "appz", "ecl", "gview", "temperatura", "diastolic", "defaulted", "cesarean", "dialling", "rescinded", "chitika", "tsvn", "discoloration", "chelan", "morel", "iles", "kashmiri", "stacie", "collages", "enabler", "ogo", "mowbray", "schuler", "finlay", "gezondheid", "ylang", "lufkin", "tenge", "acosta", "turbotax", "herbals", "moderates", "piotr", "chairmanship", "covad", "comunidad", "moores", "hurghada", "malformed", "mks", "seatbelt", "dumbbell", "chasers", "hamer", "sherwin", "redissemination", "stine", "mcmullen", "skopje", "gpx", "supplementing", "lowrider", "liaise", "citric", "opentype", "jpmorgan", "nitride", "achievers", "unbonded", "cowen", "subdir", "rehearing", "balmain", "crissy", "nake", "wtp", "scn", "mendota", "makoto", "alloc", "ultradev", "viaggio", "cig", "scipy", "depositary", "redhill", "caveman", "nunez", "starfire", "whitlock", "pelletier", "lanark", "yada", "sandro", "jervis", "placemats", "pathologic", "darden", "bunnyteens", "gordo", "otitis", "ordinators", "bma", "leningrad", "harkin", "eatery", "peony", "economia", "cytosolic", "glycerin", "tailings", "shirtless", "darla", "rayman", "boardhost", "frontera", "crumpler", "hargreaves", "mkportal", "nucleon", "pkc", "dov", "ndt", "hideout", "lrs", "calcite", "fpu", "fts", "spud", "mang", "nology", "luiz", "belden", "lense", "hendrick", "publicati", "unverified", "untapped", "vario", "pmsa", "recensioni", "xq", "tev", "batty", "briscoe", "dwr", "fingernails", "ocarina", "camus", "mackinac", "itis", "saks", "hahahaha", "romenesko", "croc", "ftes", "keyspan", "aoe", "reposted", "cgs", "moduli", "mra", "ery", "payoffs", "tpi", "maywood", "buchan", "roberson", "defrost", "ecr", "coleraine", "arianna", "biomarkers", "consecutively", "bongs", "loox", "idrc", "pretzels", "anmelden", "vdd", "underdeveloped", "mktg", "yancey", "feta", "peres", "assemblyman", "enforcer", "suk", "customarily", "cillin", "jett", "bility", "mingw", "ltv", "sarees", "aaas", "bloopers", "framemaker", "piscataway", "cytoskeleton", "wuhan", "maximising", "hoists", "fichier", "amitriptyline", "sgr", "scrubber", "gratuites", "reentry", "playtex", "communi", "buisness", "freepics", "kbit", "marmaris", 
"logarithm", "granola", "inefficiencies", "monocular", "kankakee", "tandy", "ferrite", "formato", "gaysex", "dbus", "autorun", "nivel", "ayatollah", "undifferentiated", "flowershop", "evp", "vazquez", "reaffirm", "dynix", "pictur", "collette", "oooo", "dian", "doxycycline", "weblogging", "cluttered", "sportsmanship", "relievers", "hwa", "vikram", "booktopia", "lampoon", "airtight", "firming", "mrtg", "shoreham", "annular", "hallmarks", "sparking", "anale", "ikon", "lanl", "gfdl", "commandline", "usfws", "adic", "nns", "pmd", "rfd", "ized", "rsd", "guardianfilms", "gryffindor", "ror", "blogspot", "thao", "obsolescence", "linguists", "blogads", "xinjiang", "recode", "onus", "heinlein", "oks", "kimble", "reservists", "blaupunkt", "statins", "descendancy", "obsoleted", "phim", "betacam", "mlp", "rearrangement", "disulfide", "myer", "bypassed", "onefit", "interp", "neutralizing", "tirana", "occupiers", "kingpin", "bnm", "relaying", "bga", "amilo", "overlord", "daffodil", "ukiah", "devotionals", "figueroa", "imd", "warenkorb", "dfo", "habib", "archivos", "lymphocytic", "kala", "deering", "undetectable", "infact", "vermeil", "silage", "ejaculate", "smithers", "gaeilge", "swr", "goudy", "inkl", "bilge", "texto", "satb", "prolactin", "bejeweled", "bastrop", "sunbelt", "chewy", "paginas", "decimation", "coen", "hypotension", "stateful", "pypy", "busby", "gaither", "tta", "patterning", "rdp", "cheep", "ldr", "denbighshire", "wittgenstein", "preexisting", "coffeemaker", "braveheart", "pbr", "ctt", "ginsburg", "superconductivity", "eurostat", "kyi", "amygdala", "corrie", "lonestar", "dueling", "challengers", "reshape", "photoset", "electrolytic", "hasegawa", "gainers", "calidad", "tinkerbell", "aldara", "poway", "physiologic", "optimality", "riyal", "hwn", "dremel", "cerebellar", "dth", "dancin", "summarises", "choy", "heartwarming", "unwin", "strider", "eastlake", "hyp", "cannonball", "mathcad", "skipton", "patently", "bitmaps", "biopharmaceutical", "analytically", "sll", "aramaic", "bogged", "incremented", "homem", "valorem", "publicist", "acb", "muzik", "tempera", "recyclers", "pillsbury", "seach", "intermediation", "lacing", "aggregating", "soundboard", "teapots", "rif", "neb", "archivo", "smartdisk", "boho", "titration", "tschechien", "sef", "boney", "oxidoreductase", "lino", "lcm", "skimmer", "mccullagh", "gats", "extrinsic", "erlbaum", "sketchy", "gooseneck", "bof", "tiffin", "pacer", "battersea", "noname", "gung", "asv", "sasaki", "outboards", "owings", "xue", "tbi", "interlaken", "kampala", "jcc", "tentec", "kilpatrick", "pixmap", "bitty", "pge", "dtmf", "prosser", "ojai", "stethoscope", "monotonic", "ebookmall", "perot", "medien", "kahuna", "washroom", "jacoby", "neurotransmitter", "intercity", "broadview", "micros", "straus", "flack", "amortisation", "pfu", "tonite", "vonnegut", "distros", "teething", "subsector", "mechanistic", "orbis", "flawlessly", "lidar", "frp", "whatnot", "tripartite", "studebaker", "cartographic", "rwd", "preconditions", "gardenia", "adland", "miembro", "irland", "linwood", "biotic", "kowalski", "marymount", "zathura", "highgate", "fudforum", "takeshi", "taro", "mpd", "crowder", "socialize", "scunthorpe", "deepwater", "clickbank", "ruleset", "viscose", "perso", "novica", "manhunt", "pavers", "elks", "aalborg", "occupier", "lunchbox", "euchre", "proporta", "mitosis", "paychecks", "bellaire", "suitcases", "postel", "mdg", "tutu", "paisa", "wbs", "slidell", "psb", "vocab", "mmhg", "clocking", "sks", "hemorrhagic", "plein", "hitchens", "fone", "crores", "classifiers", 
"novosibirsk", "greenwald", "rtt", "copacabana", "videorecording", "kickstart", "biggie", "neutralization", "pvm", "ksu", "kph", "pdl", "preprocessing", "particulates", "skylark", "llandudno", "squirrelmail", "oviedo", "pauly", "bromsgrove", "starsky", "prion", "simfree", "pennywise", "grier", "apd", "diphosphate", "lbj", "interscan", "pipers", "tronic", "surfside", "tsunamis", "dordogne", "hotlinks", "neely", "jeri", "proteasome", "transl", "goulburn", "vtkusers", "energizing", "butane", "stf", "bluebonnet", "htf", "stmt", "inked", "novatech", "iid", "elektronik", "maturities", "nameserver", "tomlin", "jigsaws", "distorting", "kamikaze", "quaid", "juggernaut", "gordonii", "latrobe", "bboard", "consultancies", "handley", "gramercy", "ccb", "derrida", "mgb", "bioavailability", "ucas", "tdr", "nochex", "lilith", "foreplay", "waas", "mccaffrey", "privatized", "uncovers", "gargoyle", "stockists", "ostream", "lenmar", "mamiya", "mildura", "insn", "bodega", "hardworking", "dockets", "dedham", "ered", "stomping", "kottayam", "carle", "eest", "pondicherry", "mpr", "fiddling", "panamanian", "buyitnow", "bungie", "goya", "superclass", "categoria", "buyback", "uhh", "gigolo", "tmj", "vangelis", "kingwood", "arn", "dorling", "maximization", "wls", "absenteeism", "quantifiable", "pion", "sliver", "leptin", "sxsw", "bummer", "isometric", "retraction", "amboy", "dunning", "grinch", "okeechobee", "shouldnt", "teeniefiles", "gcj", "whatcom", "bbe", "unb", "sws", "hydrocortisone", "cerebrospinal", "susana", "rumba", "bouchard", "yesteryear", "orthotics", "spunk", "superdrive", "jolene", "jalapeno", "propellant", "touchpad", "raisers", "mdma", "confocal", "jochen", "caddo", "dcl", "expatica", "bitstream", "igo", "bartenders", "refilling", "modell", "keighley", "rangefinder", "nostdinc", "oficial", "lanparty", "monza", "sportfishing", "rlc", "exacerbate", "beckwith", "anemone", "equivalently", "duxbury", "zhen", "cordele", "ebel", "ninjas", "milla", "incase", "mva", "zinn", "comercial", "segfault", "wisden", "maingate", "costner", "powerpuff", "gsfc", "lycoming", "regula", "lastminute", "winbook", "talladega", "optiplex", "syrups", "chiles", "estimations", "jaxx", "cercla", "slb", "absolutly", "guesswork", "tradeshows", "javascripts", "irritant", "warcry", "optura", "combinatorics", "graceland", "encino", "disconnects", "castello", "monolith", "mct", "geos", "hls", "intrusions", "glories", "prelims", "kanawha", "yglesias", "squibb", "memset", "edirol", "mandala", "alexey", "homecare", "dugan", "calmodulin", "ameritech", "umar", "timepieces", "nonfarm", "anklet", "wsp", "byrnes", "determinism", "addams", "moeller", "normality", "wiesbaden", "deflect", "taoism", "ikeda", "chakras", "samara", "unsung", "gargoyles", "massaging", "ajmer", "lossy", "mitogen", "hurwitz", "gulliver", "bul", "aerodrome", "darkside", "intensification", "raya", "ruger", "rba", "gennaio", "seaford", "ungarn", "vincenzo", "warszawa", "dillinger", "bandon", "odell", "riddim", "perforation", "cida", "annika", "uart", "tryout", "proxima", "fst", "lladro", "parameterized", "assfucking", "manageability", "crystalspace", "pandas", "choiceshirts", "taa", "servertime", "fmii", "nepean", "tracklist", "indio", "tino", "bernal", "hbr", "homogenous", "policyholder", "distributional", "tidewater", "ngfl", "erlang", "starz", "follicular", "grupos", "oq", "gonorrhea", "blaqboard", "listeria", "afaik", "lawmaker", "datatypes", "arie", "flavorful", "apu", "fyrom", "refunding", "subcontracts", "moissanite", "finchley", "mediates", "polyacrylamide", 
"bizzare", "standish", "conus", "competences", "jtag", "compatability", "millville", "coches", "biathlon", "mico", "moxie", "biff", "paulette", "chania", "suu", "backspace", "aways", "fugue", "dissonance", "medicated", "initio", "bestality", "hypothermia", "carman", "timberline", "defenselink", "sunfire", "mckean", "smithville", "mtf", "rebooting", "storytellers", "lamisil", "morphing", "chua", "sevenoaks", "haplotypes", "fiskars", "speer", "lathes", "refillable", "yearbooks", "engin", "kyushu", "tricycle", "penne", "amphetamines", "systemworks", "keele", "afficher", "trillium", "nena", "bulfinch", "transients", "hil", "concedes", "swot", "howarth", "andante", "farmingdale", "bitching", "overtly", "rateitall", "tubulin", "gmx", "bannister", "omer", "humanoid", "infringements", "stylebox", "tiredness", "branden", "panning", "wasabi", "morecambe", "hawkesbury", "cocksucker", "sak", "kilobytes", "breather", "slu", "adjudicated", "methylene", "wholeness", "gnue", "gynecol", "uas", "nacogdoches", "simcity", "hummingbirds", "garnier", "kath", "cppflags", "educause", "cotswolds", "heifers", "sephora", "joao", "tremblay", "gynaecology", "vertebrata", "blackcomb", "ffxi", "ottomans", "rodin", "ecac", "actu", "nde", "lockable", "dslr", "evaporator", "antihistamines", "uninstaller", "airliner", "bibdate", "unwrapped", "dumbass", "brc", "arrhythmias", "netweaver", "sateen", "rtos", "eip", "moteur", "fotopage", "uhm", "birr", "autosomal", "protec", "purim", "rhododendron", "canadienne", "profes", "pjm", "ddl", "underlay", "granule", "setfont", "cookin", "gillett", "rocklin", "welland", "ageless", "nuernberg", "bleep", "emedia", "regensburg", "gama", "xfree", "sills", "berwyn", "howler", "hardtop", "carded", "lipo", "zandt", "reformatted", "internment", "dominick", "mahmood", "avent", "swaying", "igloo", "ambler", "voyeurism", "bachman", "referential", "hydrating", "adaware", "dewpt", "repressor", "galego", "neilson", "scorecards", "newlines", "arcana", "aau", "transworld", "nmc", "discoideum", "wairarapa", "fogerty", "beit", "heidegger", "backhoe", "leftists", "quinnipiac", "mannequin", "malloy", "enviroment", "mako", "anl", "noyes", "eprom", "trashed", "ryanair", "betsey", "rath", "lobbies", "silvertone", "cupcakes", "artest", "netfilter", "voldemort", "oldenburg", "bazooka", "gerbera", "cient", "psg", "mittal", "camellia", "pronouncements", "fonseca", "rescind", "asps", "asheron", "mance", "viggo", "qar", "hepatocellular", "styrofoam", "malfunctions", "lindner", "linc", "salida", "dunwoody", "dioxins", "shaq", "epmi", "excavator", "adolescente", "redcar", "urac", "oncolink", "cartoonstock", "cwm", "bibb", "gymnast", "inexpensively", "isystem", "evol", "nmda", "hazen", "davide", "forceps", "motherfucker", "ccw", "mainframes", "sapulpa", "costas", "searcy", "labelle", "adjoint", "mclennan", "killa", "lipscomb", "monocytes", "requestor", "cyn", "splint", "digitech", "mrnas", "llamas", "multifaceted", "gamez", "voorhees", "boas", "solvay", "thorsten", "yeo", "terk", "privatevoyeur", "coolmax", "rebooted", "toskana", "unidiff", "radionuclides", "tilburg", "decoys", "pariah", "offerors", "wmi", "darnell", "meaty", "gages", "zapata", "supt", "bartleby", "vermeer", "pinstripe", "hemodialysis", "artis", "tov", "amateursex", "dailey", "egret", "cornhuskers", "fontconfig", "jordans", "guildhall", "hasselblad", "piney", "unbundled", "kusastro", "onclick", "functioned", "toca", "houseware", "kdebase", "ysgol", "griggs", "nicd", "mdp", "umi", "fullmetal", "pappas", "aransas", "tacacs", "movem", "abundances", 
"oulu", "fractionation", "cdb", "blitzer", "ruc", "karte", "cashflow", "retouching", "brattleboro", "eprops", "cya", "ubud", "fmri", "infosys", "displacements", "jerez", "dhc", "ielts", "fellas", "mno", "picturemate", "unicorns", "playroom", "dandruff", "albers", "discworld", "leaved", "existance", "unionists", "bloodlines", "follett", "irn", "ramsar", "woodburn", "efs", "auk", "lockergnome", "oocytes", "armadillo", "bsr", "captiva", "rinehart", "brom", "tlp", "gensat", "filers", "lle", "retrievers", "pacifier", "thurmond", "stroudsburg", "dominik", "vivek", "nla", "inmarsat", "unprofessional", "hydrographic", "mcadams", "wailea", "nforce", "scones", "paediatrics", "nzdt", "ilog", "finkelstein", "candylist", "appalachia", "marist", "musgrave", "vakantie", "varanasi", "yushchenko", "relativism", "jardine", "schuylkill", "ericson", "schweizer", "stravinsky", "keds", "ananda", "nsx", "jud", "tripwire", "aves", "rediscovered", "headstone", "depleting", "junkyard", "perma", "copthorne", "multitasking", "distrib", "byob", "tunstall", "hager", "spearheaded", "nacho", "underlining", "heshe", "jcr", "catalogued", "rawlins", "springville", "differentially", "powwows", "tsui", "inductor", "chalabi", "encephalopathy", "grote", "ebs", "raipur", "custodians", "guardia", "jlo", "khalil", "overstated", "webtv", "insulators", "kass", "weds", "servizi", "quicklink", "qso", "dumbest", "prowler", "loadings", "epos", "sizzle", "desalination", "copolymer", "duplo", "lawnmower", "skf", "nontraditional", "piet", "ghaziabad", "dredged", "vct", "marcasite", "kamp", "scoliosis", "arwen", "artie", "fifths", "austell", "fernie", "carport", "dubbing", "weblist", "maximo", "bax", "searls", "scuk", "uiuc", "crustaceans", "yorkville", "wayback", "gcg", "ural", "calibur", "girona", "haig", "perk", "zander", "samir", "freee", "avia", "developement", "pptp", "beac", "urbanized", "trentino", "marzo", "dfl", "lpa", "jiri", "mccollum", "affymetrix", "bevan", "ichiro", "dtt", "cofe", "loyalist", "verma", "daybed", "rimes", "quimby", "barone", "thomasnet", "koeln", "endocrinol", "evaporative", "gwybodaeth", "preshrunk", "hezbollah", "naga", "mmu", "februar", "finalizing", "printhead", "blanton", "zellweger", "manhole", "eroding", "emap", "searchgals", "typewriters", "tabasco", "cpb", "coffman", "lsm", "rhodesia", "halpern", "purebred", "netapp", "masochism", "millington", "bergamot", "shutout", "willson", "chown", "prosthetics", "proms", "zk", "karol", "underlines", "mosh", "bakelite", "kirkby", "intermountain", "holtz", "prensa", "vegf", "galesburg", "lba", "klondike", "webstat", "reeder", "neoplastic", "applesauce", "fibreglass", "kenji", "gluon", "feisty", "hynes", "clogging", "nonverbal", "etoile", "orangeburg", "ladybird", "concat", "milliken", "byproduct", "specializations", "chaintech", "swa", "porterville", "kbyte", "bizwiz", "congruent", "boehm", "selva", "rainey", "aphis", "rfs", "tarantula", "egovernment", "udf", "snuggle", "shang", "batten", "inop", "lough", "vigrx", "trios", "bvi", "unallocated", "nau", "condiciones", "wss", "modi", "componentartscstamp", "dyk", "maldon", "xantrex", "dlg", "edx", "karzai", "navi", "brockport", "cort", "softgels", "engravers", "wether", "hangin", "handicaps", "associazione", "khu", "nfb", "dohc", "clu", "capps", "vijayawada", "griffon", "biologics", "bluescript", "instantiate", "paperweight", "dilation", "izzy", "bedspread", "knudsen", "jabberwacky", "kiowa", "overtones", "gsr", "faithfull", "quezon", "pragmatism", "rct", "usi", "wiretapping", "fabricate", "exabyte", "pitty", "kcl", 
"pendragon", "opment", "kva", "meeker", "bootlegs", "jimbo", "jarrow", "mullin", "gridsphere", "activesync", "macwarehouse", "vela", "wikiusername", "hessen", "eyelash", "gob", "antifreeze", "beamer", "feedblitz", "harvick", "clicker", "immobilized", "dalmatian", "hemodynamic", "reshaping", "contessa", "elc", "stagecoach", "googling", "maxpreps", "jessup", "faisal", "ruddy", "magazzino", "jippii", "academe", "fjord", "flybase", "alpena", "psl", "junebug", "grissom", "shiki", "knockoff", "kommentar", "westpac", "gosling", "novosti", "mendel", "adtran", "wasserman", "transexuais", "aslan", "hoge", "fouling", "macfarlane", "hideshow", "trailhead", "edg", "bayshore", "preprints", "grs", "duction", "anesthetics", "nalgene", "iaf", "khao", "berhad", "savedrop", "magnifiers", "chitty", "goldwater", "lesbiens", "jumpin", "payables", "victimized", "tabu", "inactivated", "respirators", "ataxia", "mssql", "storylines", "camaraderie", "carpark", "internetworking", "gawk", "planing", "termini", "avaliable", "scho", "buysafe", "hds", "iad", "pleasantville", "fabrications", "wtd", "loh", "jamshedpur", "denture", "gaudi", "bluefield", "telesales", "vpc", "ppr", "jetsons", "protagonists", "fjd", "anoka", "boliviano", "curtiss", "wagoner", "storyboard", "trol", "rajiv", "xfce", "axons", "dmso", "immunotherapy", "namorada", "neva", "zakynthos", "weitz", "quercus", "nhhs", "amara", "microcosm", "raia", "bizarro", "mehmet", "christos", "categorically", "autoresponder", "aad", "adolfo", "welwyn", "nzlug", "vci", "catnip", "whittington", "sorel", "boned", "vittorio", "seta", "tomasz", "annes", "tonka", "nath", "toth", "tomaso", "ascap", "livedoor", "schlampen", "altamonte", "scotweb", "pillowcases", "medlineplus", "ambiente", "masterson", "nlc", "fibonacci", "bridgeton", "wmds", "tyrrell", "junky", "ballasts", "jbuilder", "cnf", "nagano", "hardman", "roadmate", "interleaved", "peirce", "pusher", "egm", "thetford", "rtm", "gnostic", "coreutils", "uninstalling", "heft", "ambivalent", "startpage", "difranco", "mmi", "typist", "estudio", "seiu", "moisturizers", "cardiol", "lamination", "bibi", "mof", "carpe", "scottie", "blackrock", "pons", "fistful", "somethings", "itl", "staffer", "rhiannon", "linspire", "cornucopia", "newsfactor", "countering", "worldpay", "catan", "almaty", "appraise", "runny", "braunfels", "reorg", "icg", "javax", "sema", "albumlist", "heraklion", "stressors", "shg", "collocation", "mccauley", "vesicle", "stuffers", "prego", "ichat", "lubricated", "sinha", "pharmacia", "aggiungi", "shakin", "cyr", "vce", "vigilante", "gauging", "lipase", "constabulary", "biochim", "epcot", "cricketer", "defibrillator", "rcn", "drooling", "stoll", "staines", "tnd", "adversarial", "tbn", "softwa", "pbc", "ptp", "demonstrator", "boingo", "voyeurs", "aoki", "banerjee", "hondo", "hysteresis", "workspaces", "campion", "lugano", "mobilisation", "pruitt", "foals", "aciphex", "sculpt", "iskin", "soledad", "bagpipes", "devaluation", "beastyality", "segway", "mineralization", "grc", "trafficked", "stedman", "gurl", "mcginnis", "dvips", "klee", "garber", "wizardry", "fervent", "headrest", "dermatol", "chaperone", "huygens", "eurythmics", "transboundary", "reclassified", "delusional", "tosh", "pimpin", "husqvarna", "faxpress", "tinkering", "unneeded", "babar", "pago", "hussey", "officeconnect", "mickelson", "leukocytes", "wesnoth", "hydride", "npp", "zondervan", "pele", "opeth", "kottke", "hometwat", "ogm", "mauna", "kilns", "bpi", "kst", "harbin", "assemblers", "karst", "wada", "selfless", "gynecologists", "enewsletters", 
"willi", "bip", "nami", "guestbooks", "sharjah", "aguirre", "krug", "dongs", "drv", "schoolers", "kidnappers", "lemmon", "ilan", "gnutella", "deutsches", "liquidator", "evers", "uniross", "grassley", "stowaway", "brainer", "organiza", "cellog", "channeled", "tastings", "deccan", "aiaa", "neurosciences", "factorial", "librarianship", "texmacs", "vocabularies", "blasters", "livable", "tifa", "nant", "libjava", "ramblers", "counterproductive", "catskill", "environmentalism", "ufs", "gwalior", "ubl", "kilts", "balenciaga", "alamitos", "newsburst", "septum", "animators", "signifi", "neoclassical", "mediaeval", "piezo", "escudo", "pineville", "botanica", "petter", "adenine", "fren", "lysis", "pastas", "helicase", "dredd", "efinancialcareers", "diehl", "kiley", "kwd", "ihousing", "yoruba", "malformations", "embarassed", "alexia", "checkup", "commited", "nanotube", "becta", "trados", "portofino", "lifesaving", "danh", "sctp", "tayside", "rani", "playmobil", "tualatin", "razorbacks", "ionized", "perodua", "trg", "subst", "cpap", "molex", "vitara", "fostex", "zmk", "placental", "parses", "saic", "newsmakers", "dshield", "homocysteine", "juego", "metamorphic", "cld", "otcbb", "moet", "rado", "watchguard", "sugarland", "singularities", "trophic", "ekg", "dacia", "reversi", "insemination", "houma", "quetzal", "shoshone", "linder", "homing", "highbury", "eizo", "podiatrists", "conch", "crossref", "hda", "poppins", "chaim", "cytotoxicity", "xugana", "weevil", "integrations", "clarkston", "ritek", "morgue", "unpatched", "kickers", "referers", "kitt", "servizio", "biosecurity", "leviton", "twl", "etx", "electrification", "peninsular", "juggle", "yeshiva", "sociologist", "wsc", "sartre", "finitely", "spect", "kathie", "ards", "corny", "brazilians", "lundy", "histocompatibility", "woolwich", "irp", "handango", "cosgrove", "sulfuric", "renderings", "msh", "trt", "ldcs", "lect", "kollam", "edgerton", "bulleted", "acupressure", "thotbool", "hiawatha", "nhfb", "ahps", "operon", "ugandan", "paton", "suspends", "categorie", "stratigraphy", "howes", "surfed", "steins", "babu", "andrade", "agarwal", "ncd", "surefire", "cori", "planetside", "snorkelling", "waterworks", "luk", "headlamps", "anaesthetic", "isomerase", "fdisk", "dunstable", "awb", "hendon", "accreditations", "doral", "nta", "macadamia", "takin", "marriot", "bfs", "disqualify", "ttp", "sixt", "beazley", "rashes", "najaf", "hwg", "bukit", "antiaging", "psychol", "dfe", "bedingfield", "equated", "swig", "lightscribe", "unionist", "lytham", "clocked", "duced", "complementing", "keycode", "pennants", "camas", "eamon", "zaurus", "qnx", "srx", "delux", "uli", "grrl", "bookie", "boggling", "skewers", "richman", "photodisc", "oto", "uav", "cnhi", "umberto", "bautista", "zooms", "newsdesk", "roadblocks", "klum", "goh", "goebel", "pou", "homophobic", "diamondback", "foosball", "rept", "spurgeon", "lumberjack", "marv", "epidermis", "mobley", "oktoberfest", "photoshoot", "rhinoplasty", "peptic", "bauman", "tannins", "psychotropic", "tilley", "malaya", "hypothalamus", "shostakovich", "scherer", "tsh", "manipulator", "calabasas", "coromandel", "pliner", "timestamps", "pango", "edexcel", "snc", "nim", "gwaith", "breaststroke", "oroville", "mitsumi", "ichi", "mobius", "deductibles", "nikola", "berrien", "peacemaker", "ilia", "bookmarked", "letterbox", "halal", "agl", "noor", "noll", "filenet", "freeland", "kirsch", "roadhouse", "charted", "microtubule", "cubicles", "blau", "ladysmith", "gatti", "ection", "switchable", "mcminnville", "hcm", "interactives", "altus", 
"phospholipase", "transformative", "samuelson", "completly", "anhydrous", "germplasm", "gradzone", "gdansk", "jenner", "parkin", "unmoderated", "wagers", "beliefnet", "hotbar", "canis", "ravioli", "enrolments", "walling", "marblehead", "dvt", "cameltoes", "ribosome", "carnivals", "srf", "speedman", "instrume", "moffett", "augustana", "topsoil", "latifah", "isomers", "pettit", "lemans", "telescoping", "gamedesire", "koha", "balancer", "picton", "underhill", "dinghies", "chooser", "argentinian", "ahrq", "apparels", "timescales", "cef", "athenian", "mcewan", "sexshop", "zermatt", "mha", "geert", "bugging", "trento", "lyndhurst", "nex", "wdc", "symbiotic", "wds", "dyslexic", "nomic", "tecnica", "mmap", "wishbone", "mcad", "prm", "bashir", "licenced", "larissa", "collab", "squirter", "infecting", "penetrations", "protea", "argento", "polyvinyl", "ganglion", "ruud", "bunt", "solgar", "lipper", "chimpanzees", "jdo", "testcases", "tda", "hamza", "meeks", "athol", "centimeter", "excreted", "paros", "azzaro", "nappa", "sirna", "sexvideos", "nonprescription", "lyd", "firework", "crlf", "localize", "tablatures", "jndi", "vigorish", "dcd", "schulte", "gioco", "chested", "universit", "thrivent", "jie", "hydrothermal", "smalley", "hoke", "ramen", "coleoptera", "intensifying", "copyleft", "llb", "outfitted", "khtml", "chatterjee", "adoptee", "augusto", "resnick", "intersects", "grandmaster", "nusa", "deadball", "cksum", "historiography", "amistad", "bellacor", "trcdsembl", "campagnolo", "downgrades", "sexbilder", "scrapping", "pdoc", "haskins", "bullhead", "rhett", "mimosa", "wildfires", "ellyn", "hryvnia", "halved", "cfml", "vatu", "ecademy", "dolore", "shauna", "multilink", "funchal", "ximian", "bergamo", "quarterfinals", "hobbyist", "reardon", "homozygous", "glyn", "popset", "torsten", "puller", "mathworks", "namm", "dena", "mdksa", "dcom", "danskin", "bexar", "dinning", "pfd", "misfit", "hamden", "hardie", "redfield", "scotus", "quotable", "cranfield", "asides", "beacuse", "musicstrands", "kla", "unternehmen", "teg", "roseland", "pgbuildfarm", "volo", "zirconium", "noelle", "httpwww", "agement", "guan", "tcf", "opencube", "shao", "mears", "rectification", "omc", "duisburg", "pows", "hsphere", "entertai", "keeler", "highpoint", "stratospheric", "newegg", "preeminent", "nonparametric", "mistral", "percocet", "zeroes", "kth", "divisor", "wanderlust", "ugc", "cleat", "decentralisation", "shite", "verna", "immediacy", "trak", "swingin", "eckert", "casco", "olivet", "resi", "bergeron", "felonies", "gasification", "vibrio", "animale", "leda", "artesia", "casebook", "nhc", "gruppo", "fotokasten", "yaw", "searing", "detonation", "gse", "approximating", "hollingsworth", "obasanjo", "pinewood", "tangential", "ridgway", "headhunter", "ero", "sharkey", "clwyd", "bretton", "bustier", "apologizes", "manoj", "muskogee", "pismo", "resortquest", "diskeeper", "lathrop", "pala", "glebe", "xterra", "pml", "seahorse", "geneve", "wpointer", "softener", "breaching", "maelstrom", "prioritizing", "jsa", "annunci", "modelos", "seraphim", "raymarine", "dodgeball", "munity", "assfuck", "alopecia", "singaporean", "nowak", "keyboarding", "beachside", "sparco", "robeson", "navbar", "fsr", "contribs", "lineages", "sumitomo", "dermatologists", "marbled", "probleme", "irv", "blackmore", "bothersome", "draconian", "troup", "approver", "pcgs", "saville", "srinivasan", "poldek", "perfor", "articular", "gwynn", "trackball", "asis", "mansell", "unf", "werewolves", "magazin", "sible", "vla", "autocorrelation", "waltrip", "mombasa", 
"schroder", "alachua", "hks", "duns", "ornl", "cabrio", "guanine", "bridgetown", "rhsa", "luka", "cpf", "roadstar", "creditcard", "frf", "michaela", "willett", "brews", "baskin", "hamel", "zoids", "semantically", "cagliari", "eggert", "valkyrie", "airlie", "salas", "gnomemeeting", "benji", "nent", "cashew", "unproven", "myocardium", "kap", "gini", "prek", "cypher", "paraiso", "nightline", "cursive", "organises", "hydrated", "csk", "schwanz", "martinsburg", "liguria", "hsieh", "forties", "pgc", "sayre", "photosynthetic", "pips", "tongued", "lifetips", "walcott", "cname", "unapproved", "emm", "nematodes", "jaclyn", "kell", "gremlins", "bolero", "togethers", "dicom", "paroxetine", "vivien", "gpr", "bru", "ilt", "lished", "tortola", "mav", "powertrain", "telkom", "immunized", "nuneaton", "fica", "trulia", "ricochet", "kurosawa", "aberrant", "nld", "ukr", "wyandotte", "odpm", "pgk", "dumber", "ruptured", "insoles", "starlet", "earner", "kem", "radiologists", "polydor", "nutraceuticals", "zoomed", "groupie", "brinkmann", "thrombin", "aco", "laminar", "immunoglobulins", "jamnagar", "camber", "vxi", "colliery", "incubators", "procimagem", "sweeties", "landfall", "seanad", "intramurals", "kwok", "borderless", "methyltransferase", "suwannee", "lgs", "cjd", "hyperlinked", "birkenhead", "torrevieja", "purposefully", "gutted", "serveur", "grr", "morrell", "ouachita", "imran", "slat", "freeways", "multithreaded", "newlyweds", "documentum", "ebm", "xiang", "burnin", "reelection", "hales", "rutter", "uunet", "vitreous", "noord", "centrelink", "lempicka", "iru", "countable", "dolomite", "salvaged", "soyuz", "frick", "lwp", "afterglow", "ferent", "maes", "mandi", "secunderabad", "millwork", "sampo", "takedown", "colostrum", "cfnm", "judeo", "wisc", "lata", "sexi", "homies", "tarmac", "customisation", "conservator", "pipettes", "goon", "artefact", "expository", "complementarity", "cosco", "mercosur", "tfm", "benzodiazepines", "mii", "netmask", "stalling", "molnar", "hmso", "huw", "aliso", "decors", "oldman", "nuevos", "acis", "somthing", "zabasearch", "steuben", "minicom", "hausfrau", "goldfields", "rickey", "minichamps", "usagi", "bisexuales", "rothman", "shana", "srivastava", "oemig", "beefy", "senha", "pica", "pucci", "skits", "shenyang", "mussolini", "kootenay", "ethnology", "donohue", "cyc", "childers", "mahjongg", "davao", "tajik", "codemasters", "mydd", "charade", "arnhem", "bobbin", "istudy", "rugrats", "dancewear", "mechanized", "ject", "mayes", "canmore", "reassigned", "nnnn", "crema", "bursa", "cfu", "svm", "riccardo", "realvideo", "lites", "krall", "centrifugation", "welds", "braunschweig", "coptic", "securityfocus", "reorganisation", "conglomerates", "dehumidifiers", "dumper", "hamill", "halston", "iau", "wfc", "spiny", "arezzo", "mbeki", "invisionfree", "dropkick", "elastomer", "wahoo", "anagram", "fogdog", "finnegan", "gof", "newsworthy", "defs", "sensitization", "hyperactive", "sidi", "antenatal", "elektro", "nordsee", "yuna", "pluggable", "hemophilia", "kola", "revitalizing", "seepage", "alitalia", "orale", "wri", "ory", "bcf", "wooten", "nonviolence", "baume", "berkman", "ashdown", "diciembre", "purports", "fcuk", "shillong", "mondial", "brushless", "technicolor", "narragansett", "barenaked", "pandagon", "rehabilitated", "outdoorliving", "expendable", "ponca", "tigard", "soulmate", "kaine", "maxis", "poppers", "allposters", "commercio", "dods", "tsl", "volusia", "iic", "thm", "elibrary", "datebook", "rapists", "ultrasparc", "seabed", "orly", "complicating", "suzi", "texturing", 
"correspondences", "groomsmen", "avo", "latour", "manipur", "arnett", "suzhou", "headboards", "cil", "palomino", "kol", "pomeranian", "diptera", "gericom", "steiff", "cordis", "erythrocyte", "myelin", "fragility", "drucken", "reso", "hov", "tsukuba", "kustom", "invoiced", "hannigan", "hangul", "montauk", "modulators", "irvington", "tsang", "brownian", "mousepads", "saml", "archivists", "herringbone", "bodom", "harrahs", "daiwa", "juanes", "nids", "moorcock", "ccu", "eyeliner", "totalled", "syp", "woken", "aphids", "cutthroat", "coincidental", "lepidoptera", "buda", "tarrytown", "vaseline", "bluewater", "strontium", "burdick", "crustal", "hackman", "shopnbc", "aicpa", "psal", "albicans", "seduces", "epps", "kroll", "unambiguously", "staley", "cutbacks", "hemet", "ariana", "pch", "cgmp", "mcas", "multimeter", "anubis", "htr", "analyte", "peseta", "enh", "glitz", "kewl", "bidi", "winsock", "lvs", "moldings", "peltier", "iod", "ior", "trackmania", "ballets", "doylestown", "spaceflight", "quicklist", "proportionality", "overruns", "yadav", "sordid", "qpf", "mentorship", "lyx", "tained", "oligonucleotides", "bbci", "spidey", "videotaped", "regnow", "jukeboxes", "xpdf", "portishead", "irt", "splunk", "kommentare", "citywire", "crud", "nev", "febs", "adu", "ird", "ribeiro", "abrahamsson", "epidemiol", "coms", "vdo", "outro", "pneumococcal", "tilton", "brookstone", "apic", "avenge", "alleviating", "sportif", "inservice", "punts", "tives", "sora", "tgs", "daugherty", "yarrow", "wakeup", "meatloaf", "mumford", "datafile", "buchen", "zzzz", "objectclass", "polices", "dogging", "cursus", "plasminogen", "kinsella", "lindgren", "asymptotically", "duce", "wonderwall", "crick", "pvd", "enveloped", "mnfrs", "caseiro", "instabilities", "muskoka", "jeni", "thalia", "apac", "reforestation", "paradoxically", "dren", "dubbo", "inductors", "opin", "symlinks", "gamestracker", "secam", "gatorade", "irm", "cava", "rupp", "wacker", "lanta", "cres", "yue", "oligo", "chairpersons", "incesto", "spca", "zapper", "materialized", "accolade", "memorized", "squidoo", "interpretative", "roping", "rauch", "oxymoron", "reciever", "maryann", "pentagram", "viv", "infusions", "slvr", "choppy", "robotech", "spb", "servic", "saya", "univeristy", "bahamian", "gos", "fwy", "nocd", "stipends", "stirlingshire", "caerphilly", "riboflavin", "fiu", "kalb", "ubiquity", "vandal", "romper", "bitumen", "nolo", "shimizu", "postpost", "rummy", "paleo", "unrhyw", "pinscher", "constructively", "sufjan", "christiane", "spliced", "finca", "gpf", "iaa", "iesg", "brecon", "kiran", "trekearth", "repeatability", "gunning", "byblos", "tadpole", "mitsui", "storytime", "berserk", "wellman", "cardiologist", "jammin", "leis", "hirst", "fellatio", "ggc", "terran", "breadcrumbs", "lorena", "remaster", "tpg", "cifrada", "curvy", "envisage", "boneca", "basements", "sharpton", "crucially", "lfn", "imao", "antonin", "soundgarden", "carrara", "bron", "decoupling", "monroeville", "environmentalist", "msha", "eastenders", "adultfriendfinder", "bein", "stef", "fpgas", "mistreatment", "rbl", "qlogic", "shona", "sutcliffe", "previousprevious", "infective", "estrella", "gans", "shards", "vcds", "acadian", "kahului", "phonetics", "comittment", "blix", "biocompare", "whimsy", "frameset", "kot", "nyack", "lolo", "carboxylic", "pkgconfig", "dipartimento", "traceback", "svlug", "microdermabrasion", "waterbody", "jeeps", "tiverton", "wundef", "spay", "gilmer", "ceqa", "bodog", "followups", "internat", "biarritz", "gurps", "bessemer", "iceman", "pegged", "liberator", 
"rediscover", "lovecraft", "wavefront", "bhangra", "zuni", "epm", "meningococcal", "ketone", "glazer", "yashica", "geodesic", "congruence", "tenkaichi", "omani", "tenuous", "reuter", "surfactants", "cohomology", "epicenter", "toke", "dwf", "santas", "kutcher", "christo", "lucio", "phenomenological", "debriefing", "miniskirts", "ansmann", "mfps", "lentil", "kannur", "backer", "albedo", "flsa", "pauli", "mcewen", "danner", "angora", "redstone", "lxwxh", "informacion", "phyto", "libpam", "blo", "cocky", "pitchfork", "stratocaster", "mohegan", "brazzaville", "broussard", "beano", "interconnections", "willa", "toiletry", "sats", "beko", "exchangeable", "colm", "arabe", "stretchy", "starburst", "dzd", "neurologist", "leonards", "kitties", "dottie", "rspb", "fwrite", "homicides", "forde", "ipf", "travelpro", "haemophilus", "ronny", "hubris", "bottomline", "kosova", "neuropsychological", "genitalia", "waiving", "swirls", "dampers", "comhairle", "cheech", "eigenvectors", "extrapolated", "chaining", "defected", "yurasov", "gakkai", "justia", "campylobacter", "northumbria", "seidel", "kenseth", "pmr", "kare", "dumbo", "holocene", "jwin", "superconductors", "yeung", "polygram", "egon", "distillate", "unweighted", "gramm", "safeco", "bentonville", "ishikawa", "vuv", "strachan", "bayard", "escalator", "periwinkle", "breakin", "rsmo", "publishi", "darmowy", "outfile", "choreographed", "obrazki", "accross", "yag", "gravesend", "lovemaking", "boucheron", "farrow", "annulment", "kwai", "tubbs", "bartow", "tonbridge", "lesbico", "panerai", "spate", "belladonna", "lexi", "sobering", "carcinogenicity", "djf", "semis", "pcv", "suppressors", "leachate", "dingle", "mbendi", "celina", "hydroponic", "hoyer", "xia", "kovacs", "recalculate", "maltreatment", "hitchin", "medtronic", "meerut", "whsmith", "fontsize", "relaxes", "kis", "halos", "cracow", "saco", "webcomics", "ife", "sauder", "dioceses", "uct", "postdoc", "biceps", "leela", "hydrant", "hamstring", "darrow", "tinderbox", "sify", "naw", "ganguly", "streetwise", "imprinting", "dandenong", "colecovision", "gnuplot", "nucleation", "werbung", "prb", "blr", "croce", "deviance", "goldfrapp", "tetrahedron", "materialize", "homeworld", "foodborne", "baixar", "stagg", "fondness", "ellicott", "merchandiser", "ler", "djia", "eastleigh", "blacklisted", "freetext", "wxhxd", "multiplicative", "metis", "urethra", "dalrymple", "retroactively", "hartnett", "gcd", "kilos", "multivitamin", "vientiane", "koji", "scran", "bwp", "emoticon", "mercator", "lyricist", "macromolecules", "fungicides", "amines", "karcher", "cssa", "freetown", "beneficially", "tugrik", "monotype", "ishii", "kempinski", "pigmented", "mipsel", "ridership", "athenaeum", "twikiweb", "mpm", "faking", "clsid", "kenobi", "endoplasmic", "motorised", "lomax", "geraldton", "eck", "cssrule", "auerbach", "metlife", "apocalyptica", "masa", "risotto", "follicles", "ashtabula", "sussman", "exmouth", "melua", "cvss", "pana", "stimulators", "gnf", "uvic", "asustek", "dieta", "famvir", "conflicted", "retirements", "sixers", "metab", "gregoire", "burris", "creat", "rajan", "brainwashed", "berenstain", "crittenden", "antoni", "gbs", "associ", "yankovic", "gnvq", "rogaine", "kek", "gridlock", "integrable", "chalkboard", "dopod", "unranked", "karlsson", "anaemia", "natur", "permian", "bartley", "unaffiliated", "slrs", "montreux", "partici", "starbuck", "infractions", "karon", "treviso", "backdrops", "turkmen", "standups", "sowell", "aktuelle", "gleeson", "lss", "globulin", "woah", "nte", "midob", "violator", "boxcar", 
"sagan", "aviso", "pounder", "vieira", "kronor", "tocopherol", "keiko", "newsrx", "lesbe", "pharmacokinetic", "intercepts", "tirelessly", "adsorbed", "ksh", "plunkett", "guenther", "penta", "phospholipid", "reiterates", "wuc", "oversaw", "arraylist", "qy", "outsourcer", "eyeshadow", "pushbutton", "doujinshi", "catagories", "pilar", "paltz", "viaduct", "pugster", "elastomers", "evenflo", "mmk", "wadi", "secularism", "cellspacing", "trekker", "llm", "pakistanis", "glyphs", "neuroblastoma", "loftus", "gigli", "thorp", "seeley", "producten", "glandular", "aligns", "rejuvenate", "grt", "northants", "ifconfig", "sherrill", "wintasks", "xenia", "whangarei", "hra", "expres", "nadir", "recoup", "rnai", "fyr", "franchised", "batchelor", "relocatable", "warhead", "backfill", "fascists", "kedar", "adjacency", "iberostar", "mancha", "gorton", "insta", "jni", "cellpadding", "larnaca", "carmarthen", "endgame", "streamlight", "golan", "thomann", "totten", "curbside", "samhsa", "howrah", "planer", "hermaphrodite", "gavel", "bassinets", "footjoy", "fairtrade", "gah", "prestwick", "paoli", "alben", "laconia", "berkowitz", "inputting", "dimming", "indiatimes", "arcgis", "goof", "landmine", "boracay", "appro", "notifier", "wirth", "valerian", "bucher", "wts", "saad", "weisz", "enrollee", "authenticating", "wheatland", "zildjian", "revisor", "faauto", "profs", "pheonix", "seitz", "administrivia", "foams", "leh", "orbitals", "hammerhead", "dotcom", "xof", "klezmer", "fosgate", "walworth", "niguel", "quickfind", "isakmp", "facia", "stalemate", "multimediacard", "motrin", "glx", "classifies", "ischia", "ankh", "mohali", "incurs", "feist", "ldb", "netzero", "rationalization", "eef", "brokering", "viewport", "isas", "masterbate", "geneseo", "grammer", "garantie", "sanofi", "malignancies", "yaesu", "jpegs", "spitz", "chea", "limassol", "lobbied", "splat", "nostradamus", "gallium", "mobb", "mannered", "dorada", "nalin", "sorbet", "lunenburg", "phc", "tdma", "bodycare", "jobsearch", "sharia", "topiary", "cataloged", "camsex", "avm", "kimber", "extendable", "ager", "pella", "optometrist", "tinh", "bogey", "kana", "pipette", "bln", "coveralls", "teng", "stayz", "isolator", "wicking", "cph", "zany", "umatilla", "austral", "applauds", "taks", "interferometer", "barbican", "ohana", "rebs", "cerf", "criminally", "mkv", "adio", "psychopathology", "lkr", "leyton", "cartoonists", "appellees", "indira", "redraw", "pictbridge", "mahesh", "beng", "ncar", "gord", "nanometer", "faceless", "moyers", "oregonian", "aftershock", "gena", "leggett", "wsdot", "classique", "menon", "spiro", "whiteboards", "strategists", "dnv", "loti", "kaos", "hydrotherapy", "marionette", "islay", "myv", "typeof", "igt", "nitty", "ddb", "quintile", "freightliner", "monkees", "lindley", "dehumidifier", "industrials", "bouncers", "transfered", "mages", "dmb", "roseanne", "chk", "trigraphs", "rer", "bettis", "cyberlink", "browsable", "workhorse", "iterated", "mcfly", "kyd", "pooping", "preferentially", "fraternities", "diuretic", "octubre", "castell", "emerg", "sampras", "gephardt", "zimbabwean", "unexpired", "westmorland", "biscotti", "mavica", "everyones", "shaikh", "nampa", "youngblood", "plana", "refractor", "bouldering", "flemington", "dysphagia", "redesigning", "milken", "xsel", "zooplankton", "gsd", "philatelic", "modularity", "parkview", "keto", "marrone", "wallmounting", "tias", "marengo", "quiche", "epoc", "resales", "maduro", "murrieta", "fairplay", "ddp", "woodinville", "registro", "transcriber", "notarized", "neocons", "franchisor", "diab", 
"vying", "morehouse", "lauper", "bedspreads", "pooch", "morphism", "gripper", "tavistock", "negated", "javabeans", "nashik", "atomki", "musicianship", "viaggi", "bbn", "cady", "adios", "purview", "bosque", "xxxl", "dyfed", "biomaterials", "overpass", "berners", "goaltender", "speedometer", "ultrium", "carteret", "fatwa", "bottomed", "superscript", "rwandan", "proteinase", "coolermaster", "maca", "haircuts", "crewneck", "discriminant", "bayfield", "mishra", "morey", "multiplexers", "pcga", "stade", "carnivore", "codingsequence", "knowledgealert", "egalitarian", "pombe", "yamato", "jenson", "mortgagee", "middlefield", "iiyama", "schell", "midler", "nags", "caplan", "anyplace", "haridwar", "sternberg", "ventilating", "retreating", "shopsafe", "mohave", "brion", "immun", "zapf", "mingus", "prolly", "trichy", "microform", "olsson", "jdc", "dosimetry", "smelter", "rayovac", "takeda", "mbt", "ied", "dynamism", "fileattachment", "rabat", "devs", "mellor", "manmade", "somaliland", "hashtable", "sdb", "conto", "furtado", "statics", "saleh", "puja", "kamera", "eport", "killian", "rucksack", "janette", "powerware", "phenylephrine", "cupcake", "karp", "bodum", "celular", "zamora", "qian", "dws", "psig", "polycystic", "titts", "krzysztof", "parsippany", "raggedy", "eason", "epg", "bsg", "payloads", "alon", "cebit", "wedgewood", "daten", "pbi", "annexe", "cyclen", "customizations", "stunningly", "hugger", "junio", "jtc", "xcd", "prequel", "strathmore", "champloo", "billerica", "talley", "estoppel", "ameritrade", "torr", "cytomegalovirus", "bpel", "domus", "madigan", "supercool", "ysl", "contaminate", "rxlist", "sailormoon", "ubid", "plovdiv", "mcsweeney", "govideo", "bassinet", "taillights", "typhimurium", "dez", "fci", "visionaries", "salesmen", "nicki", "skagen", "hibernation", "ponders", "rrsp", "middleburg", "innkeepers", "mcauliffe", "gardasee", "pcn", "asce", "aromatics", "interplanetary", "landcare", "towneplace", "downloaden", "discontinuing", "bork", "sealers", "weybridge", "wusthof", "interbank", "hullabaloo", "erratum", "contreras", "sandwell", "novgorod", "earbud", "jds", "coastlines", "echolist", "guntur", "lmp", "trunking", "foxtrot", "rosanna", "patchouli", "inequities", "testes", "defaulting", "alpert", "securitization", "nsfw", "borer", "originators", "postid", "phx", "censoring", "hashimoto", "oriole", "chipotle", "slocum", "ipeople", "rdg", "reusing", "saeed", "wetzel", "mensa", "shiner", "chal", "rhesus", "streptomyces", "datagrams", "invalidated", "shenanigans", "mkii", "sandford", "lennart", "pract", "npi", "travelguide", "championed", "biosolids", "billable", "givers", "tmdls", "cockroaches", "testcase", "faraway", "cfengine", "umbc", "underwritten", "biofuels", "cyberhome", "dinh", "zegna", "tarps", "sociologists", "ellesmere", "ostomy", "vso", "sena", "ingest", "gazebos", "sirloin", "cyclophosphamide", "bitdefender", "catz", "bpp", "giancarlo", "kategorie", "arjan", "valery", "kmc", "insp", "recomended", "dataport", "pfaff", "manuale", "rog", "niven", "mahi", "ghs", "atsdr", "rangeland", "commonality", "xid", "midis", "cwc", "regrettably", "navidad", "yahoogroups", "kaw", "ston", "ves", "pulau", "playbook", "digipak", "jetblue", "kavanagh", "exhibitionists", "armidale", "arquette", "copland", "namib", "cne", "cheapflights", "wyvern", "lucene", "muffled", "vincennes", "inlays", "lockets", "whitey", "brin", "wharfedale", "guyanese", "laryngeal", "outfielder", "nonattainment", "softimage", "cellgroupdata", "literatura", "myoplex", "yorba", "bct", "pva", "slapstick", "cottrell", 
"dialers", "subculture", "cmx", "modded", "skids", "roselle", "klub", "marathons", "tgt", "skeet", "toucan", "masterclass", "nnp", "calcio", "oxidizing", "alo", "kennebec", "zj", "intergalactic", "biomolecular", "cii", "powweb", "mcwilliams", "phosphorous", "photocopiers", "obligor", "matcher", "listbox", "voigt", "fdl", "dawley", "scribus", "lessors", "npn", "luminaries", "karats", "bridger", "slm", "hadronic", "fairport", "piecewise", "recharging", "dmm", "unionville", "intermedia", "goetz", "urinal", "joystiq", "grosso", "sobaka", "payphone", "rockfish", "duodenal", "uninstalled", "leiter", "coworker", "escuela", "cyclades", "longterm", "taber", "screenplays", "gpt", "shiites", "ntop", "farcry", "jitsu", "lactobacillus", "uniontown", "cloner", "otaku", "hoyas", "kandahar", "kerrville", "akers", "neuropsychology", "multimap", "allston", "femininity", "trask", "accuweather", "deferment", "wam", "fmp", "portlets", "glsa", "westmont", "waders", "cellulare", "homehome", "frogger", "hass", "rya", "seqres", "hellfire", "havering", "montfort", "chokes", "eharmony", "knowsley", "bordellchat", "cvsweb", "houdini", "umr", "canarias", "babyshambles", "bridgette", "cinque", "drezner", "hsin", "alcan", "stas", "outlier", "naira", "neverending", "masson", "khanna", "systeme", "hillsong", "camshaft", "exotica", "milburn", "bijou", "destdir", "innervation", "gga", "oqo", "cunha", "reefer", "techspot", "hibernia", "alpina", "iarc", "constraining", "nym", "dard", "estefan", "fuser", "lepton", "pergamon", "wiktionary", "razer", "poznan", "netscreen", "manda", "npv", "xmb", "kingstown", "topix", "batsman", "wavelets", "cogs", "bigtitsroundasses", "barnhart", "scofield", "ebrd", "desorption", "bellflower", "watertight", "stevia", "photocopier", "haverford", "talc", "penises", "gwendolyn", "buynow", "nairn", "prolab", "lundberg", "backordered", "coh", "mononuclear", "unocal", "brunson", "greenlee", "emer", "txdot", "prichard", "conferees", "renata", "ternary", "footballer", "sisyphus", "directfb", "foolproof", "chastain", "lakshmi", "dsb", "megane", "cdo", "someones", "rebelde", "morrigan", "mymovies", "tiananmen", "immunosuppressive", "mcveigh", "stylin", "brower", "mpltext", "aibo", "pdd", "depositor", "ofcourse", "ecdl", "redenvelope", "acidophilus", "deci", "defensively", "analytica", "cnd", "hrp", "tnr", "tryon", "forgo", "barca", "pahrump", "foros", "pickabook", "hellraiser", "lithographs", "educates", "ediets", "gopal", "signers", "digext", "netbackup", "dimensionality", "triax", "rnase", "aman", "angell", "bochum", "eyepieces", "earbuds", "americablog", "makeovers", "unprocessed", "pfa", "widctlpar", "clausen", "punbb", "centra", "monson", "infogrames", "azt", "xalan", "hydroxyl", "medpix", "interacted", "gpi", "polishes", "canoga", "numismatic", "avoidable", "brantley", "adenoma", "aah", "prostaglandins", "powercolor", "beaconsfield", "lakhs", "mhd", "lesbisch", "flammability", "truancy", "jharkhand", "channelweb", "givn", "flatiron", "midlife", "guerin", "indianola", "unavailability", "rooter", "wanaka", "lompoc", "widener", "cll", "kmail", "websense", "vmi", "residencies", "cablevision", "pye", "disrupts", "onetime", "kenzie", "gating", "boingboing", "sevier", "eberhard", "chek", "edr", "kharagpur", "fotze", "cvp", "deflated", "infestations", "judgmental", "meiji", "antipsychotic", "uwm", "infn", "slaughterhouse", "stix", "asg", "bagging", "brainwashing", "dmp", "disconnecting", "thera", "mclellan", "rong", "telcos", "wilmer", "sphincter", "orgys", "newsom", "infill", "fairhaven", "etude", 
"stereotyping", "talib", "dreamstime", "rearranging", "geographies", "tipp", "programmatically", "handicapper", "plantar", "ogaming", "xss", "academie", "quarrying", "approachable", "sweetener", "braised", "knut", "tibco", "fseek", "vided", "burk", "spigot", "skilling", "hunterdon", "nailer", "roxette", "hepatocytes", "coupes", "universitet", "mauricio", "lov", "hnd", "roseburg", "berlusconi", "chloroplast", "charing", "kansai", "buzzword", "nepad", "pistachio", "arv", "lanvin", "riverbank", "lilypond", "predominately", "metalware", "saugus", "nmac", "giza", "lancs", "culpepper", "rohm", "pretzel", "warping", "twc", "raitt", "iyer", "connotations", "iiia", "wilber", "yardstick", "neutrophil", "supernatant", "solu", "segmental", "multitudes", "imperium", "radley", "supercharger", "imagen", "thicknesses", "brk", "spew", "vestibular", "klausner", "riba", "witten", "orth", "calaveras", "naep", "deceleration", "bcn", "consignee", "aldehyde", "pronged", "baring", "jacked", "bigalow", "gyd", "centerfolds", "ortofon", "cropland", "wnt", "nazism", "kingswood", "operationally", "trix", "testicle", "rioja", "bhi", "technolo", "lindstrom", "pinter", "minox", "wofford", "guaifenesin", "hup", "bifida", "stratigraphic", "dundalk", "snipers", "kshirsagar", "ridgecrest", "placerville", "gosport", "sjc", "ircd", "rubrics", "kerouac", "ebx", "harken", "foc", "cooperated", "nwo", "cano", "kearny", "shopinfo", "tlb", "etp", "obie", "greaves", "versity", "amoco", "inzest", "msdos", "gabby", "dumbbells", "ncaaf", "ximage", "homotopy", "ironwood", "adiabatic", "pend", "licznik", "cck", "sabian", "saxton", "patties", "hopkinton", "biotherm", "ethno", "videochat", "cantwell", "accelerometer", "filip", "whl", "productio", "milli", "pdi", "bedava", "penobscot", "grav", "llcs", "fmr", "pimsleur", "micky", "setcl", "johnathan", "alisha", "gambier", "enterta", "crosley", "usace", "byrds", "sgm", "darrel", "isola", "laminator", "krazy", "diaryland", "bhubaneshwar", "quadrature", "summerland", "alessandra", "gsn", "dentry", "catskills", "tablecloths", "herder", "gec", "cinematical", "outfall", "unzipped", "plcc", "osb", "interchangeably", "concurs", "wef", "deformations", "farting", "nonspecific", "mek", "ohhh", "atopic", "harker", "culling", "limon", "murata", "zealot", "arca", "jmc", "toot", "rino", "sisley", "iveco", "gooey", "bielefeld", "parrott", "veillard", "lisinopril", "nprm", "tookie", "shanti", "burkett", "wemon", "turmeric", "carnelian", "zea", "geom", "dorman", "hmac", "abstracting", "parietal", "glyphosate", "underpants", "appleseed", "mandating", "prequalification", "macross", "kondo", "muzi", "bidet", "grubb", "redif", "oam", "domenici", "transdermal", "abramson", "recreating", "snot", "ductile", "dimensionless", "carex", "contractually", "kippur", "fibroids", "courtyards", "calderon", "dogster", "flattening", "sterilized", "pkcs", "unformatted", "cvr", "insulate", "afd", "tuolumne", "cobblestone", "showplace", "stockpiles", "mandir", "autore", "ashish", "meijer", "camberley", "babson", "fiennes", "meteorologist", "colonoscopy", "lofi", "tryp", "duromine", "alkaloids", "quesnel", "ake", "initrd", "centrality", "pisses", "campaigned", "twinning", "imag", "taster", "greenlight", "musicbrainz", "sourdough", "warrantless", "mzm", "croat", "arbors", "canwest", "homedics", "anydvd", "jnr", "odm", "dnn", "ashtrays", "punters", "dropper", "sarkar", "szabo", "wack", "ecx", "fette", "axl", "yoy", "spyro", "kendo", "surinam", "suze", "xenophobia", "krypton", "heisenberg", "dvcam", "nary", "ninn", "csis", 
"reconfigurable", "smil", "courchevel", "kittie", "lipman", "doz", "bsl", "chucky", "schlampe", "webdev", "doubleclick", "bushman", "pornofilm", "ood", "conexant", "hydroxylase", "rme", "multipass", "woodwinds", "telefoon", "ricotta", "motorways", "gandhinagar", "nsg", "edelweiss", "frampton", "humidor", "vacationing", "naturalizer", "dinesh", "techassist", "airdrie", "schiphol", "bruner", "tangy", "cfe", "gurnee", "bogdan", "farina", "gant", "cokin", "tricity", "cutaway", "artsy", "severability", "transferor", "cliches", "nosferatu", "indycar", "klimt", "onetouch", "dooney", "oconee", "smartbargains", "prl", "sackville", "camberwell", "hotlines", "hazelton", "nlg", "reaffirms", "anleitung", "webalizer", "libboost", "golds", "pfs", "imei", "corante", "recipesource", "ranching", "seguin", "calderdale", "anzeige", "toothpick", "volser", "westcoast", "forwarders", "aab", "likable", "ashburton", "natrol", "sonstiges", "shoestring", "vsx", "hosa", "brads", "winsite", "whirling", "doghouse", "displaytime", "bda", "ranitidine", "elit", "grebe", "standup", "playgirl", "flexion", "ibex", "geomagnetic", "lowestoft", "blobs", "footers", "reiss", "lewistown", "droppings", "designator", "causative", "brt", "woolrich", "gwasanaethau", "keefe", "tfp", "loveseat", "diethylpropion", "karyn", "handedly", "uncontested", "fov", "doxorubicin", "nerja", "cardiologists", "militarily", "fsus", "inflating", "sputnik", "barometric", "joburg", "assertequals", "gladwell", "regrowth", "lusaka", "lampwork", "adultos", "cybersex", "banca", "doughnut", "martz", "cribbage", "mela", "rondo", "tigr", "personel", "wcpo", "activ", "uiconstraints", "typescript", "inetd", "scuola", "piste", "pppd", "enos", "ondemand", "altamont", "steubenville", "rur", "danielson", "barfly", "vegetarianism", "extractors", "dictaphone", "callsign", "martinis", "envisions", "flexibly", "nakd", "natwest", "wilsons", "ccn", "reposition", "msci", "orginal", "hobbyists", "anat", "fleshbot", "weta", "sindh", "pcf", "glick", "obsoletes", "mammogram", "sani", "webcasting", "soggy", "apha", "ecologist", "ararat", "narrowband", "bph", "webstore", "maus", "reinstalling", "gendered", "relateddiagram", "kingsland", "ssid", "rackets", "litigants", "shimon", "ducted", "ebsq", "crisps", "modelle", "wristwatches", "xenadrine", "linac", "identifications", "dressy", "authenticator", "arash", "cristobal", "stewie", "depositories", "pcre", "setpoint", "rockdale", "evita", "ballmer", "hemphill", "taormina", "plath", "pickers", "boardgamegeek", "serbo", "oci", "noviembre", "mappoint", "surn", "minisd", "madmums", "mosher", "digitallife", "grahame", "forecasters", "linoleum", "shearling", "stockster", "firstcall", "dorint", "wmc", "culverts", "cuticle", "codebase", "rdfs", "lter", "pimples", "hdb", "shorted", "loghi", "spunky", "razz", "komatsu", "bietet", "madisonville", "readies", "jovenes", "deuterium", "totalitarianism", "trigonometric", "selmer", "popcap", "verbosity", "aashto", "pavarotti", "syncing", "vanden", "majeure", "beret", "fallbrook", "audiovideo", "muay", "longshot", "rollaway", "yor", "nonstandard", "tbr", "manoa", "laundries", "whoo", "tefal", "tothe", "crv", "amx", "falign", "goleta", "holst", "ebola", "redbook", "rangel", "consolidates", "disaggregated", "chromatographic", "supersport", "golly", "flumotion", "seagrass", "congratulates", "anais", "grievant", "reinstalled", "entreprises", "clemons", "eurovision", "airplus", "panchkula", "shahid", "phospholipids", "elsinore", "opendocument", "ankeny", "canzoni", "wakeman", "moana", "wobbly", 
"seagulls", "megawatts", "denning", "temas", "illuminator", "marylebone", "symbolically", "erotico", "linx", "randle", "nhu", "unsubstantiated", "centroid", "monogrammed", "gambian", "tailgating", "colville", "vpu", "russische", "sgp", "soccernet", "zing", "downunder", "snips", "allawi", "lockup", "cholinergic", "lhr", "barthelemy", "babymint", "benning", "implantable", "ligo", "haddad", "univariate", "katia", "motorcross", "sangha", "shn", "myfonts", "usuarios", "caml", "resiliency", "barossa", "astrobiology", "disinfectants", "kawai", "uktv", "dreamtime", "berkshires", "inhumane", "trobe", "unlocks", "auctex", "pogues", "panicked", "developerworks", "bullitt", "toed", "smartcard", "kushner", "hardcoresex", "crump", "gunderson", "paramus", "cepr", "lma", "politica", "randomization", "rinsing", "reschedule", "tob", "hostal", "preempt", "resold", "cyclo", "phosphor", "frontenac", "wipeout", "mambots", "unscented", "ipfw", "ergonomically", "roosters", "homologues", "loring", "ionosphere", "belvidere", "trotsky", "airworthiness", "sistemas", "devsource", "retroviral", "llnl", "keyloggers", "amgen", "marci", "willey", "yau", "groucho", "foreshore", "gusset", "dissapointed", "dtds", "mibs", "metalwork", "refering", "punting", "triphasil", "scab", "bhavnagar", "creedence", "musee", "wellstone", "lleol", "gpib", "tidbit", "allyson", "teriyaki", "impoundment", "interrelationships", "gres", "coffeecup", "maru", "joon", "josephus", "ulong", "maputo", "chev", "krispy", "dogtown", "abernathy", "raz", "fermion", "weltweit", "fluor", "bergstrom", "inoperable", "esrc", "asdf", "gollum", "ceus", "macintyre", "srd", "cyclonic", "cft", "unsubscribing", "shawna", "pinyin", "ipac", "ramone", "fethiye", "multipath", "hakusho", "tein", "treeview", "atd", "wonderswan", "eugenics", "dustjacket", "emmanuelle", "dlocaledir", "molotov", "sandpaper", "hbc", "fannin", "interscope", "eba", "melayu", "hardiness", "liss", "phew", "furuno", "moynihan", "johnsons", "heng", "dro", "carbonated", "waives", "wraparound", "jfs", "ejackulation", "reboots", "headliner", "sqr", "bustin", "powernetworker", "vul", "superposition", "supremes", "insite", "fanzine", "laney", "purportedly", "antigenic", "rurouni", "dietetics", "assembles", "veracruz", "hausfrauen", "wsf", "benzo", "vietcong", "chairwoman", "petrochemicals", "pata", "cntr", "nettime", "techies", "bentyxxo", "xango", "radish", "gatto", "checkmate", "gantt", "valli", "tuv", "starlets", "plavix", "roomba", "aficionado", "motivator", "bijan", "riv", "storrs", "tabula", "reigate", "emmons", "sandstorm", "laci", "taoist", "nameplate", "axp", "wcb", "mothering", "billard", "chrysanthemum", "reconstructions", "innodb", "sunspot", "aisha", "fluorine", "healdsburg", "retype", "fishin", "likud", "cyberread", "pme", "rothwell", "kmf", "creationist", "wth", "setlist", "scrollbars", "bocelli", "zuckerman", "vtd", "ampicillin", "arcy", "wasn", "cowbell", "rater", "everson", "angebot", "cezanne", "tamagotchi", "earpiece", "franca", "thymidine", "disa", "gearlog", "tranche", "volum", "prsp", "openvpn", "mcentire", "londra", "kaur", "unconstrained", "datadirect", "souter", "redfern", "tulum", "nyy", "pagesize", "osteopathy", "stavanger", "cated", "autry", "fip", "rooftops", "findpage", "discourages", "benitez", "boater", "shackleton", "weirdo", "congresswoman", "dalek", "tass", "itrip", "myob", "helloween", "reperfusion", "fieldhouse", "manukau", "libname", "eucharistic", "mong", "homeware", "ckt", "winmx", "mobic", "farts", "rourke", "lackawanna", "villiers", "comercio", "huy", 
"brooksville", "falwell", "gwb", "donwload", "wrth", "attrs", "knockoffs", "esm", "bionicle", "hygienist", "nichole", "quidditch", "dartmoor", "rowlett", "stapled", "gardenweb", "butternut", "nummer", "groban", "asw", "arora", "yatsura", "warr", "hainan", "esg", "logoff", "cockroach", "xanadu", "computable", "occup", "playgroup", "tintin", "ethnicities", "webposition", "crafter", "roby", "disassemble", "boltzmann", "caos", "abidjan", "anise", "grainy", "hospitalizations", "notizie", "zoek", "sepultura", "walkabout", "pepperoni", "optimising", "cityreview", "boathouse", "katt", "weissman", "siri", "herkimer", "namecite", "refreshingly", "aph", "ryland", "sculptural", "neurophysiology", "gsk", "hermanus", "mocldy", "ngage", "annexure", "ipchains", "yosef", "tlds", "gozo", "pso", "helton", "outflows", "saas", "asthmatic", "guillemot", "realizations", "linguistically", "jaco", "mckinsey", "dezember", "hylafax", "reconstitution", "amateurwebcam", "lumberton", "interviewee", "intereco", "portola", "hematologic", "sgc", "rebbe", "pinup", "transcendence", "surah", "brendon", "farberware", "statisticians", "swatches", "perioperative", "maoist", "henkel", "lilangeni", "trapeze", "lemmings", "extents", "spams", "omagh", "workcentre", "sunbird", "cellophane", "deland", "blevins", "sacha", "cardholders", "dddd", "accessori", "qo", "araujo", "mylist", "pcu", "kloczek", "enet", "seperated", "clusty", "rolfe", "cuttack", "provantage", "dominio", "hyperbaric", "nannofossil", "logansport", "bulldozer", "blacksonblondes", "subprime", "overpayments", "sharpie", "modutils", "whitehaven", "whaley", "currier", "taproot", "topsite", "delorme", "rayner", "aio", "rossum", "urbanism", "colloquia", "ewr", "capillaries", "mountainside", "menthol", "blackouts", "starkey", "eves", "hpux", "canby", "dragonflies", "montrail", "findfont", "aigner", "urusei", "soundblaster", "beatle", "webzine", "propranolol", "inescapable", "swabs", "absorbance", "lbw", "audiofile", "simba", "mohd", "redgoldfish", "cornbread", "jcaho", "appendixes", "aod", "crestview", "keynotes", "fotolia", "subnets", "cau", "espanola", "busnes", "froggy", "decarboxylase", "elfman", "throughs", "prioritise", "oreck", "schottland", "bagpipe", "terns", "erythematosus", "ftrs", "excitatory", "mcevoy", "fujita", "niagra", "yq", "dribble", "hardwired", "hosta", "grambling", "exten", "seeger", "ringgold", "sondheim", "interconnecting", "inkjets", "ebv", "underpinnings", "lazar", "laxatives", "mythos", "soname", "colloid", "hiked", "defrag", "zanesville", "oxidant", "umbra", "poppin", "trebuchet", "pyrite", "partido", "drunks", "submitters", "branes", "mahdi", "agoura", "manchesteronline", "blunkett", "lapd", "kidder", "hotkey", "tirupur", "parkville", "crediting", "tmo"] | gpl-3.0 |
grundgruen/zipline | zipline/utils/data_source_tables_gen.py | 40 | 7380 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import getopt
import traceback
import numpy as np
import pandas as pd
import datetime
import logging
import tables
import gzip
import glob
import os
import random
import csv
import time
from six import print_
FORMAT = "%(asctime)-15s %(levelname)-8s %(message)s"
logging.basicConfig(format=FORMAT, level=logging.INFO)
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
OHLCTableDescription = {'sid': tables.StringCol(14, pos=2),
'dt': tables.Int64Col(pos=1),
'open': tables.Float64Col(dflt=np.NaN, pos=3),
'high': tables.Float64Col(dflt=np.NaN, pos=4),
'low': tables.Float64Col(dflt=np.NaN, pos=5),
'close': tables.Float64Col(dflt=np.NaN, pos=6),
"volume": tables.Int64Col(dflt=0, pos=7)}
def process_line(line):
dt = np.datetime64(line["dt"]).astype(np.int64)
sid = line["sid"]
open_p = float(line["open"])
high_p = float(line["high"])
low_p = float(line["low"])
close_p = float(line["close"])
volume = int(line["volume"])
return (dt, sid, open_p, high_p, low_p, close_p, volume)
def parse_csv(csv_reader):
    """Group csv rows by calendar day and yield (date_string, recarray) pairs."""
    previous_date = None
    data = []
    dtype = [('dt', 'int64'), ('sid', '|S14'), ('open', float),
             ('high', float), ('low', float), ('close', float),
             ('volume', int)]
    for line in csv_reader:
        row = process_line(line)
        current_date = line["dt"][:10].replace("-", "")
        if previous_date and previous_date != current_date:
            rows = np.array(data, dtype=dtype).view(np.recarray)
            # yield the just-completed day's rows under that day's date
            yield previous_date, rows
            data = []
        data.append(row)
        previous_date = current_date
    if data:
        # flush the final day, which the loop above would otherwise drop
        rows = np.array(data, dtype=dtype).view(np.recarray)
        yield previous_date, rows
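# Illustrative sketch (not part of the original source): parse_csv consumes a
# csv.DictReader over rows matching the expected header and yields one
# (date_string, recarray) pair per calendar day, e.g.
#
#   with gzip.open("bars.csv.gz") as fh:          # hypothetical file name
#       for day, rows in parse_csv(csv.DictReader(fh)):
#           print_(day, len(rows))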
def merge_all_files_into_pytables(file_dir, file_out):
"""
    Merge every gzipped csv file in file_dir into one pytables (HDF5) file, creating one table per trading day.
"""
    start = datetime.datetime.now()
out_h5 = tables.openFile(file_out,
mode="w",
title="bars",
filters=tables.Filters(complevel=9,
complib='zlib'))
table = None
for file_in in glob.glob(file_dir + "/*.gz"):
gzip_file = gzip.open(file_in)
expected_header = ["dt", "sid", "open", "high", "low", "close",
"volume"]
csv_reader = csv.DictReader(gzip_file)
header = csv_reader.fieldnames
if header != expected_header:
logging.warn("expected header %s\n" % (expected_header))
logging.warn("header_found %s" % (header))
return
for current_date, rows in parse_csv(csv_reader):
table = out_h5.createTable("/TD", "date_" + current_date,
OHLCTableDescription,
expectedrows=len(rows),
createparents=True)
table.append(rows)
table.flush()
if table is not None:
table.flush()
end = datetime.datetime.now()
diff = (end - start).seconds
logging.debug("finished it took %d." % (diff))
def create_fake_csv(file_in):
fields = ["dt", "sid", "open", "high", "low", "close", "volume"]
gzip_file = gzip.open(file_in, "w")
dict_writer = csv.DictWriter(gzip_file, fieldnames=fields)
current_dt = datetime.date.today() - datetime.timedelta(days=2)
current_dt = pd.Timestamp(current_dt).replace(hour=9)
current_dt = current_dt.replace(minute=30)
end_time = pd.Timestamp(datetime.date.today())
end_time = end_time.replace(hour=16)
last_price = 10.0
while current_dt < end_time:
row = {}
row["dt"] = current_dt
row["sid"] = "test"
last_price += random.randint(-20, 100) / 10000.0
row["close"] = last_price
row["open"] = last_price - 0.01
row["low"] = last_price - 0.02
row["high"] = last_price + 0.02
row["volume"] = random.randint(10, 1000) * 10
dict_writer.writerow(row)
current_dt += datetime.timedelta(minutes=1)
if current_dt.hour > 16:
current_dt += datetime.timedelta(days=1)
current_dt = current_dt.replace(hour=9)
current_dt = current_dt.replace(minute=30)
gzip_file.close()
def main(argv=None):
"""
    This script converts minute bar csv files into a pytables (HDF5) file.
    data_source_tables_gen.py
    [--tz_in] sets the time zone of the data (time.tzset() is the only
    reasonably fast way to do this)
    [--dir_in] iterates through the provided directory of gzipped csv files
    of the form:
    dt, sid, open, high, low, close, volume
    2012-01-01T12:30:30,1234HT,1,2,3,4.0,100
[--fake_csv] creates a fake sample csv to iterate through
[--file_out] determines output file
"""
if argv is None:
argv = sys.argv
try:
dir_in = None
file_out = "./all.h5"
fake_csv = None
try:
opts, args = getopt.getopt(argv[1:], "hdft",
["help",
"dir_in=",
"debug",
"tz_in=",
"fake_csv=",
"file_out="])
except getopt.error as msg:
raise Usage(msg)
for opt, value in opts:
if opt in ("--help", "-h"):
print_(main.__doc__)
if opt in ("-d", "--debug"):
logging.basicConfig(format=FORMAT,
level=logging.DEBUG)
if opt in ("-d", "--dir_in"):
dir_in = value
if opt in ("-o", "--file_out"):
file_out = value
if opt in ("--fake_csv"):
fake_csv = value
if opt in ("--tz_in"):
os.environ['TZ'] = value
time.tzset()
try:
if dir_in:
merge_all_files_into_pytables(dir_in, file_out)
if fake_csv:
create_fake_csv(fake_csv)
except Exception:
error = "An unhandled error occured in the"
error += "data_source_tables_gen.py script."
error += "\n\nTraceback:\n"
error += '-' * 70 + "\n"
error += "".join(traceback.format_tb(sys.exc_info()[2]))
error += repr(sys.exc_info()[1]) + "\n"
error += str(sys.exc_info()[1]) + "\n"
error += '-' * 70 + "\n"
print_(error)
except Usage as err:
print_(err.msg)
print_("for help use --help")
return 2
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 |
mtat76/atm-py | atmPy/aerosols/size_distr/sizedistribution.py | 6 | 80435 | import datetime
import warnings
from copy import deepcopy
import numpy as np
import pandas as pd
import pylab as plt
import scipy.optimize as optimization
from matplotlib.colors import LogNorm
from scipy import integrate
from scipy import stats
from atmPy.atmos import vertical_profile, timeseries
from atmPy.aerosols import hygroscopic_growth as hg
from atmPy.for_removal.mie import bhmie
from atmPy.tools import pandas_tools
from atmPy.tools import plt_tools, math_functions, array_tools
# Todo: rotate the plots of the layerseries (e.g. plot_particle_concentration) to have the altitude as the y-axes
# TODO: Fix distrTypes so they are consistent with our understanding.
distTypes = {'log normal': ['dNdlogDp', 'dSdlogDp', 'dVdlogDp'],
'natural': ['dNdDp', 'dSdDp', 'dVdDp'],
'number': ['dNdlogDp', 'dNdDp'],
'surface': ['dSdlogDp', 'dSdDp'],
'volume': ['dVdlogDp', 'dVdDp']}
axes_types = ('AxesSubplot', 'AxesHostAxes')
def fit_normal_dist(x, y, log=True, p0=[10, 180, 0.2]):
"""Fits a normal distribution to a """
param = p0[:]
x = x[~ np.isnan(y)]
y = y[~ np.isnan(y)]
if log:
x = np.log10(x)
param[1] = np.log10(param[1])
    # todo: write a bug report for the fact that I have to call the y.max() function to make the fit work!!!!!
y.max()
############
para = optimization.curve_fit(math_functions.gauss, x, y, p0=param)
amp = para[0][0]
sigma = para[0][2]
if log:
pos = 10 ** para[0][1]
sigma_high = 10 ** (para[0][1] + para[0][2])
sigma_low = 10 ** (para[0][1] - para[0][2])
else:
pos = para[0][1]
sigma_high = (para[0][1] + para[0][2])
sigma_low = (para[0][1] - para[0][2])
return [amp, pos, sigma, sigma_high, sigma_low]
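# Illustrative sketch (not from the original source; all numbers are assumptions):
#   d = np.logspace(1, 3, 50)                                  # diameters in nm
#   y = math_functions.gauss(np.log10(d), 10, np.log10(180), 0.2)
#   amp, pos, sigma, s_high, s_low = fit_normal_dist(d, y, log=True)
#   # pos should recover roughly 180 nm and amp roughly 10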
def read_csv(fname, fixGaps=True):
headerNo = 50
rein = open(fname, 'r')
nol = ['distributionType', 'objectType']
outDict = {}
for i in range(headerNo):
split = rein.readline().split('=')
variable = split[0].strip()
if split[0][0] == '#':
break
value = split[1].strip()
if variable in nol:
outDict[variable] = value
else:
outDict[variable] = np.array(eval(value))
if i == headerNo - 1:
raise TypeError('Sure this is a size distribution?')
rein.close()
data = pd.read_csv(fname, header=i + 1, index_col=0)
data.index = pd.to_datetime(data.index)
if outDict['objectType'] == 'SizeDist_TS':
distRein = SizeDist_TS(data, outDict['bins'], outDict['distributionType'], fixGaps=fixGaps)
elif outDict['objectType'] == 'SizeDist':
distRein = SizeDist(data, outDict['bins'], outDict['distributionType'], fixGaps=fixGaps)
elif outDict['objectType'] == 'SizeDist_LS':
distRein = SizeDist_LS(data, outDict['bins'], outDict['distributionType'], fixGaps=fixGaps)
else:
raise TypeError('not a valid object type')
return distRein
def read_hdf(f_name, keep_open = False, populate_namespace = False):
hdf = pd.HDFStore(f_name)
content = hdf.keys()
out = []
for i in content:
# print(i)
storer = hdf.get_storer(i)
attrs = storer.attrs.atmPy_attrs
if not attrs:
continue
elif attrs['type'].__name__ == 'SizeDist_TS':
dist_new = SizeDist_TS(hdf[i], attrs['bins'], attrs['distributionType'])
elif attrs['type'].__name__ == 'SizeDist':
dist_new = SizeDist(hdf[i], attrs['bins'], attrs['distributionType'])
elif attrs['type'].__name__ == 'SizeDist_LS':
dist_new = SizeDist_LS(hdf[i], attrs['bins'], attrs['distributionType'], attrs['layerbounderies'])
else:
txt = 'Unknown data type: %s'%attrs['type'].__name__
raise TypeError(txt)
fit_res = i+'/data_fit_normal'
if fit_res in content:
dist_new.data_fit_normal = hdf[fit_res]
if populate_namespace:
if attrs['variable_name']:
populate_namespace[attrs['variable_name']] = dist_new
out.append(dist_new)
if keep_open:
return hdf,out
else:
hdf.close()
return out
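# Illustrative usage (the file name is an assumption):
#   dists = read_hdf('sizedistributions.h5')
#   hdf, dists = read_hdf('sizedistributions.h5', keep_open=True)  # keeps the store open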
def get_label(distType):
""" Return the appropriate label for a particular distribution type
"""
if distType == 'dNdDp':
label = '$\mathrm{d}N\,/\,\mathrm{d}D_{P}$ (nm$^{-1}\,$cm$^{-3}$)'
elif distType == 'dNdlogDp':
label = '$\mathrm{d}N\,/\,\mathrm{d}log(D_{P})$ (cm$^{-3}$)'
elif distType == 'dSdDp':
label = '$\mathrm{d}S\,/\,\mathrm{d}D_{P}$ (nm$\,$cm$^{-3}$)'
elif distType == 'dSdlogDp':
label = '$\mathrm{d}S\,/\,\mathrm{d}log(D_{P})$ (nm$^2\,$cm$^{-3}$)'
elif distType == 'dVdDp':
label = '$\mathrm{d}V\,/\,\mathrm{d}D_{P}$ (nm$^2\,$cm$^{-3}$)'
elif distType == 'dVdlogDp':
label = '$\mathrm{d}V\,/\,\mathrm{d}log(D_{P})$ (nm$^3\,$cm$^{-3}$)'
elif distType == 'calibration':
label = '$\mathrm{d}N\,/\,\mathrm{d}Amp$ (bin$^{-1}\,$cm$^{-3}$)'
elif distType == 'numberConcentration':
label = 'Particle number in bin'
else:
raise ValueError('%s is not really an option!?!' % distType)
return label
# Todo: implement into the Layer Series
def _calculate_optical_properties(sd, wavelength, n, aod=False, noOfAngles=100):
"""
    Calculates the extinction cross section, AOD, phase function, and asymmetry parameter for each row of the size distribution.
    Plotting the layer- and diameter-dependent extinction coefficient gives you an idea what dominates the overall AOD.
Parameters
----------
wavelength: float.
wavelength of the scattered light, unit: nm
n: float.
Index of refraction of the scattering particles
noOfAngles: int, optional.
Number of scattering angles to be calculated. This mostly effects calculations which depend on the phase
function.
Returns
-------
OpticalProperty instance
"""
out = {}
out['n'] = n
out['wavelength'] = wavelength
sdls = sd.convert2numberconcentration()
index = sdls.data.index
if isinstance(n, pd.DataFrame):
n_multi = True
else:
n_multi = False
if not n_multi:
mie, angular_scatt_func = _perform_Miecalculations(np.array(sdls.bincenters / 1000.), wavelength / 1000., n,
noOfAngles=noOfAngles)
if aod:
AOD_layer = np.zeros((len(sdls.layercenters)))
extCoeffPerLayer = np.zeros((len(sdls.data.index.values), len(sdls.bincenters)))
angular_scatt_func_effective = pd.DataFrame()
asymmetry_parameter_LS = np.zeros((len(sdls.data.index.values)))
# print('\n oben mie.extinction_crossection: %s \n'%(mie.extinction_crossection))
for i, lc in enumerate(sdls.data.index.values):
laydata = sdls.data.iloc[i].values
# print('laydata: ',laydata.shape)
# print(laydata)
if n_multi:
mie, angular_scatt_func = _perform_Miecalculations(np.array(sdls.bincenters / 1000.), wavelength / 1000., n.iloc[i].values[0],
noOfAngles=noOfAngles)
extinction_coefficient = _get_coefficients(mie.extinction_crossection, laydata)
# print('\n oben ext_coef %s \n'%extinction_coefficient)
# print('mie.extinction_crossection ', mie.extinction_crossection.shape)
# print('extinction_coefficient: ', extinction_coefficient.shape)
# scattering_coefficient = _get_coefficients(mie.scattering_crossection, laydata)
if aod:
layerThickness = sdls.layerbounderies[i][1] - sdls.layerbounderies[i][0]
AOD_perBin = extinction_coefficient * layerThickness
AOD_layer[i] = AOD_perBin.values.sum()
extCoeffPerLayer[i] = extinction_coefficient
# return laydata, mie.scattering_crossection
scattering_cross_eff = laydata * mie.scattering_crossection
pfe = (laydata * angular_scatt_func).sum(axis=1) # sum of all angular_scattering_intensities
# pfe2 = pfe.copy()
# angular_scatt_func_effective[lc] = pfe
# asymmetry_parameter_LS[i] = (pfe.values*np.cos(pfe.index.values)).sum()/pfe.values.sum()
x_2p = pfe.index.values
y_2p = pfe.values
# limit to [0,pi]
y_1p = y_2p[x_2p < np.pi]
x_1p = x_2p[x_2p < np.pi]
# integ = integrate.simps(y_1p*np.sin(x_1p),x_1p)
# y_phase_func = y_1p/integ
y_phase_func = y_1p * 4 * np.pi / scattering_cross_eff.sum()
asymmetry_parameter_LS[i] = .5 * integrate.simps(np.cos(x_1p) * y_phase_func * np.sin(x_1p), x_1p)
# return mie,phase_fct, laydata, scattering_cross_eff, phase_fct_effective[lc], y_phase_func, asymmetry_parameter_LS[i]
angular_scatt_func_effective[
                lc] = pfe * 1e-12 * 1e6  # equivalent to extCoeffPerLayer # similar to _get_coefficients (converts everything to meter)
# return mie.extinction_crossection, angular_scatt_func, laydata, layerThickness # correct integrales match
# return extinction_coefficient, angular_scatt_func_effective
# return AOD_layer, pfe, angular_scatt_func_effective[lc]
# print(mie.extinction_crossection)
if aod:
out['AOD'] = AOD_layer[~ np.isnan(AOD_layer)].sum()
out['AOD_layer'] = pd.DataFrame(AOD_layer, index=sdls.layercenters, columns=['AOD per Layer'])
out['AOD_cum'] = out['AOD_layer'].iloc[::-1].cumsum().iloc[::-1]
extCoeff_perrow_perbin = pd.DataFrame(extCoeffPerLayer, index=index, columns=sdls.data.columns)
out['extCoeff_perrow_perbin'] = extCoeff_perrow_perbin
extCoeff_perrow = pd.DataFrame(extCoeff_perrow_perbin.sum(axis=1), columns=['ext_coeff'])
if index.dtype == '<M8[ns]':
out['extCoeff_perrow'] = timeseries.TimeSeries(extCoeff_perrow)
else:
out['extCoeff_perrow'] = extCoeff_perrow
out['asymmetry_param'] = pd.DataFrame(asymmetry_parameter_LS, index=index,
columns=['asymmetry_param'])
# out['asymmetry_param_alt'] = pd.DataFrame(asymmetry_parameter_LS_alt, index=sdls.layercenters, columns = ['asymmetry_param_alt'])
# out['OptPropInstance']= OpticalProperties(out, self.bins)
out['wavelength'] = wavelength
out['index_of_refraction'] = n
out['bin_centers'] = sdls.bincenters
out['angular_scatt_func'] = angular_scatt_func_effective
# opt_properties = OpticalProperties(out, self.bins)
# opt_properties.wavelength = wavelength
# opt_properties.index_of_refractio = n
    # opt_properties.angular_scatt_func = angular_scatt_func_effective # This is the former phase_fct, but since it is the angular scattering intensity, I changed the name
# opt_properties.parent_dist_LS = self
return out
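# Illustrative usage (wavelength in nm and the refractive index are assumptions):
#   out = _calculate_optical_properties(sd, 550., 1.455)
#   out['extCoeff_perrow']  # extinction coefficient for each row of sd.data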
class SizeDist(object):
"""
Object defining a log normal aerosol size distribution
Arguments
----------
bincenters: NumPy array, optional
this is if you actually want to pass the bincenters, if False they will be calculated
distributionType:
log normal: 'dNdlogDp','dSdlogDp','dVdlogDp'
natural: 'dNdDp','dSdDp','dVdDp'
number: 'dNdlogDp', 'dNdDp', 'numberConcentration'
surface: 'dSdlogDp','dSdDp'
volume: 'dVdlogDp','dVdDp'
data: pandas dataFrame, optional
None, will generate an empty pandas data frame with columns defined by bins
- pandas dataFrame with
- column names (each name is something like this: '150-200')
        - index is time (at some point this should be arbitrary, convertible to altitude for example?)
unit conventions:
- diameters: nanometers
        - flowrates: cc (otherwise, axis labels need to be adjusted and caution needs to be taken when dealing with AOD)
Notes
------
* Diameters are specified in nanometers
"""
# todo: write setters and getters for bins and bincenter, so when one is changed the otherone is automatically
# changed too
def __init__(self, data, bins, distrType,
# bincenters=False,
fixGaps=True):
if type(data).__name__ == 'NoneType':
self.data = pd.DataFrame()
else:
self.data = data
self.bins = bins
self.__index_of_refraction = None
self.__growth_factor = None
# if type(bincenters) == np.ndarray:
# self.bincenters = bincenters
# else:
# self.bincenters = (bins[1:] + bins[:-1]) / 2.
# self.binwidth = (bins[1:] - bins[:-1])
self.distributionType = distrType
if fixGaps:
self.fillGaps()
@property
def bins(self):
return self.__bins
@bins.setter
def bins(self,array):
bins_st = array.astype(int).astype(str)
col_names = []
for e,i in enumerate(bins_st):
if e == len(bins_st) - 1:
break
col_names.append(bins_st[e] + '-' + bins_st[e+1])
self.data.columns = col_names
self.__bins = array
self.__bincenters = (array[1:] + array[:-1]) / 2.
self.__binwidth = (array[1:] - array[:-1])
@property
def bincenters(self):
return self.__bincenters
@property
def binwidth(self):
return self.__binwidth
@property
def index_of_refraction(self):
return self.__index_of_refraction
@index_of_refraction.setter
def index_of_refraction(self,n):
# if not self.__index_of_refraction:
self.__index_of_refraction = n
# elif self.__index_of_refraction:
# txt = """Security stop. This is to prevent you from unintentionally changing this value.
# The index of refraction is already set to %.2f, either by you or by another function, e.g. apply_hygro_growth.
# If you really want to change the value do it by setting the __index_of_refraction attribute."""%self.index_of_refraction
# raise ValueError(txt)
@property
def growth_factor(self):
return self.__growth_factor
def apply_hygro_growth(self, kappa, RH, how = 'shift_bins'):
"""
how: string ['shift_bins', 'shift_data']
        If how is 'shift_bins' the growth factor has to be the same for all lines in
        data (important for timeseries and vertical profiles).
        If the growth factor changes from line to line (as is probably the case in TS and LS)
        you want to use 'shift_data'.
"""
if not self.index_of_refraction:
txt = '''The index_of_refraction attribute of this sizedistribution has not been set yet, please do so first!'''
raise ValueError(txt)
# out_I = {}
dist_g = self.copy()
dist_g.convert2numberconcentration()
gf,n_mix = hg.kappa_simple(kappa, RH, n = dist_g.index_of_refraction)
# out_I['growth_factor'] = gf
nat = ['int', 'float']
if type(kappa).__name__ in nat or type(RH).__name__ in nat:
if how != 'shift_bins':
txt = "When kappa or RH ar not arrays 'how' has to be equal to 'shift_bins'"
raise ValueError(txt)
if how == 'shift_bins':
if not isinstance(gf, (float,int)):
                txt = '''If how is equal to 'shift_bins' the growth factor (and hence RH) has to be a scalar (int or float).
                It is %s''' % (type(gf).__name__)
raise TypeError(txt)
dist_g.bins = dist_g.bins * gf
dist_g.__index_of_refraction = n_mix
elif how == 'shift_data':
test = dist_g._hygro_growht_shift_data(dist_g.data.values[0],dist_g.bins,gf.max())
bin_num = test['data'].shape[0]
data_new = np.zeros((dist_g.data.shape[0],bin_num))
for e,i in enumerate(dist_g.data.values):
out = dist_g._hygro_growht_shift_data(i,dist_g.bins,gf[e])
dt = out['data']
diff = bin_num - dt.shape[0]
dt = np.append(dt, np.zeros(diff))
data_new[e] = dt
df = pd.DataFrame(data_new)
df.index = dist_g.data.index
# return df
dist_g = SizeDist(df, test['bins'], dist_g.distributionType)
df = pd.DataFrame(n_mix, columns = ['index_of_refraction'])
df.index = dist_g.data.index
dist_g.index_of_refraction = df
else:
txt = '''How has to be either 'shift_bins' or 'shift_data'.'''
raise ValueError(txt)
dist_g.__growth_factor = pd.DataFrame(gf, index = dist_g.data.index, columns = ['Growth_factor'])
# out_I['size_distribution'] = dist_g
return dist_g
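    # Illustrative usage (kappa and RH values are assumptions; the index of
    # refraction has to be set first, see the docstring above):
    #   sd.index_of_refraction = 1.5
    #   grown = sd.apply_hygro_growth(0.3, 85, how='shift_bins')
    #   grown.growth_factor                      # growth factor applied to the bins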
def _hygro_growht_shift_data(self, data, bins, gf):
"""data: 1D array
bins: 1D array
gf: float"""
bins = bins.copy()
if np.any(gf < 1):
txt = 'Growth factor must be equal or larger than 1. No shrinking!!'
raise ValueError(txt)
shifted = bins*gf
ml = array_tools.find_closest(bins, shifted, how='closest_low')
mh = array_tools.find_closest(bins, shifted, how='closest_high')
if np.any((mh - ml) > 1):
raise ValueError('shifted bins spans over more than two of the original bins, programming required ;-)')
no_extra_bins = bins[ml].shape[0] - np.unique(bins[ml]).shape[0] + 1
######### Ad bins to shift data into
last_two = np.log10(bins[- (no_extra_bins + 1):])
step_width = last_two[-1] - last_two[-2]
new_bins = np.zeros(no_extra_bins)
for i in range(no_extra_bins):
new_bins[i] = np.log10(bins[-1]) + ((i + 1) * step_width)
newbins = 10**new_bins
bins = np.append(bins,newbins)
shifted = (bins * gf)[:-no_extra_bins]
######## and again ########################
ml = array_tools.find_closest(bins, shifted, how='closest_low')
mh = array_tools.find_closest(bins, shifted, how='closest_high')
if np.any((mh - ml) > 1):
raise ValueError('shifted bins spans over more than two of the original bins, programming required ;-)')
        ##### percentage of particles moved to next bin
shifted_w = shifted[1:] - shifted[:-1]
fract_first = (bins[mh] - shifted)[:-1]/shifted_w
fract_last = (shifted - bins[ml])[1:]/shifted_w
data_new = np.zeros(data.shape[0]+ no_extra_bins)
data_new[no_extra_bins - 1:-1] += fract_first * data
data_new[no_extra_bins:] += fract_last * data
# data = np.append(data, np.zeros(no_extra_bins))
out = {}
out['bins'] = bins
out['data'] = data_new
out['num_extr_bins'] = no_extra_bins
return out
# def grow_particles(self, shift=1):
# """This function shifts the data by "shift" columns to the right
# Argurments
# ----------
# shift: int.
# number of columns to shift.
#
# Returns
# -------
# New dist_LS instance
# Growth ratio (mean,std) """
#
# dist_grow = self.copy()
# gf = dist_grow.bincenters[shift:] / dist_grow.bincenters[:-shift]
# gf_mean = gf.mean()
# gf_std = gf.std()
#
# shape = dist_grow.data.shape[1]
# dist_grow.data[:] = 0
# dist_grow.data.iloc[:, shift:] = self.data.values[:, :shape - shift]
#
# return dist_grow, (gf_mean, gf_std)
def calculate_optical_properties(self, wavelength, n):
out = _calculate_optical_properties(self, wavelength, n)
return out
def fillGaps(self, scale=1.1):
"""
Finds gaps in dataset (e.g. when instrument was shut of) and fills them with zeros.
It adds one line of zeros to the beginning and one to the end of the gap.
Therefore the gap is visible as zeros instead of the interpolated values
Parameters
----------
scale: float, optional
            A gap is detected wherever the time step exceeds scale times the median time step.
"""
diff = self.data.index[1:].values - self.data.index[0:-1].values
threshold = np.median(diff) * scale
where = np.where(diff > threshold)[0]
if len(where) != 0:
warnings.warn('The dataset provided had %s gaps' % len(where))
gap_start = self.data.index[where]
gap_end = self.data.index[where + 1]
for gap_s in gap_start:
self.data.loc[gap_s + threshold] = np.zeros(self.bincenters.shape)
for gap_e in gap_end:
self.data.loc[gap_e - threshold] = np.zeros(self.bincenters.shape)
self.data = self.data.sort_index()
return
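    # Sketch of the rule implemented above (assuming a 1 s cadence): a 10 s hole
    # in the index exceeds threshold = median(diff) * scale, so one all-zero row
    # is inserted just after the gap start and one just before the gap end.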
def fit_normal(self, log=True, p0=[10, 180, 0.2]):
""" Fits a single normal distribution to each line in the data frame.
Returns
-------
pandas DataFrame instance (also added to namespace as data_fit_normal)
"""
sd = self.copy()
if sd.distributionType != 'dNdlogDp':
if sd.distributionType == 'calibration':
pass
else:
warnings.warn(
"Size distribution is not in 'dNdlogDp'. I temporarily converted the distribution to conduct the fitting. If that is not what you want, change the code!")
sd = sd.convert2dNdlogDp()
n_lines = sd.data.shape[0]
amp = np.zeros(n_lines)
pos = np.zeros(n_lines)
sigma = np.zeros(n_lines)
sigma_high = np.zeros(n_lines)
sigma_low = np.zeros(n_lines)
for e, lay in enumerate(sd.data.values):
try:
fit_res = fit_normal_dist(sd.bincenters, lay, log=log, p0=p0)
except (ValueError, RuntimeError):
fit_res = [np.nan, np.nan, np.nan, np.nan, np.nan]
amp[e] = fit_res[0]
pos[e] = fit_res[1]
sigma[e] = fit_res[2]
sigma_high[e] = fit_res[3]
sigma_low[e] = fit_res[4]
df = pd.DataFrame()
df['Amp'] = pd.Series(amp)
df['Pos'] = pd.Series(pos)
df['Sigma'] = pd.Series(sigma)
df['Sigma_high'] = pd.Series(sigma_high)
df['Sigma_low'] = pd.Series(sigma_low)
# df.index = self.layercenters
self.data_fit_normal = df
return self.data_fit_normal
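    # Illustrative usage:
    #   res = sd.fit_normal()         # DataFrame with Amp, Pos, Sigma columns
    #   f, a = sd.plot(fit_res=True)  # overlays the fitted normal distribution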
def get_particle_concentration(self):
""" Returns the sum of particles per line in data
Returns
-------
int: if data has only one line
pandas.DataFrame: else """
sd = self.convert2numberconcentration()
particles = np.zeros(sd.data.shape[0])
for e, line in enumerate(sd.data.values):
particles[e] = line.sum()
if sd.data.shape[0] == 1:
return particles[0]
else:
df = pd.DataFrame(particles, index=sd.data.index, columns=['Count_rate'])
return df
def plot(self,
showMinorTickLabels=True,
removeTickLabels=["700", "900"],
fit_res=True,
fit_res_scale = 'log',
ax=None,
):
"""
Plots and returns f,a (figure, axis).
Arguments
---------
showMinorTickLabels: bool [True], optional
            whether minor tick labels are labeled
        removeTickLabels: list of string ["700", "900"], optional
            list of tick labels that ought to be removed (in case they overlap)
        fit_res: bool [True], optional
            allows plotting of fit results if fit_normal was previously executed
        fit_res_scale: string ['log']
            If fit_normal was done using log=False, you want to set this to 'linear'!
ax: axis object [None], optional
option to provide axis to plot on
Returns
-------
Handles to the figure and axes of the figure.
"""
if type(ax).__name__ in axes_types:
a = ax
f = a.get_figure()
else:
f, a = plt.subplots()
g, = a.plot(self.bincenters, self.data.loc[0], color=plt_tools.color_cycle[0], linewidth=2, label='exp.')
g.set_drawstyle('steps-mid')
a.set_xlabel('Particle diameter (nm)')
label = get_label(self.distributionType)
a.set_ylabel(label)
a.set_xscale('log')
if fit_res:
if 'data_fit_normal' in dir(self):
amp, pos, sigma = self.data_fit_normal.values[0, :3]
if fit_res_scale == 'log':
normal_dist = math_functions.gauss(np.log10(self.bincenters), amp, np.log10(pos), sigma)
elif fit_res_scale =='linear':
normal_dist = math_functions.gauss(self.bincenters, amp, pos, sigma)
else:
txt = '"fit_res_scale has to be either log or linear'
raise ValueError(txt)
a.plot(self.bincenters, normal_dist, color=plt_tools.color_cycle[1], linewidth=2,
label='fit with norm. dist.')
a.legend()
return f, a
def convert2dNdDp(self):
return self._convert2otherDistribution('dNdDp')
def convert2dNdlogDp(self):
return self._convert2otherDistribution('dNdlogDp')
def convert2dSdDp(self):
return self._convert2otherDistribution('dSdDp')
def convert2dSdlogDp(self):
return self._convert2otherDistribution('dSdlogDp')
def convert2dVdDp(self):
return self._convert2otherDistribution('dVdDp')
def convert2dVdlogDp(self):
return self._convert2otherDistribution('dVdlogDp')
def convert2numberconcentration(self):
return self._convert2otherDistribution('numberConcentration')
def copy(self):
return deepcopy(self)
def save_csv(self, fname, header=True):
if header:
raus = open(fname, 'w')
raus.write('bins = %s\n' % self.bins.tolist())
raus.write('distributionType = %s\n' % self.distributionType)
raus.write('objectType = %s\n' % (type(self).__name__))
raus.write('#\n')
raus.close()
self.data.to_csv(fname, mode='a')
return
def save_hdf(self, hdf, variable_name = None, info = '', force = False):
if variable_name:
table_name = '/atmPy/aerosols/sizedistribution/'+variable_name
if table_name in hdf.keys():
if not force:
txt = 'Table name (variable_name) exists. If you want to overwrite it set force to True.'
raise KeyError(txt)
else:
e = 0
while 1:
table_name = '/atmPy/aerosols/sizedistribution/'+ type(self).__name__ + '_%.3i'%e
if table_name in hdf.keys():
e+=1
else:
break
hdf.put(table_name, self.data)
storer = hdf.get_storer(table_name)
attrs = {}
attrs['variable_name'] = variable_name
attrs['info'] = info
attrs['type'] = type(self)
attrs['bins'] = self.bins
attrs['index_of_refraction'] = self.index_of_refraction
attrs['distributionType'] = self.distributionType
if 'layerbounderies' in dir(self):
attrs['layerbounderies'] = self.layerbounderies
storer.attrs.atmPy_attrs = attrs
if 'data_fit_normal' in dir(self):
table_name = table_name + '/data_fit_normal'
hdf.put(table_name, self.data_fit_normal)
storer = hdf.get_storer(table_name)
storer.attrs.atmPy_attrs = None
return hdf
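    # Illustrative usage (the file name is an assumption):
    #   hdf = pd.HDFStore('out.h5')
    #   sd.save_hdf(hdf, variable_name='my_dist')
    #   hdf.close()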
def zoom_diameter(self, start=None, end=None):
sd = self.copy()
if start:
startIdx = array_tools.find_closest(sd.bins, start)
else:
startIdx = 0
if end:
endIdx = array_tools.find_closest(sd.bins, end)
else:
endIdx = len(self.bincenters)
# size_distr.binwidth = self.binwidth[startIdx:endIdx]
sd.data = self.data.iloc[:, startIdx:endIdx]
sd.bins = self.bins[startIdx:endIdx + 1]
# size_distr.bincenters = self.bincenters[startIdx:endIdx]
return sd
def _normal2log(self):
trans = (self.bincenters * np.log(10.))
return trans
def _2Surface(self):
trans = 4. * np.pi * (self.bincenters / 2.) ** 2
return trans
def _2Volume(self):
trans = 4. / 3. * np.pi * (self.bincenters / 2.) ** 3
return trans
def _convert2otherDistribution(self, distType, verbose=False):
dist = self.copy()
if dist.distributionType == distType:
if verbose:
warnings.warn(
'Distribution type is already %s. Output is an unchanged copy of the distribution' % distType)
return dist
if dist.distributionType == 'numberConcentration':
pass
elif distType == 'numberConcentration':
pass
elif dist.distributionType in distTypes['log normal']:
if distType in distTypes['log normal']:
if verbose:
print('both log normal')
else:
dist.data = dist.data / self._normal2log()
elif dist.distributionType in distTypes['natural']:
if distType in distTypes['natural']:
if verbose:
print('both natural')
else:
dist.data = dist.data * self._normal2log()
else:
raise ValueError('%s is not an option' % distType)
if dist.distributionType == 'numberConcentration':
pass
elif distType == 'numberConcentration':
pass
elif dist.distributionType in distTypes['number']:
if distType in distTypes['number']:
if verbose:
print('both number')
else:
if distType in distTypes['surface']:
dist.data *= self._2Surface()
elif distType in distTypes['volume']:
dist.data *= self._2Volume()
else:
raise ValueError('%s is not an option' % distType)
elif dist.distributionType in distTypes['surface']:
if distType in distTypes['surface']:
if verbose:
print('both surface')
else:
if distType in distTypes['number']:
dist.data /= self._2Surface()
elif distType in distTypes['volume']:
dist.data *= self._2Volume() / self._2Surface()
else:
raise ValueError('%s is not an option' % distType)
elif dist.distributionType in distTypes['volume']:
if distType in distTypes['volume']:
if verbose:
print('both volume')
else:
if distType in distTypes['number']:
dist.data /= self._2Volume()
elif distType in distTypes['surface']:
dist.data *= self._2Surface() / self._2Volume()
else:
raise ValueError('%s is not an option' % distType)
else:
raise ValueError('%s is not an option' % distType)
if distType == 'numberConcentration':
dist = dist.convert2dNdDp()
dist.data *= self.binwidth
elif dist.distributionType == 'numberConcentration':
dist.data = dist.data / self.binwidth
dist.distributionType = 'dNdDp'
dist = dist._convert2otherDistribution(distType)
dist.distributionType = distType
if verbose:
print('converted from %s to %s' % (self.distributionType, dist.distributionType))
return dist
class SizeDist_TS(SizeDist):
"""Returns a SizeDistribution_TS instance.
Parameters:
-----------
data: pandas dataFrame with
- column names (each name is something like this: '150-200')
        - index is time (at some point this should be arbitrary, convertible to altitude for example?)
unit conventions:
- diameters: nanometers
        - flowrates: cc (otherwise, axis labels need to be adjusted and caution needs to be taken when dealing with AOD)
distributionType:
log normal: 'dNdlogDp','dSdlogDp','dVdlogDp'
natural: 'dNdDp','dSdDp','dVdDp'
number: 'dNdlogDp', 'dNdDp', 'numberConcentration'
surface: 'dSdlogDp','dSdDp'
volume: 'dVdlogDp','dVdDp'
"""
def fit_normal(self, log=True, p0=[10, 180, 0.2]):
""" Fits a single normal distribution to each line in the data frame.
Returns
-------
pandas DataFrame instance (also added to namespace as data_fit_normal)
"""
super(SizeDist_TS, self).fit_normal(log=log, p0=p0)
self.data_fit_normal.index = self.data.index
return self.data_fit_normal
def _getXYZ(self):
"""
This will create three arrays, so when plotted with pcolor each pixel will represent the exact bin width
"""
binArray = np.repeat(np.array([self.bins]), self.data.index.shape[0], axis=0)
timeArray = np.repeat(np.array([self.data.index.values]), self.bins.shape[0], axis=0).transpose()
ext = np.array([np.zeros(self.data.index.values.shape)]).transpose()
Z = np.append(self.data.values, ext, axis=1)
return timeArray, binArray, Z
def get_timespan(self):
return self.data.index.min(), self.data.index.max()
# TODO: Fix plot options such as showMinorTickLabels
def plot(self,
vmax=None,
vmin=None,
norm='linear',
showMinorTickLabels=True,
# removeTickLabels=["700", "900"],
ax=None,
fit_pos=True,
cmap=plt_tools.get_colorMap_intensity(),
colorbar=True):
""" plots an intensity plot of all data
Arguments
---------
        norm (optional): ('log', ['linear']) - defines how the z-direction is scaled
        vmax
        vmin
        showMinorTickLabels:
        cmap:
        fit_pos: bool [True], optional
            plots the position of a fitted normal distribution onto the plot;
            in order for this to work, execute fit_normal first
ax (optional): axes instance [None] - option to plot on existing axes
Returns
-------
f,a,pc,cb (figure, axis, pcolormeshInstance, colorbar)
"""
X, Y, Z = self._getXYZ()
Z = np.ma.masked_invalid(Z)
if type(ax).__name__ in axes_types:
a = ax
f = a.get_figure()
else:
f, a = plt.subplots()
f.autofmt_xdate()
if norm == 'log':
norm = LogNorm()
elif norm == 'linear':
norm = None
pc = a.pcolormesh(X, Y, Z, vmin=vmin, vmax=vmax, norm=norm, cmap=cmap)
a.set_yscale('log')
a.set_ylim((self.bins[0], self.bins[-1]))
a.set_xlabel('Time (UTC)')
a.get_yaxis().set_tick_params(direction='out', which='both')
a.get_xaxis().set_tick_params(direction='out', which='both')
if self.distributionType == 'calibration':
a.set_ylabel('Amplitude (digitizer bins)')
else:
a.set_ylabel('Diameter (nm)')
if colorbar:
cb = f.colorbar(pc)
label = get_label(self.distributionType)
cb.set_label(label)
else:
cb = get_label(self.distributionType)
# if self.distributionType != 'calibration':
# a.yaxis.set_major_formatter(plt.FormatStrFormatter("%i"))
# f.canvas.draw() # this is important, otherwise the ticks (at least in case of minor ticks) are not created yet
if showMinorTickLabels:
minf = plt_tools.get_formatter_minor_log()
a.yaxis.set_minor_formatter(minf)
# a.yaxis.set_minor_formatter(plt.FormatStrFormatter("%i"))
# ticks = a.yaxis.get_minor_ticks()
# for i in ticks:
# if i.label.get_text() in removeTickLabels:
# i.label.set_visible(False)
if fit_pos:
if 'data_fit_normal' in dir(self):
a.plot(self.data.index, self.data_fit_normal.Pos, color='m', linewidth=2, label='normal dist. center')
leg = a.legend(fancybox=True, framealpha=0.5)
leg.draw_frame(True)
return f, a, pc, cb
def plot_fitres(self):
""" Plots the results from fit_normal"""
f, a = plt.subplots()
data = self.data_fit_normal.dropna()
a.fill_between(data.index, data.Sigma_high, data.Sigma_low,
color=plt_tools.color_cycle[0],
alpha=0.5,
)
a.plot(data.index.values, data.Pos.values, color=plt_tools.color_cycle[0], linewidth=2, label='center')
# data.Pos.plot(ax=a, color=plt_tools.color_cycle[0], linewidth=2, label='center')
a.legend(loc=2)
a.set_ylabel('Particle diameter (nm)')
        a.set_xlabel('Time (UTC)')
a2 = a.twinx()
# data.Amp.plot(ax=a2, color=plt_tools.color_cycle[1], linewidth=2, label='amplitude')
a2.plot(data.index.values, data.Amp.values, color=plt_tools.color_cycle[1], linewidth=2, label='amplitude')
a2.legend()
a2.set_ylabel('Amplitude - %s' % (get_label(self.distributionType)))
f.autofmt_xdate()
return f, a, a2
def plot_particle_concentration(self, ax=None, label=None):
"""Plots the particle rate as a function of time.
Parameters
----------
ax: matplotlib.axes instance, optional
perform plot on these axes.
Returns
-------
matplotlib.axes instance
"""
if type(ax).__name__ in axes_types:
color = plt_tools.color_cycle[len(ax.get_lines())]
f = ax.get_figure()
else:
f, ax = plt.subplots()
color = plt_tools.color_cycle[0]
# layers = self.convert2numberconcentration()
particles = self.get_particle_concentration().dropna()
ax.plot(particles.index.values, particles.Count_rate.values, color=color, linewidth=2)
if label:
ax.get_lines()[-1].set_label(label)
ax.legend()
ax.set_xlabel('Time (UTC)')
ax.set_ylabel('Particle number concentration (cm$^{-3})$')
if particles.index.dtype.type.__name__ == 'datetime64':
f.autofmt_xdate()
return ax
def zoom_time(self, start=None, end=None):
"""
2014-11-24 16:02:30
"""
dist = self.copy()
dist.data = dist.data.truncate(before=start, after=end)
return dist
def average_overTime(self, window='1S'):
"""returns a copy of the sizedistribution_TS with reduced size by averaging over a given window
Arguments
---------
window: str ['1S']. Optional
window over which to average. For aliases see
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
Returns
-------
SizeDistribution_TS instance
copy of current instance with resampled data frame
"""
dist = self.copy()
# newer pandas returns a Resampler object from resample, so an explicit
# aggregation (here the mean, matching the old default) is required
dist.data = dist.data.resample(window, closed='right', label='right').mean()
if dist.distributionType == 'calibration':
dist.data.values[np.isnan(dist.data.values)] = 0
return dist
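# Hedged usage sketch (hypothetical instance dist_ts; not part of the
# original module):
#
#     dist_10s = dist_ts.average_overTime(window='10S')
#     dist_10s.plot()
#
# any pandas offset alias works for window, e.g. '1T' for one minute.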
def average_overAllTime(self):
"""
averages over the entire dataFrame and returns a single sizedistribution (numpy.ndarray)
"""
singleHist = np.zeros(self.data.shape[1])
for i in range(self.data.shape[1]):
line = self.data.values[:, i]
singleHist[i] = np.average(line[~np.isnan(line)])
data = pd.DataFrame(np.array([singleHist]), columns=self.data.columns)
avgDist = SizeDist(data, self.bins, self.distributionType)
return avgDist
def convert2layerseries(self, hk, layer_thickness=10, force=False):
"""convertes the time series to a layer series.
Note
----
nan values are excluded when an average is taken over a the time that corresponds to the particular layer
(altitude). If there are only nan values nan is returned and there is a gap in the Layerseries.
The the housekeeping instance has to have a column called "Altitude" and which is monotonicly in- or decreasing
Arguments
---------
hk: housekeeping instance
layer_thickness (optional): [10] thickness of each generated layer in meter"""
if any(np.isnan(hk.data.Altitude)):
txt = """The Altitude contains nan values. Either fix this first, eg. with pandas interpolate function"""
raise ValueError(txt)
if ((hk.data.Altitude.values[1:] - hk.data.Altitude.values[:-1]).min() < 0) and (
(hk.data.Altitude.values[1:] - hk.data.Altitude.values[:-1]).max() > 0):
if force:
hk.data = hk.data.sort_values(by='Altitude')  # DataFrame.sort was removed from pandas; sort_values is the replacement
else:
txt = '''Given altitude data is not monotonic. This is not possible (yet). Use force if you
know what you are doing'''
raise ValueError(txt)
start_h = round(hk.data.Altitude.values.min() / layer_thickness) * layer_thickness
end_h = round(hk.data.Altitude.values.max() / layer_thickness) * layer_thickness
layer_edges = np.arange(start_h, end_h, layer_thickness)
empty_frame = pd.DataFrame(columns=self.data.columns)
lays = SizeDist_LS(empty_frame, self.bins, self.distributionType, None)
for e, end_h_l in enumerate(layer_edges[1:]):
start_h_l = layer_edges[e]
layer = hk.data.Altitude.iloc[
np.where(np.logical_and(start_h_l < hk.data.Altitude.values, hk.data.Altitude.values < end_h_l))]
start_t = layer.index.min()
end_t = layer.index.max()
dist_tmp = self.zoom_time(start=start_t, end=end_t)
avrg = dist_tmp.average_overAllTime()
# return avrg,lays
lays.add_layer(avrg, (start_h_l, end_h_l))
lays.parent_dist_TS = self
lays.parent_timeseries = hk
data = hk.data.copy()
data['Time_UTC'] = data.index
data.index = data.Altitude
data = data.sort_index()
if not data.index.is_unique:  # needed in case there are duplicate indices
grouped = data.groupby(level=0)
data = grouped.last()
data = data.reindex(lays.layercenters, method='nearest')
lays.housekeeping = vertical_profile.VerticalProfile(data)
return lays
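# Hedged usage sketch (hypothetical objects dist_ts and hk; not part of
# the original module):
#
#     lays = dist_ts.convert2layerseries(hk, layer_thickness=50)
#     lays.plot()
#
# hk is assumed to be a housekeeping instance whose data frame has a
# monotonic "Altitude" column covering the same time span as dist_ts.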
class SizeDist_LS(SizeDist):
"""
Parameters
----------
data: pandas DataFrame ...
bins: array
distributionType: str
layerbounderies: array shape(n_layers,2)
OLD
---
data: pandas dataFrame with
- column names (each name is something like this: '150-200')
- altitude (at some point this should be arbitrary, convertible to altitude for example?)
unit conventions:
- diameters: nanometers
- flowrates: cc (otherwise, axis labels need to be adjusted and caution needs to be taken when dealing with AOD)
distributionType:
log normal: 'dNdlogDp','dSdlogDp','dVdlogDp'
natural: 'dNdDp','dSdDp','dVdDp'
number: 'dNdlogDp', 'dNdDp', 'numberConcentration'
surface: 'dSdlogDp','dSdDp'
volume: 'dVdlogDp','dVdDp'
"""
def __init__(self, data, bins, distributionType, layerbounderies, fixGaps=True):
super(SizeDist_LS, self).__init__(data, bins, distributionType, fixGaps=fixGaps)
if type(layerbounderies).__name__ == 'NoneType':
self.layerbounderies = np.empty((0, 2))
# self.layercenters = np.array([])
else:
self.layerbounderies = layerbounderies
@property
def layercenters(self):
return self.__layercenters
@property
def layerbounderies(self):
return self.__layerbouderies
@layerbounderies.setter
def layerbounderies(self,lb):
self.__layerbouderies = lb
# newlb = np.unique(self.layerbounderies.flatten()) # the unique is sorting the data, which is not really what we want!
# self.__layercenters = (newlb[1:] + newlb[:-1]) / 2.
self.__layercenters = (self.layerbounderies[:,0] + self.layerbounderies[:,1]) / 2.
self.data.index = self.layercenters
def apply_hygro_growth(self, kappa, RH = None, how='shift_data'):
""" see docstring of atmPy.sizedistribution.SizeDist for more information
Parameters
----------
kappa: float
RH: float or array, optional.
If None, RH from self.housekeeping will be taken"""
if not np.any(RH):
pandas_tools.ensure_column_exists(self.housekeeping.data, 'Relative_humidity')
RH = self.housekeeping.data.Relative_humidity.values
# return kappa,RH,how
sd = super(SizeDist_LS,self).apply_hygro_growth(kappa,RH,how = how)
# size_distr = out['size_distribution']
# gf = out['growth_factor']
sd_LS = SizeDist_LS(sd.data, sd.bins, sd.distributionType, self.layerbounderies, fixGaps=False)
sd_LS.index_of_refraction = sd.index_of_refraction
sd_LS._SizeDist__growth_factor = sd.growth_factor
# out['size_distribution'] = sd_LS
return sd_LS
def calculate_angstromex(self, wavelengths=[460.3, 550.4, 671.2, 860.7], n=1.455):
"""Calculates the Anstrome coefficience (overall, layerdependent)
Parameters
----------
wavelengths: array-like, optional.
the angstrom coefficient will be calculated based on the AOD of these wavelength values (in nm)
n: float, optional.
index of refraction used in the underlying mie calculation.
Returns
-------
Angstrom exponent, float
List containing the OpticalProperties instances for the different wavelengths
New Attributes
--------------
angstromexp: float
the resulting angstrom exponent
angstromexp_fit: pandas instance.
AOD and fit result as a function of wavelength
angstromexp_LS: pandas instance.
angstrom exponent as a function of altitude
"""
AOD_list = []
AOD_dict = {}
for w in wavelengths:
AOD = self.calculate_optical_properties(w, n) # calculate_AOD(wavelength=w, n=n)
# opt= sizedistribution.OpticalProperties(AOD, dist_LS.bins)
AOD_list.append({'wavelength': w, 'opt_inst': AOD})
AOD_dict['%.1f' % w] = AOD
eg = AOD_dict[list(AOD_dict.keys())[0]]
wls = AOD_dict.keys()
wls_a = np.array(list(AOD_dict.keys())).astype(float)
ang_exp = []
ang_exp_std = []
ang_exp_r_value = []
for e, el in enumerate(eg.layercenters):
AODs = np.array([AOD_dict[wl].data_orig['AOD_layer'].values[e][0] for wl in wls])
slope, intercept, r_value, p_value, std_err = stats.linregress(np.log10(wls_a), np.log10(AODs))
ang_exp.append(-slope)
ang_exp_std.append(std_err)
ang_exp_r_value.append(r_value)
# break
ang_exp = np.array(ang_exp)
ang_exp_std = np.array(ang_exp_std)
ang_exp_r_value = np.array(ang_exp_r_value)
tmp = np.array([[float(i), AOD_dict[i].AOD] for i in AOD_dict.keys()])
wavelength, AOD = tmp[np.argsort(tmp[:, 0])].transpose()
slope, intercept, r_value, p_value, std_err = stats.linregress(np.log10(wavelength), np.log10(AOD))
self.angstromexp = -slope
aod_fit = np.log10(wavelengths) * slope + intercept
self.angstromexp_fit = pd.DataFrame(np.array([AOD, 10 ** aod_fit]).transpose(), index=wavelength,
columns=['data', 'fit'])
self.angstromexp_LS = pd.DataFrame(np.array([ang_exp, ang_exp_std, ang_exp_r_value]).transpose(),
index=self.layercenters,
columns=['ang_exp', 'standard_dif', 'correlation_coef'])
self.angstromexp_LS.index.name = 'layercenter'
return -slope, AOD_dict
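# Worked sketch of the fit performed above: assuming the power law
# AOD(lambda) = beta * lambda**(-alpha), taking log10 of both sides gives
#     log10(AOD) = -alpha * log10(lambda) + log10(beta)
# so the slope returned by stats.linregress in log-log space is -alpha, and
# the Angstrom exponent stored in self.angstromexp is alpha = -slope.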
def calculate_optical_properties(self, wavelength, n = None, noOfAngles=100):
if not n:
n = self.index_of_refraction
if not n:
txt = 'Refractive index is not specified. Either set self.index_of_refraction or set optional parameter n.'
raise ValueError(txt)
out = _calculate_optical_properties(self, wavelength, n, aod = True, noOfAngles=noOfAngles)
opt_properties = OpticalProperties(out, self.bins)
opt_properties.wavelength = wavelength
opt_properties.index_of_refraction = n
opt_properties.angular_scatt_func = out['angular_scatt_func']  # this is the former phase_fct; since it is the angular scattering intensity, the name was changed
opt_properties.parent_dist_LS = self
return opt_properties
def add_layer(self, sd, layerboundery):
"""
Adds a sizedistribution instance to the layerseries.
layerboundery
Parameters
----------
sd:
layerboundary:
"""
if len(layerboundery) != 2:
raise ValueError('layerboundery has to be of length 2')
sd = sd._convert2otherDistribution(self.distributionType)
layerbounderies = np.append(self.layerbounderies, np.array([layerboundery]), axis=0)
layerbounderiesU = np.unique(layerbounderies)
if (np.where(layerbounderiesU == layerboundery[1])[0] - np.where(layerbounderiesU == layerboundery[0])[0])[
0] != 1:
raise ValueError('The new layer is overlapping with an existing layer!')
self.data = self.data.append(sd.data)
self.layerbounderies = layerbounderies
# self.layerbounderies.sort(axis=0)
#
# layercenter = np.array(layerboundery).sum() / 2.
# self.layercenters = np.append(self.layercenters, layercenter)
# self.layercenters.sort()
# size_distr.data.index = np.array([layercenter])
# self.data = self.data.append(size_distr.data)
return
def _getXYZ(self):
"""
This will create three arrays, so when plotted with pcolor each pixel will represent the exact bin width
"""
binArray = np.repeat(np.array([self.bins]), self.data.index.shape[0], axis=0)
layerArray = np.repeat(np.array([self.data.index.values]), self.bins.shape[0], axis=0).transpose()
ext = np.array([np.zeros(self.data.index.values.shape)]).transpose()
Z = np.append(self.data.values, ext, axis=1)
return layerArray, binArray, Z
def plot_eachLayer(self, a=None, normalize=False):
"""
Plots the distribution of each layer in one plot.
Returns
-------
Handles to the figure and axes of the plot
"""
if not a:
f, a = plt.subplots()
else:
f = None
pass
for iv in self.data.index.values:
if normalize:
a.plot(self.bincenters, self.data.loc[iv, :] / self.data.loc[iv, :].max(), label='%i' % iv)
else:
a.plot(self.bincenters, self.data.loc[iv, :], label='%i' % iv)
a.set_xlabel('Particle diameter (nm)')
a.set_ylabel(get_label(self.distributionType))
a.legend()
a.semilogx()
return f, a
def plot(self, vmax=None, vmin=None, scale='linear', show_minor_tickLabels=True,
removeTickLabels=["500", "700", "800", "900"],
plotOnTheseAxes=False,
cmap=plt_tools.get_colorMap_intensity(),
fit_pos=True,
ax=None,
colorbar = True):
""" plots and returns f,a,pc,cb (figure, axis, pcolormeshInstance, colorbar)
Arguments
---------
scale (optional): ('log',['linear']) - defines how the z-direction is scaled
vmax
vmin
show_minor_tickLabels:
cmap:
fit_pos (optional): bool [True] - plots the position of a fitted normal distribution onto the plot.
in order for this to work execute fit_normal
ax (optional): axes instance [None] - option to plot on existing axes
"""
X, Y, Z = self._getXYZ()
Z = np.ma.masked_invalid(Z)
if type(ax).__name__ in axes_types:
a = ax
f = a.get_figure()
else:
f, a = plt.subplots()
# f.autofmt_xdate()
if scale == 'log':
scale = LogNorm()
elif scale == 'linear':
scale = None
pc = a.pcolormesh(Y, X, Z, vmin=vmin, vmax=vmax, norm=scale, cmap=cmap)
a.set_yscale('linear')
a.set_xscale('log')
a.set_xlim((self.bins[0], self.bins[-1]))
a.set_ylabel('Altitude (m)')
a.set_ylim((self.layercenters[0], self.layercenters[-1]))
a.set_xlabel('Diameter (nm)')
a.get_yaxis().set_tick_params(direction='out', which='both')
a.get_xaxis().set_tick_params(direction='out', which='both')
if colorbar:
cb = f.colorbar(pc)
label = get_label(self.distributionType)
cb.set_label(label)
else:
cb = None
if self.distributionType != 'calibration':
a.xaxis.set_minor_formatter(plt.FormatStrFormatter("%i"))
a.xaxis.set_major_formatter(plt.FormatStrFormatter("%i"))
f.canvas.draw() # this is important, otherwise the ticks (at least in case of minor ticks) are not created yet
ticks = a.xaxis.get_minor_ticks()
for i in ticks:
if i.label.get_text() in removeTickLabels:
i.label.set_visible(False)
if fit_pos:
if 'data_fit_normal' in dir(self):
a.plot(self.data_fit_normal.Pos, self.layercenters, color='m', linewidth=2, label='normal dist. center')
leg = a.legend(fancybox=True, framealpha=0.5)
leg.draw_frame(True)
return f, a, pc, cb
# todo: when you want to plot one plot on an existing one it will rotate it twice!
def plot_particle_concentration(self, ax=None, label=None):
"""Plots the particle concentration as a function of altitude.
Parameters
----------
ax: matplotlib.axes instance, optional
perform plot on these axes.
Returns
-------
matplotlib.axes instance
"""
# ax = SizeDist_TS.plot_particle_concetration(self, ax=ax, label=label)
# ax.set_xlabel('Altitude (m)')
#
# if rotate:
# g = ax.get_lines()[-1]
# x, y = g.get_xydata().transpose()
# xlim = ax.get_xlim()
# ylim = ax.get_ylim()
# ax.set_xlim(ylim)
# ax.set_ylim(xlim)
# g.set_xdata(y)
# g.set_ydata(x)
# xlabel = ax.get_xlabel()
# ylabel = ax.get_ylabel()
# ax.set_xlabel(ylabel)
# ax.set_ylabel(xlabel)
if type(ax).__name__ in axes_types:
color = plt_tools.color_cycle[len(ax.get_lines())]
f = ax.get_figure()
else:
f, ax = plt.subplots()
color = plt_tools.color_cycle[0]
# layers = self.convert2numberconcentration()
particles = self.get_particle_concentration().dropna()
ax.plot(particles.Count_rate.values, particles.index.values, color=color, linewidth=2)
if label:
ax.get_lines()[-1].set_label(label)
ax.legend()
ax.set_ylabel('Altitude (m)')
ax.set_xlabel('Particle number concentration (cm$^{-3})$')
return ax
def plot_fitres(self, amp=True, rotate=True):
""" Plots the results from fit_normal
Arguments
---------
amp: bool.
whether the amplitude is to be plotted
"""
f, a = plt.subplots()
a.fill_between(self.layercenters, self.data_fit_normal.Sigma_high, self.data_fit_normal.Sigma_low,
color=plt_tools.color_cycle[0],
alpha=0.5,
)
self.data_fit_normal.Pos.plot(ax=a, color=plt_tools.color_cycle[0], linewidth=2)
g = a.get_lines()[-1]
g.set_label('Center of norm. dist.')
a.legend(loc=2)
a.set_ylabel('Particle diameter (nm)')
a.set_xlabel('Altitude (m)')
if amp:
a2 = a.twinx()
self.data_fit_normal.Amp.plot(ax=a2, color=plt_tools.color_cycle[1], linewidth=2)
g = a2.get_lines()[-1]
g.set_label('Amplitude of norm. dist.')
a2.legend()
a2.set_ylabel('Amplitude - %s' % (get_label(self.distributionType)))
else:
a2 = False
return f, a, a2
def plot_angstromex_fit(self):
if 'angstromexp_fit' not in dir(self):
raise ValueError('Execute function calculate_angstromex first!')
f, a = plt.subplots()
a.plot(self.angstromexp_fit.index, self.angstromexp_fit.data, 'o', color=plt_tools.color_cycle[0],
label='exp. data')
a.plot(self.angstromexp_fit.index, self.angstromexp_fit.fit, color=plt_tools.color_cycle[1], label='fit',
linewidth=2)
a.set_xlim((self.angstromexp_fit.index.min() * 0.95, self.angstromexp_fit.index.max() * 1.05))
a.set_ylim((self.angstromexp_fit.data.min() * 0.95, self.angstromexp_fit.data.max() * 1.05))
a.set_xlabel('Wavelength (nm)')
a.set_ylabel('AOD')
a.loglog()
a.xaxis.set_minor_formatter(plt.FormatStrFormatter("%i"))
a.yaxis.set_minor_formatter(plt.FormatStrFormatter("%.2f"))
return a
def plot_angstromex_LS(self, corr_coeff=False, std=False):
if 'angstromexp_fit' not in dir(self):
raise ValueError('Execute function calculate_angstromex first!')
f, a = plt.subplots()
a.plot(self.angstromexp_LS.index, self.angstromexp_LS.ang_exp, color=plt_tools.color_cycle[0], linewidth=2,
label='Angstrom exponent')
a.set_xlabel('Altitude (m)')
a.set_ylabel('Angstrom exponent')
if corr_coeff:
a.legend(loc=2)
a2 = a.twinx()
a2.plot(self.angstromexp_LS.index, self.angstromexp_LS.correlation_coef, color=plt_tools.color_cycle[1],
linewidth=2, label='corr_coeff')
a2.set_ylabel('Correlation coefficient')
a2.legend(loc=1)
if std:
a.legend(loc=2)
a2 = a.twinx()
a2.plot(self.angstromexp_LS.index, self.angstromexp_LS.standard_dif, color=plt_tools.color_cycle[1],
linewidth=2, label='std')
a2.set_ylabel('Standard deviation')
a2.legend(loc=1)
tmp = (self.angstromexp_LS.index.max() - self.angstromexp_LS.index.min()) * 0.05
a.set_xlim((self.angstromexp_LS.index.min() - tmp, self.angstromexp_LS.index.max() + tmp))
return a
def zoom_altitude(self, bottom, top):
"""'2014-11-24 16:02:30'"""
dist = self.copy()
dist.data = dist.data.truncate(before=bottom, after=top)
where = np.where(np.logical_and(dist.layercenters < top, dist.layercenters > bottom))
# dist.layercenters = dist.layercenters[where]
dist.layerbounderies = dist.layerbounderies[where]
if 'data_fit_normal' in dir(dist):
dist.data_fit_normal = dist.data_fit_normal.iloc[where]
return dist
# dist = self.copy()
# dist.data = dist.data.truncate(before=start, after = end)
# return dist
#
def average_overAltitude(self, window='1S'):
print('needs fixing')
return False
# window = window
# self.data = self.data.resample(window, closed='right',label='right')
# if self.distributionType == 'calibration':
# self.data.values[np.where(np.isnan(self.data.values))] = 0
# return
def average_overAllAltitudes(self):
dataII = self.data.mean(axis=0)
out = pd.DataFrame(dataII).T
return SizeDist(out, self.bins, self.distributionType)
def fit_normal(self):
""" Fits a single normal distribution to each line in the data frame.
Returns
-------
pandas DataFrame instance (also added to namespace as data_fit_normal)
"""
super(SizeDist_LS, self).fit_normal()
self.data_fit_normal.index = self.layercenters
return self.data_fit_normal
# singleHist = np.zeros(self.data.shape[1])
# for i in xrange(self.data.shape[1]):
# line = self.data.values[:,i]
# singleHist[i] = np.average(line[~np.isnan(line)])
# return singleHist
# Todo: bins are redundant
# Todo: some functions should be switched off
class OpticalProperties(object):
def __init__(self, data, bins):
# self.data = data['extCoeffPerLayer']
self.data = data['extCoeff_perrow_perbin']
self.data_orig = data
self.AOD = data['AOD']
self.bins = bins
self.layercenters = self.data.index.values
self.asymmetry_parameter_LS = data['asymmetry_param']
# self.asymmetry_parameter_LS_alt = data['asymmetry_param_alt']
# ToDo: to define a distribution type does not really make sense ... just to make the stolen plot function happy
self.distributionType = 'dNdlogDp'
def get_extinction_coeff_verticle_profile(self):
"""
Creates a vertical profile of the extinction coefficient.
"""
ext = self.data.sum(axis=1)
ext = pd.DataFrame(ext, columns=['ext. coeff.'])
ext.index.name = 'Altitude'
out = ExtinctionCoeffVerticlProfile(ext, self, self.wavelength, self.index_of_refraction)
# out.wavelength = self.wavelength
# out.n = self.index_of_refraction
# out.parent = self
return out
def plot_AOD_cum(self, color=plt_tools.color_cycle[0], linewidth=2, ax=None, label='cumulative AOD',
extra_info=True):
if not ax:
f,a = plt.subplots()
else:
a = ax
# a = self.data_orig['AOD_cum'].plot(color=color, linewidth=linewidth, ax=ax, label=label)
g, = a.plot(self.data_orig['AOD_cum']['AOD per Layer'], self.data_orig['AOD_cum'].index, color=color, linewidth=linewidth, label=label)
# g = a.get_lines()[-1]
g.set_label(label)
a.legend()
# a.set_xlim(0, 3000)
a.set_ylabel('Altitude (m)')
a.set_xlabel('AOD')
txt = '''$\lambda = %s$ nm
n = %s
AOD = %.4f''' % (self.data_orig['wavelength'], self.data_orig['n'], self.data_orig['AOD'])
if extra_info:
a.text(0.7, 0.7, txt, transform=a.transAxes)
return a
def _getXYZ(self):
out = SizeDist_LS._getXYZ(self)
return out
def plot_extCoeffPerLayer(self,
vmax=None,
vmin=None,
scale='linear',
show_minor_tickLabels=True,
removeTickLabels=['500', '700', '800', '900'],
plotOnTheseAxes=False, cmap=plt_tools.get_colorMap_intensity(),
fit_pos=True,
ax=None):
f, a, pc, cb = SizeDist_LS.plot(self,
vmax=vmax,
vmin=vmin,
scale=scale,
show_minor_tickLabels=show_minor_tickLabels,
removeTickLabels=removeTickLabels,
plotOnTheseAxes=plotOnTheseAxes,
cmap=cmap,
fit_pos=fit_pos,
ax=ax)
cb.set_label('Extinction coefficient ($m^{-1}$)')
return f, a, pc, cb
class ExtinctionCoeffVerticlProfile(vertical_profile.VerticalProfile):
def __init__(self, ext, parent, wavelength, index_of_refraction):
super(ExtinctionCoeffVerticlProfile, self).__init__(ext)
self.parent = parent
self.wavelength = wavelength
self.index_of_refraction = index_of_refraction
def plot(self, *args, **kwargs):
a = super(ExtinctionCoeffVerticlProfile, self).plot(*args, **kwargs)
a.set_xlabel('Extinction coefficient (m$^{-1}$)')
return a
def simulate_sizedistribution(diameter=[10, 2500], numberOfDiameters=100, centerOfAerosolMode=200,
widthOfAerosolMode=0.2, numberOfParticsInMode=1000):
"""generates a numberconcentration of an aerosol layer which has a gaussian shape when plottet in dN/log(Dp).
However, returned is a numberconcentrations (simply the number of particles in each bin, no normalization)
Returns
Number concentration (#)
bin edges (nm)"""
start = diameter[0]
end = diameter[1]
noOfD = numberOfDiameters
centerDiameter = centerOfAerosolMode
width = widthOfAerosolMode
bins = np.linspace(np.log10(start), np.log10(end), noOfD)
binwidth = bins[1:] - bins[:-1]
bincenters = (bins[1:] + bins[:-1]) / 2.
# matplotlib removed mlab.normpdf; compute the normal pdf directly with numpy
dNDlogDp = np.exp(-(bincenters - np.log10(centerDiameter))**2 / (2. * width**2)) / (width * np.sqrt(2. * np.pi))
extraScale = 1
scale = 1
# two-pass loop: the first pass computes the scale factor that yields the
# requested number of particles in the mode, the second pass applies it
while 1:
NumberConcent = dNDlogDp * binwidth * scale * extraScale
if scale != 1:
break
else:
scale = float(numberOfParticsInMode) / NumberConcent.sum()
binEdges = 10 ** bins
diameterBinwidth = binEdges[1:] - binEdges[:-1]
cols = []
for e, i in enumerate(binEdges[:-1]):
cols.append(str(i) + '-' + str(binEdges[e + 1]))
data = pd.DataFrame(np.array([NumberConcent / diameterBinwidth]), columns=cols)
return SizeDist(data, binEdges, 'dNdDp')
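# Hedged usage sketch (arguments shown are the defaults, repeated for
# clarity; not part of the original module):
#
#     sd = simulate_sizedistribution(diameter=[10, 2500],
#                                    numberOfDiameters=100,
#                                    centerOfAerosolMode=200,
#                                    widthOfAerosolMode=0.2,
#                                    numberOfParticsInMode=1000)
#     sd.convert2dNdlogDp().plot()  # should recover the Gaussian mode shape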
def simulate_sizedistribution_timeseries(diameter=[10, 2500], numberOfDiameters=100, centerOfAerosolMode=200,
widthOfAerosolMode=0.2, numberOfParticsInMode=1000,
startDate='2014-11-24 17:00:00',
endDate='2014-11-24 18:00:00', frequency=10):
delta = datetime.datetime.strptime(endDate, '%Y-%m-%d %H:%M:%S') - datetime.datetime.strptime(startDate,
'%Y-%m-%d %H:%M:%S')
periods = int(delta.total_seconds() // frequency)  # date_range and np.zeros require an integer count
rng = pd.date_range(startDate, periods=periods, freq='%ss' % frequency)
noOfOsz = 5
ampOfOsz = 100
oszi = np.linspace(0, noOfOsz * 2 * np.pi, periods)
sdArray = np.zeros((periods, numberOfDiameters - 1))
for e, i in enumerate(rng):
sdtmp = simulate_sizedistribution(diameter=diameter,
numberOfDiameters=numberOfDiameters,
centerOfAerosolMode=centerOfAerosolMode + (ampOfOsz * np.sin(oszi[e])))
sdArray[e] = sdtmp.data
sdts = pd.DataFrame(sdArray, index=rng, columns=sdtmp.data.columns)
return SizeDist_TS(sdts, sdtmp.bins, sdtmp.distributionType)
def simulate_sizedistribution_layerseries(diameter=[10, 2500], numberOfDiameters=100, heightlimits=[0, 6000],
noOflayers=100, layerHeight=[500., 4000.], layerThickness=[100., 300.],
layerDensity=[1000., 5000.], layerModecenter=[200., 800.], widthOfAerosolMode = 0.2 ):
gaussian = lambda x, mu, sig: np.exp(-(x - mu) ** 2 / (2 * sig ** 2))
lbt = np.linspace(heightlimits[0], heightlimits[1], noOflayers + 1)
layerbounderies = np.array([lbt[:-1], lbt[1:]]).transpose()
layercenter = (lbt[1:] + lbt[:-1]) / 2.
# strata = np.linspace(heightlimits[0],heightlimits[1],noOflayers+1)
layerArray = np.zeros((noOflayers, numberOfDiameters - 1))
for e, stra in enumerate(layercenter):
for i, lay in enumerate(layerHeight):
sdtmp = simulate_sizedistribution(diameter=diameter, numberOfDiameters=numberOfDiameters,
widthOfAerosolMode=widthOfAerosolMode, centerOfAerosolMode=layerModecenter[i],
numberOfParticsInMode=layerDensity[i])
layerArray[e] += sdtmp.data.values[0] * gaussian(stra, layerHeight[i], layerThickness[i])
sdls = pd.DataFrame(layerArray, index=layercenter, columns=sdtmp.data.columns)
return SizeDist_LS(sdls, sdtmp.bins, sdtmp.distributionType, layerbounderies)
def generate_aerosolLayer(diameter=[.01, 2.5], numberOfDiameters=30, centerOfAerosolMode=0.6,
widthOfAerosolMode=0.2, numberOfParticsInMode=10000, layerBoundery=[0., 10000], ):
"""Probably deprecated!?! generates a numberconcentration of an aerosol layer which has a gaussian shape when plottet in dN/log(Dp).
However, returned is a numberconcentrations (simply the number of particles in each bin, no normalization)
Returns
Number concentration (#)
bin edges (nm)"""
layerBoundery = np.array(layerBoundery)
start = diameter[0]
end = diameter[1]
noOfD = numberOfDiameters
centerDiameter = centerOfAerosolMode
width = widthOfAerosolMode
bins = np.linspace(np.log10(start), np.log10(end), noOfD)
binwidth = bins[1:] - bins[:-1]
bincenters = (bins[1:] + bins[:-1]) / 2.
# matplotlib removed mlab.normpdf; compute the normal pdf directly with numpy
dNDlogDp = np.exp(-(bincenters - np.log10(centerDiameter))**2 / (2. * width**2)) / (width * np.sqrt(2. * np.pi))
extraScale = 1
scale = 1
while 1:
NumberConcent = dNDlogDp * binwidth * scale * extraScale
if scale != 1:
break
else:
scale = float(numberOfParticsInMode) / NumberConcent.sum()
binEdges = 10 ** bins
# diameterBinCenters = (binEdges[1:] + binEdges[:-1])/2.
diameterBinwidth = binEdges[1:] - binEdges[:-1]
cols = []
for e, i in enumerate(binEdges[:-1]):
cols.append(str(i) + '-' + str(binEdges[e + 1]))
layerBoundery = np.array([0., 10000.])  # note: this hard-coded value overrides the layerBoundery argument
# layerThickness = layerBoundery[1:] - layerBoundery[:-1]
layerCenter = [5000.]
data = pd.DataFrame(np.array([NumberConcent / diameterBinwidth]), index=layerCenter, columns=cols)
# return data
# atmosAerosolNumberConcentration = pd.DataFrame()
# atmosAerosolNumberConcentration['bin_center'] = pd.Series(diameterBinCenters)
# atmosAerosolNumberConcentration['bin_start'] = pd.Series(binEdges[:-1])
# atmosAerosolNumberConcentration['bin_end'] = pd.Series(binEdges[1:])
# atmosAerosolNumberConcentration['numberConcentration'] = pd.Series(NumberConcent)
# return atmosAerosolNumberConcentration
return SizeDist_LS(data, binEdges, 'dNdDp', layerBoundery)
def test_generate_numberConcentration():
"""result should look identical to Atmospheric Chemistry and Physis page 422"""
nc = generate_aerosolLayer(diameter=[0.01, 10], centerOfAerosolMode=0.8, widthOfAerosolMode=0.3,
numberOfDiameters=100, numberOfParticsInMode=1000, layerBoundery=[0.0, 10000])
plt.plot(nc.bincenters, nc.data.values[0].transpose() * nc.binwidth, label='numberConc')
plt.plot(nc.bincenters, nc.data.values[0].transpose(), label='numberDist')
ncLN = nc.convert2dNdlogDp()
plt.plot(ncLN.bincenters, ncLN.data.values[0].transpose(), label='LogNormal')
plt.legend()
plt.semilogx()
def _perform_Miecalculations(diam, wavelength, n, noOfAngles=100.):
"""
Performs Mie calculations
Parameters
----------
diam: NumPy array of floats
Array of diameters over which to perform Mie calculations; units are um
wavelength: float
Wavelength of light in um for which to perform calculations
n: complex
Ensemble complex index of refraction
Returns
pandas DataFrame with the diameters as the index and the Mie results in the different columns
total_extinction_coefficient: this takes the sum of all particle cross-sections of the particular diameter in a cubic
meter. This is in principle the AOD of a layer.
"""
diam = np.asarray(diam)
extinction_efficiency = np.zeros(diam.shape)
scattering_efficiency = np.zeros(diam.shape)
absorption_efficiency = np.zeros(diam.shape)
extinction_crossection = np.zeros(diam.shape)
scattering_crossection = np.zeros(diam.shape)
absorption_crossection = np.zeros(diam.shape)
# phase_function_natural = pd.DataFrame()
angular_scattering_natural = pd.DataFrame()
# extinction_coefficient = np.zeros(diam.shape)
# scattering_coefficient = np.zeros(diam.shape)
# absorption_coefficient = np.zeros(diam.shape)
# Function for calculating the size parameter for wavelength l and radius r
sp = lambda r, l: 2. * np.pi * r / l
for e, d in enumerate(diam):
radius = d / 2.
# print('sp(radius, wavelength)', sp(radius, wavelength))
# print('n', n)
# print('d', d)
mie = bhmie.bhmie_hagen(sp(radius, wavelength), n, noOfAngles, diameter=d)
values = mie.return_Values_as_dict()
extinction_efficiency[e] = values['extinction_efficiency']
# print("values['extinction_crosssection']",values['extinction_crosssection'])
scattering_efficiency[e] = values['scattering_efficiency']
absorption_efficiency[e] = values['extinction_efficiency'] - values['scattering_efficiency']
extinction_crossection[e] = values['extinction_crosssection']
scattering_crossection[e] = values['scattering_crosssection']
absorption_crossection[e] = values['extinction_crosssection'] - values['scattering_crosssection']
# phase_function_natural[d] = values['phaseFct_natural']['Phase_function_natural'].values
angular_scattering_natural[d] = mie.get_angular_scatt_func().natural.values
# print('\n')
# phase_function_natural.index = values['phaseFct_natural'].index
angular_scattering_natural.index = mie.get_angular_scatt_func().index
out = pd.DataFrame(index=diam)
out['extinction_efficiency'] = pd.Series(extinction_efficiency, index=diam)
out['scattering_efficiency'] = pd.Series(scattering_efficiency, index=diam)
out['absorption_efficiency'] = pd.Series(absorption_efficiency, index=diam)
out['extinction_crossection'] = pd.Series(extinction_crossection, index=diam)
out['scattering_crossection'] = pd.Series(scattering_crossection, index=diam)
out['absorption_crossection'] = pd.Series(absorption_crossection, index=diam)
return out, angular_scattering_natural
def _get_coefficients(crossection, cn):
"""
Calculates the extinction, scattering, or absorption coefficient
Parameters
----------
crossection: float
Units are um^2
cn: float
Particle concentration in cc^-1
Returns
--------
coefficient in m^-1. This is the differential AOD.
"""
crossection = crossection.copy()
cn = cn.copy()
crossection *= 1e-12 # conversion from um^2 to m^2
cn *= 1e6 # conversion from cm^-3 to m^-3
coefficient = cn * crossection
# print('cn',cn)
# print('crossection', crossection)
# print('coeff',coefficient)
# print('\n')
return coefficient
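# Worked example of the unit conversion above (values assumed): a
# cross-section of 0.1 um^2 at a particle concentration of 1000 cm^-3 gives
#     (0.1 * 1e-12 m^2) * (1000 * 1e6 m^-3) = 1e-4 m^-1
# i.e. an extinction coefficient of 1e-4 per meter of path length.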
def test_ext_coeff_vertical_profile():
#todo: make this a real test
dist = simulate_sizedistribution_layerseries(layerHeight=[3000.0, 3000.0],
layerDensity=[1000.0, 100.0],
layerModecenter=[100.0, 100.0],
layerThickness=[6000, 6000],
widthOfAerosolMode = 0.01,
noOflayers=3,
numberOfDiameters=1000)
dist.plot()
dist = dist.zoom_diameter(99,101)
avg = dist.average_overAllAltitudes()
f,a = avg.plot()
a.set_xscale('linear')
opt = dist.calculate_optical_properties(550, n = 1.455)
opt_II = dist.calculate_optical_properties(550, n = 1.1)
opt_III = dist.calculate_optical_properties(550, n = 4.)
ext = opt.get_extinction_coeff_verticle_profile()
ext_II = opt_II.get_extinction_coeff_verticle_profile()
ext_III = opt_III.get_extinction_coeff_verticle_profile()
tvI_is = (ext_III.data/ext.data).values[0][0]
tvI_want = 14.3980239083
tvII_is = (ext_II.data/ext.data).values[0][0]
tvII_want = 0.05272993413
print('small deviations could come from averaging over multiple bins with slightly different diameter')
print('test values 1 is/should_be: %s/%s'%(tvI_is,tvI_want))
print('test values 2 is/should_be: %s/%s'%(tvII_is,tvII_want))
return False | mit |
levinsamuel/rand | python/scripts/unsorted-recursion-search.py | 1 | 1626 | #!/usr/bin/env python
# coding: utf-8
# In[498]:
import sys, math, random, logging, time
import numpy as np
import pandas as pd
# global tracker for recursive depth
level = 0
# binary-search-like algorithm
def unsorted_search(arr, num):
global level
level = 0
return _us(arr, num, 0, len(arr)-1, 0)
def _us(arr, num, l, r, lev):
global level
level = max(lev, level)
if l > r:
return -1
m = (l+r)//2
if arr[m] == num:
return m
else:
step1 = _us(arr, num, l, m-1, lev+1)
if step1 == -1:
step2 = _us(arr, num, m+1, r, lev+1)
return max(step1, step2)
else:
return step1
# linear recursive search; recursion depth grows linearly with the array size
def recSearch(arr, x):
global level
level = 0
return _recSearch(arr, 0, len(arr)-1, x, 0)
def _recSearch( arr, l, r, x, lev):
global level
level = max(level, lev)
if r < l:
return -1
if arr[l] == x:
return l
if arr[r] == x:
return r
return _recSearch(arr, l+1, r-1, x, lev+1)
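# Note on the two searches above: unsorted_search still does O(n) work in the
# worst case (the array is unsorted, so both halves must be explored), but its
# recursion depth is only O(log n) because each call halves the range. By
# contrast, _recSearch shrinks the range by one element from each end per
# call, so its depth is about n/2; for the 990,000-element array below that
# far exceeds Python's default recursion limit (~1000), which is why the
# RecursionError is caught at the bottom of this script.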
la = np.random.randint(10000, size=990000)
# log.debug("array: %s", la)
times = [time.time()]
print('Binary-like search:')
print(unsorted_search(la, 50))
print("max level:", level)
times.append(time.time())
print('\nLinear recursive search:')
try:
print(recSearch(la, 50))
except RecursionError:
print('Execution failed with recursion error')
print("max level:", level)
times.append(time.time())
# construct times
df = pd.DataFrame({'starts': times[0:2], 'ends': times[1:3]})
df['diffs'] = df['ends'] - df['starts']
print('\nExecution times')
print(df)
| mit |
elvandy/nltools | nltools/data/brain_data.py | 1 | 61693 | from __future__ import division
'''
NeuroLearn Brain Data
=====================
Classes to represent brain image data.
'''
# Notes:
# Need to figure out how to speed up loading and resampling of data
__author__ = ["Luke Chang"]
__license__ = "MIT"
from nilearn.signal import clean
from scipy.stats import ttest_1samp
from scipy.stats import t as t_dist
from scipy.signal import detrend
import os
import shutil
import nibabel as nib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import warnings
import tempfile
from copy import deepcopy
import six
from sklearn.metrics.pairwise import pairwise_distances, cosine_similarity
from sklearn.utils import check_random_state
from pynv import Client
from joblib import Parallel, delayed
from nltools.mask import expand_mask
from nltools.analysis import Roc
from nilearn.input_data import NiftiMasker
from nilearn.plotting import plot_stat_map
from nilearn.image import resample_img
from nilearn.masking import intersect_masks
from nilearn.regions import connected_regions, connected_label_regions
from nltools.utils import (get_resource_path,
set_algorithm,
get_anatomical,
attempt_to_import,
concatenate,
_bootstrap_apply_func,
set_decomposition_algorithm)
from nltools.cross_validation import set_cv
from nltools.plotting import (scatterplot,
roc_plot,
plot_stacked_adjacency,
plot_silhouette)
from nltools.stats import (pearson,
fdr,
threshold,
fisher_r_to_z,
correlation_permutation,
one_sample_permutation,
two_sample_permutation,
downsample,
upsample,
zscore,
make_cosine_basis,
transform_pairwise,
summarize_bootstrap,
procrustes)
from nltools.stats import regress as regression
from .adjacency import Adjacency
from nltools.prefs import MNI_Template, resolve_mni_path
from nltools.external.srm import DetSRM, SRM
# Optional dependencies
nx = attempt_to_import('networkx', 'nx')
mne_stats = attempt_to_import('mne.stats',name='mne_stats', fromlist=
['spatio_temporal_cluster_1samp_test',
'ttest_1samp_no_p'])
MAX_INT = np.iinfo(np.int32).max
class Brain_Data(object):
"""
Brain_Data is a class to represent neuroimaging data in python as a vector
rather than a 3-dimensional matrix. This makes it easier to perform data
manipulation and analyses.
Args:
data: nibabel data instance or list of files
Y: Pandas DataFrame of training labels
X: Pandas DataFrame Design Matrix for running univariate models
mask: binary nifiti file to mask brain data
output_file: Name to write out to nifti file
**kwargs: Additional keyword arguments to pass to the prediction
algorithm
"""
def __init__(self, data=None, Y=None, X=None, mask=None, output_file=None,
**kwargs):
if mask is not None:
if not isinstance(mask, nib.Nifti1Image):
if isinstance(mask, six.string_types):
if os.path.isfile(mask):
mask = nib.load(mask)
else:
raise ValueError("mask is not a nibabel instance or a "
"valid file name")
self.mask = mask
else:
self.mask = nib.load(resolve_mni_path(MNI_Template)['mask'])
self.nifti_masker = NiftiMasker(mask_img=self.mask)
if data is not None:
if isinstance(data, six.string_types):
if 'http://' in data:
from nltools.datasets import download_nifti
tmp_dir = os.path.join(tempfile.gettempdir(),
str(os.times()[-1]))
os.makedirs(tmp_dir)
data = nib.load(download_nifti(data, data_dir=tmp_dir))
else:
data = nib.load(data)
self.data = self.nifti_masker.fit_transform(data)
elif isinstance(data, list):
if isinstance(data[0], Brain_Data):
tmp = concatenate(data)
for item in ['data', 'Y', 'X', 'mask', 'nifti_masker',
'file_name']:
setattr(self, item, getattr(tmp,item))
else:
if all([isinstance(x,data[0].__class__) for x in data]):
self.data = []
for i in data:
if isinstance(i, six.string_types):
self.data.append(self.nifti_masker.fit_transform(
nib.load(i)))
elif isinstance(i, nib.Nifti1Image):
self.data.append(self.nifti_masker.fit_transform(i))
self.data = np.concatenate(self.data)
else:
raise ValueError('Make sure all objects in the list are the same type.')
elif isinstance(data, nib.Nifti1Image):
self.data = np.array(self.nifti_masker.fit_transform(data))
else:
raise ValueError("data is not a nibabel instance")
# Collapse any extra dimension
if any([x == 1 for x in self.data.shape]):
self.data = self.data.squeeze()
else:
self.data = np.array([])
if Y is not None:
if isinstance(Y, six.string_types):
if os.path.isfile(Y):
Y = pd.read_csv(Y, header=None, index_col=None)
if isinstance(Y, pd.DataFrame):
if self.data.shape[0] != len(Y):
raise ValueError("Y does not match the correct size "
"of data")
self.Y = Y
else:
raise ValueError("Make sure Y is a pandas data frame.")
else:
self.Y = pd.DataFrame()
if X is not None:
if isinstance(X, six.string_types):
if os.path.isfile(X):
X = pd.read_csv(X, header=None, index_col=None)
if isinstance(X, pd.DataFrame):
if self.data.shape[0] != X.shape[0]:
raise ValueError("X does not match the correct size "
"of data")
self.X = X
else:
raise ValueError("Make sure X is a pandas data frame.")
else:
self.X = pd.DataFrame()
if output_file is not None:
self.file_name = output_file
else:
self.file_name = []
def __repr__(self):
return '%s.%s(data=%s, Y=%s, X=%s, mask=%s, output_file=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.shape(),
len(self.Y),
self.X.shape,
os.path.basename(self.mask.get_filename()),
self.file_name
)
def __getitem__(self, index):
new = deepcopy(self)
if isinstance(index, int):
new.data = np.array(self.data[index, :]).flatten()
else:
if isinstance(index, slice):
new.data = self.data[index, :]
else:
index = np.array(index).flatten()
new.data = np.array(self.data[index, :])
if not self.Y.empty:
new.Y = self.Y.iloc[index]
new.Y.reset_index(inplace=True, drop=True)
if not self.X.empty:
new.X = self.X.iloc[index]
new.X.reset_index(inplace=True, drop=True)
return new
def __setitem__(self, index, value):
if not isinstance(value, Brain_Data):
raise ValueError("Make sure the value you are trying to set is a "
"Brain_Data() instance.")
self.data[index, :] = value.data
if not value.Y.empty:
self.Y.values[index] = value.Y
if not value.X.empty:
if self.X.shape[1] != value.X.shape[1]:
raise ValueError("Make sure self.X is the same size as "
"value.X.")
self.X.values[index] = value.X
def __len__(self):
return self.shape()[0]
def __add__(self, y):
new = deepcopy(self)
if isinstance(y, (int, float)):
new.data = new.data + y
if isinstance(y, Brain_Data):
if self.shape() != y.shape():
raise ValueError("Both Brain_Data() instances need to be the "
"same shape.")
new.data = new.data + y.data
return new
def __sub__(self, y):
new = deepcopy(self)
if isinstance(y, (int, float)):
new.data = new.data - y
if isinstance(y, Brain_Data):
if self.shape() != y.shape():
raise ValueError('Both Brain_Data() instances need to be the '
'same shape.')
new.data = new.data - y.data
return new
def __mul__(self, y):
new = deepcopy(self)
if isinstance(y, (int, float)):
new.data = new.data * y
if isinstance(y, Brain_Data):
if self.shape() != y.shape():
raise ValueError("Both Brain_Data() instances need to be the "
"same shape.")
new.data = np.multiply(new.data, y.data)
return new
def __iter__(self):
for x in range(len(self)):
yield self[x]
def shape(self):
""" Get images by voxels shape. """
return self.data.shape
def mean(self):
""" Get mean of each voxel across images. """
out = deepcopy(self)
if len(self.shape()) > 1:
out.data = np.mean(self.data, axis=0)
out.X = pd.DataFrame()
out.Y = pd.DataFrame()
else:
out = np.mean(self.data)
return out
def std(self):
""" Get standard deviation of each voxel across images. """
out = deepcopy(self)
if len(self.shape()) > 1:
out.data = np.std(self.data, axis=0)
out.X = pd.DataFrame()
out.Y = pd.DataFrame()
else:
out = np.std(self.data)
return out
def sum(self):
""" Sum over voxels."""
out = deepcopy(self)
if len(self.shape()) > 1:
out.data = np.sum(out.data, axis=0)
out.X = pd.DataFrame()
out.Y = pd.DataFrame()
else:
out = np.sum(self.data)
return out
def to_nifti(self):
""" Convert Brain_Data Instance into Nifti Object """
return self.nifti_masker.inverse_transform(self.data)
def write(self, file_name=None):
""" Write out Brain_Data object to Nifti File.
Args:
file_name: name of nifti file
"""
self.to_nifti().to_filename(file_name)
def scale(self, scale_val=100.):
""" Scale all values such that theya re on the range 0 - scale_val, via grand-mean scaling. This is NOT global-scaling/intensity normalization. This is useful for ensuring that data is on a common scale (e.g. good for multiple runs, participants, etc) and if the default value of 100 is used, can be interpreted as something akin to (but not exactly) "percent signal change." This is consistent with default behavior in AFNI and SPM. Change this value to 10000 to make consistent with FSL.
Args:
scale_val (int/float): what value to send the grand-mean to; default 100
"""
out = deepcopy(self)
out.data = out.data / out.data.mean() * scale_val
return out
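# Worked example of the grand-mean scaling above (numbers assumed): if the
# grand mean of self.data is 500, scale(scale_val=100) multiplies every voxel
# by 100 / 500 = 0.2, so the rescaled grand mean is exactly 100.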
def plot(self, limit=5, anatomical=None, **kwargs):
""" Create a quick plot of self.data. Will plot each image separately
Args:
limit: max number of images to return
anatomical: nifti image or file name to overlay
"""
if anatomical is not None:
if not isinstance(anatomical, nib.Nifti1Image):
if isinstance(anatomical, six.string_types):
anatomical = nib.load(anatomical)
else:
raise ValueError("anatomical is not a nibabel instance")
else:
anatomical = nib.load(resolve_mni_path(MNI_Template)['plot'])
if self.data.ndim == 1:
f, a = plt.subplots(nrows=1, figsize=(15, 2))
plot_stat_map(self.to_nifti(), anatomical,
cut_coords=range(-40, 50, 10), display_mode='z',
black_bg=True, colorbar=True, draw_cross=False,
axes=a, **kwargs)
else:
n_subs = np.minimum(self.data.shape[0], limit)
f, a = plt.subplots(nrows=n_subs, figsize=(15, len(self)*2))
for i in range(n_subs):
plot_stat_map(self[i].to_nifti(), anatomical,
cut_coords=range(-40, 50, 10),
display_mode='z',
black_bg=True,
colorbar=True,
draw_cross=False,
axes = a[i],
**kwargs)
return f
def regress(self, mode='ols', **kwargs):
""" Run a mass-univariate regression across voxels. Three types of regressions can be run:
1) Standard OLS (default)
2) Robust OLS (heteroscedasticity and/or auto-correlation robust errors), i.e. OLS with "sandwich estimators"
3) ARMA (auto-regressive and moving-average lags = 1 by default; experimental)
For more information see the help for nltools.stats.regress
ARMA notes: This experimental mode is similar to AFNI's 3dREMLFit but without spatial smoothing of voxel auto-correlation estimates. It can be **very computationally intensive** so parallelization is used by default to try to speed things up. Speed is limited because a unique ARMA model is fit to *each voxel* (like AFNI/FSL), unlike SPM, which assumes the same AR parameters (~0.2) at each voxel. While coefficient results are typically very similar to OLS, std-errors and thus t-stats, dfs, and p-vals can differ greatly depending on how much auto-correlation is explaining the response in a voxel
relative to other regressors in the design matrix.
Args:
mode (str): kind of model to fit; must be one of 'ols' (default), 'robust', or 'arma'
kwargs (dict): keyword arguments to nltools.stats.regress
Returns:
out: dictionary of regression statistics in Brain_Data instances
{'beta','t','p','df','residual'}
"""
if not isinstance(self.X, pd.DataFrame):
raise ValueError('Make sure self.X is a pandas DataFrame.')
if self.X.empty:
raise ValueError('Make sure self.X is not empty.')
if self.data.shape[0] != self.X.shape[0]:
raise ValueError("self.X does not match the correct size of "
"self.data")
b, t, p, sigma, res = regression(self.X, self.data, mode=mode, **kwargs)
# Prevent copy of all data in self multiple times; instead start with an empty instance and copy only needed attributes from self, and use this as a template for other outputs
b_out = self.__class__()
b_out.mask = deepcopy(self.mask)
b_out.nifti_masker = deepcopy(self.nifti_masker)
# Use this as template for other outputs before setting data
t_out = b_out.copy()
p_out = b_out.copy()
sigma_out = b_out.copy()
res_out = b_out.copy()
b_out.data, t_out.data, p_out.data, sigma_out.data, res_out.data = (b, t, p, sigma, res)
return {'beta': b_out, 't': t_out, 'p': p_out,
'sigma': sigma_out, 'residual': res_out}
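# Hedged usage sketch (hypothetical file name and regressor; not part of the
# original API docs):
#
#     dat = Brain_Data('run1.nii.gz')  # assumed 4D functional image
#     dat.X = pd.DataFrame({'intercept': np.ones(len(dat)),
#                           'task': task_regressor})  # hypothetical regressor
#     out = dat.regress()                  # OLS by default
#     out['t'].plot()                      # t-statistic maps
#     robust = dat.regress(mode='robust')  # sandwich-estimator errors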
def ttest(self, threshold_dict=None):
""" Calculate one sample t-test across each voxel (two-sided)
Args:
threshold_dict: a dictionary of threshold parameters {'unc':.001}
or {'fdr':.05} or {'permutation':tcfe,
n_permutation:5000}
Returns:
out: dictionary of regression statistics in Brain_Data instances
{'t','p'}
"""
t = deepcopy(self)
p = deepcopy(self)
if threshold_dict is not None:
if 'permutation' in threshold_dict:
# Convert data to correct shape (subjects, time, space)
data_convert_shape = deepcopy(self.data)
data_convert_shape = np.expand_dims(data_convert_shape, axis=1)
if 'n_permutations' in threshold_dict:
n_permutations = threshold_dict['n_permutations']
else:
n_permutations = 1000
warnings.warn("n_permutations not set: running with 1000 "
"permutations")
if 'connectivity' in threshold_dict:
connectivity = threshold_dict['connectivity']
else:
connectivity = None
if 'n_jobs' in threshold_dict:
n_jobs = threshold_dict['n_jobs']
else:
n_jobs = 1
if threshold_dict['permutation'] == 'tfce':
perm_threshold = dict(start=0, step=0.2)
else:
perm_threshold = None
if 'stat_fun' in threshold_dict:
stat_fun = threshold_dict['stat_fun']
else:
stat_fun = mne_stats.ttest_1samp_no_p
t.data, clusters, p_values, _ = mne_stats.spatio_temporal_cluster_1samp_test(
data_convert_shape, tail=0, threshold=perm_threshold, stat_fun=stat_fun,
connectivity=connectivity, n_permutations=n_permutations, n_jobs=n_jobs)
t.data = t.data.squeeze()
p = deepcopy(t)
for cl, pval in zip(clusters, p_values):
p.data[cl[1][0]] = pval
else:
t.data, p.data = ttest_1samp(self.data, 0, 0)
else:
t.data, p.data = ttest_1samp(self.data, 0, 0)
if threshold_dict is not None:
if isinstance(threshold_dict, dict):
if 'unc' in threshold_dict:
thr = threshold_dict['unc']
elif 'fdr' in threshold_dict:
thr = fdr(p.data, q=threshold_dict['fdr'])
elif 'permutation' in threshold_dict:
thr = .05
thr_t = threshold(t, p, thr)
out = {'t': t, 'p': p, 'thr_t': thr_t}
else:
raise ValueError("threshold_dict is not a dictionary. "
"Make sure it is in the form of {'unc': .001} "
"or {'fdr': .05}")
else:
out = {'t': t, 'p': p}
return out
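# Hedged usage sketch (assumes dat is a Brain_Data instance of subject-level
# contrast images; not part of the original API docs):
#
#     out = dat.ttest()                                 # {'t': ..., 'p': ...}
#     out_fdr = dat.ttest(threshold_dict={'fdr': .05})  # also returns 'thr_t'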
def append(self, data, **kwargs):
""" Append data to Brain_Data instance
Args:
data: Brain_Data instance to append
kwargs: optional inputs to Design_Matrix append
Returns:
out: new appended Brain_Data instance
"""
if not isinstance(data, Brain_Data):
raise ValueError('Make sure data is a Brain_Data instance')
if self.isempty():
out = deepcopy(data)
else:
error_string = ("Data to append has different number of voxels "
"then Brain_Data instance.")
if len(self.shape()) == 1 and len(data.shape()) == 1:
if self.shape()[0] != data.shape()[0]:
raise ValueError(error_string)
elif len(self.shape()) == 1 and len(data.shape()) > 1:
if self.shape()[0] != data.shape()[1]:
raise ValueError(error_string)
elif len(self.shape()) > 1 and len(data.shape()) == 1:
if self.shape()[1] != data.shape()[0]:
raise ValueError(error_string)
elif self.shape()[1] != data.shape()[1]:
raise ValueError(error_string)
out = deepcopy(self)
out.data = np.vstack([self.data, data.data])
if out.Y.size:
out.Y = self.Y.append(data.Y)
if self.X.size:
if isinstance(self.X, pd.DataFrame):
out.X = self.X.append(data.X,**kwargs)
else:
out.X = np.vstack([self.X, data.X])
return out
def empty(self, data=True, Y=True, X=True):
""" Initalize Brain_Data.data as empty """
tmp = deepcopy(self)
if data:
tmp.data = np.array([])
if Y:
tmp.Y = pd.DataFrame()
if X:
tmp.X = pd.DataFrame()
return tmp
def isempty(self):
""" Check if Brain_Data.data is empty """
if isinstance(self.data, np.ndarray):
if self.data.size:
boolean = False
else:
boolean = True
if isinstance(self.data, list):
if not self.data:
boolean = True
else:
boolean = False
return boolean
def similarity(self, image, method='correlation'):
""" Calculate similarity of Brain_Data() instance with single
Brain_Data or Nibabel image
Args:
image: Brain_Data or Nibabel instance of weight map
method: (str) Type of similarity
['correlation','dot_product','cosine']
Returns:
pexp: Outputs a vector of pattern expression values
"""
if not isinstance(image, Brain_Data):
if isinstance(image, nib.Nifti1Image):
image = Brain_Data(image, mask=self.mask)
else:
raise ValueError("Image is not a Brain_Data or nibabel "
"instance")
# Check to make sure masks are the same for each dataset and if not
# create a union mask
# This might be handy code for a new Brain_Data method
if np.sum(self.nifti_masker.mask_img.get_data() == 1) != np.sum(image.nifti_masker.mask_img.get_data()==1):
new_mask = intersect_masks([self.nifti_masker.mask_img,
image.nifti_masker.mask_img],
threshold=1, connected=False)
new_nifti_masker = NiftiMasker(mask_img=new_mask)
data2 = new_nifti_masker.fit_transform(self.to_nifti())
image2 = new_nifti_masker.fit_transform(image.to_nifti())
else:
data2 = self.data
image2 = image.data
def vector2array(data):
if len(data.shape) == 1:
return data.reshape(-1,1).T
else:
return data
def flatten_array(data):
if np.any(np.array(data.shape)==1):
data = data.flatten()
if len(data) == 1 and data.shape[0] == 1:
data = data[0]
return data
else:
return data
# Calculate pattern expression
if method == 'dot_product':
if len(image2.shape) > 1:
if image2.shape[0] > 1:
pexp = []
for i in range(image2.shape[0]):
pexp.append(np.dot(data2, image2[i, :]))
pexp = np.array(pexp)
else:
pexp = np.dot(data2, image2)
else:
pexp = np.dot(data2, image2)
elif method == 'correlation':
if len(image2.shape) > 1:
if image2.shape[0] > 1:
pexp = []
for i in range(image2.shape[0]):
pexp.append(pearson(image2[i, :], data2))
pexp = np.array(pexp)
else:
pexp = pearson(image2, data2)
else:
pexp = pearson(image2, data2)
elif method == 'cosine':
image2 = vector2array(image2)
data2 = vector2array(data2)
if image2.shape[1] > 1:
pexp = []
for i in range(image2.shape[0]):
pexp.append(cosine_similarity(image2[i, :].reshape(-1,1).T, data2).flatten())
pexp = np.array(pexp)
else:
pexp = cosine_similarity(image2, data2).flatten()
else:
raise ValueError('Method must be one of: correlation, dot_product, cosine')
return flatten_array(pexp)
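# Hedged usage sketch (hypothetical weight-map file; not part of the original
# API docs):
#
#     sig = Brain_Data('weight_map.nii.gz')
#     pexp = dat.similarity(sig, method='correlation')  # one value per image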
def distance(self, method='euclidean', **kwargs):
""" Calculate distance between images within a Brain_Data() instance.
Args:
method: type of distance metric (can use any scikit learn or
scipy metric)
Returns:
dist: Outputs a 2D distance matrix.
"""
return Adjacency(pairwise_distances(self.data, metric=method, **kwargs),
matrix_type='Distance')
def multivariate_similarity(self, images, method='ols'):
""" Predict spatial distribution of Brain_Data() instance from linear
combination of other Brain_Data() instances or Nibabel images
Args:
self: Brain_Data instance of data to be applied
images: Brain_Data instance of weight map
Returns:
out: dictionary of regression statistics in Brain_Data
instances {'beta','t','p','df','residual'}
"""
# Notes: Should add ridge, and lasso, elastic net options options
if len(self.shape()) > 1:
raise ValueError("This method can only decompose a single brain "
"image.")
if not isinstance(images, Brain_Data):
raise ValueError("Images are not a Brain_Data instance")
# Check to make sure masks are the same for each dataset and if not create a union mask
# This might be handy code for a new Brain_Data method
if np.sum(self.nifti_masker.mask_img.get_data() == 1) != np.sum(images.nifti_masker.mask_img.get_data()==1):
new_mask = intersect_masks([self.nifti_masker.mask_img,
images.nifti_masker.mask_img],
threshold=1, connected=False)
new_nifti_masker = NiftiMasker(mask_img=new_mask)
data2 = new_nifti_masker.fit_transform(self.to_nifti())
image2 = new_nifti_masker.fit_transform(images.to_nifti())
else:
data2 = self.data
image2 = images.data
# Add intercept and transpose
image2 = np.vstack((np.ones(image2.shape[1]), image2)).T
# Calculate pattern expression
if method == 'ols':
b = np.dot(np.linalg.pinv(image2), data2)
res = data2 - np.dot(image2, b)
sigma = np.std(res, axis=0)
stderr = np.dot(np.matrix(np.diagonal(np.linalg.inv(np.dot(image2.T,
image2)))**.5).T, np.matrix(sigma))
t_out = b / stderr
df = image2.shape[0]-image2.shape[1]
p = 2*(1-t_dist.cdf(np.abs(t_out), df))
else:
raise NotImplementedError
return {'beta': b, 't': t_out, 'p': p, 'df': df, 'sigma': sigma,
'residual': res}
def predict(self, algorithm=None, cv_dict=None, plot=True, **kwargs):
""" Run prediction
Args:
algorithm: Algorithm to use for prediction. Must be one of 'svm',
'svr', 'linear', 'logistic', 'lasso', 'ridge',
'ridgeClassifier','pcr', or 'lassopcr'
cv_dict: Type of cross_validation to use. A dictionary of
{'type': 'kfolds', 'n_folds': n},
{'type': 'kfolds', 'n_folds': n, 'stratified': Y},
{'type': 'kfolds', 'n_folds': n, 'subject_id': holdout}, or
{'type': 'loso', 'subject_id': holdout}
where 'n' = number of folds, and 'holdout' = vector of
subject ids that corresponds to self.Y
plot: Boolean indicating whether or not to create plots.
**kwargs: Additional keyword arguments to pass to the prediction
algorithm
Returns:
output: a dictionary of prediction parameters
"""
# Set algorithm
if algorithm is not None:
predictor_settings = set_algorithm(algorithm, **kwargs)
else:
# Use SVR as a default
predictor_settings = set_algorithm('svr', **{'kernel': "linear"})
# Initialize output dictionary
output = {}
output['Y'] = np.array(self.Y).flatten()
# Overall Fit for weight map
predictor = predictor_settings['predictor']
predictor.fit(self.data, output['Y'])
output['yfit_all'] = predictor.predict(self.data)
if predictor_settings['prediction_type'] == 'classification':
if predictor_settings['algorithm'] not in ['svm', 'ridgeClassifier',
'ridgeClassifierCV']:
output['prob_all'] = predictor.predict_proba(self.data)[:, 1]
else:
output['dist_from_hyperplane_all'] = predictor.decision_function(self.data)
if predictor_settings['algorithm'] == 'svm' and predictor.probability:
output['prob_all'] = predictor.predict_proba(self.data)[:, 1]
# Intercept
if predictor_settings['algorithm'] == 'pcr':
output['intercept'] = predictor_settings['_regress'].intercept_
elif predictor_settings['algorithm'] == 'lassopcr':
output['intercept'] = predictor_settings['_lasso'].intercept_
else:
output['intercept'] = predictor.intercept_
# Weight map
output['weight_map'] = self.empty()
if predictor_settings['algorithm'] == 'lassopcr':
output['weight_map'].data = np.dot(predictor_settings['_pca'].components_.T, predictor_settings['_lasso'].coef_)
elif predictor_settings['algorithm'] == 'pcr':
output['weight_map'].data = np.dot(predictor_settings['_pca'].components_.T, predictor_settings['_regress'].coef_)
else:
output['weight_map'].data = predictor.coef_.squeeze()
# Cross-Validation Fit
if cv_dict is not None:
cv = set_cv(Y=self.Y, cv_dict=cv_dict)
predictor_cv = predictor_settings['predictor']
output['yfit_xval'] = output['yfit_all'].copy()
output['intercept_xval'] = []
output['weight_map_xval'] = output['weight_map'].copy()
output['cv_idx'] = []
wt_map_xval = []
if predictor_settings['prediction_type'] == 'classification':
if predictor_settings['algorithm'] not in ['svm', 'ridgeClassifier', 'ridgeClassifierCV']:
output['prob_xval'] = np.zeros(len(self.Y))
else:
output['dist_from_hyperplane_xval'] = np.zeros(len(self.Y))
if predictor_settings['algorithm'] == 'svm' and predictor_cv.probability:
output['prob_xval'] = np.zeros(len(self.Y))
for train, test in cv:
predictor_cv.fit(self.data[train], self.Y.loc[train])
output['yfit_xval'][test] = predictor_cv.predict(self.data[test]).ravel()
if predictor_settings['prediction_type'] == 'classification':
if predictor_settings['algorithm'] not in ['svm', 'ridgeClassifier', 'ridgeClassifierCV']:
output['prob_xval'][test] = predictor_cv.predict_proba(self.data[test])[:, 1]
else:
output['dist_from_hyperplane_xval'][test] = predictor_cv.decision_function(self.data[test])
if predictor_settings['algorithm'] == 'svm' and predictor_cv.probability:
output['prob_xval'][test] = predictor_cv.predict_proba(self.data[test])[:, 1]
# Intercept
if predictor_settings['algorithm'] == 'pcr':
output['intercept_xval'].append(predictor_settings['_regress'].intercept_)
elif predictor_settings['algorithm'] == 'lassopcr':
output['intercept_xval'].append(predictor_settings['_lasso'].intercept_)
else:
output['intercept_xval'].append(predictor_cv.intercept_)
output['cv_idx'].append((train,test))
# Weight map
if predictor_settings['algorithm'] == 'lassopcr':
wt_map_xval.append(np.dot(predictor_settings['_pca'].components_.T, predictor_settings['_lasso'].coef_))
elif predictor_settings['algorithm'] == 'pcr':
wt_map_xval.append(np.dot(predictor_settings['_pca'].components_.T, predictor_settings['_regress'].coef_))
else:
wt_map_xval.append(predictor_cv.coef_.squeeze())
output['weight_map_xval'].data = np.array(wt_map_xval)
# Print Results
if predictor_settings['prediction_type'] == 'classification':
output['mcr_all'] = np.mean(output['yfit_all'] == np.array(self.Y).flatten())
print('overall accuracy: %.2f' % output['mcr_all'])
if cv_dict is not None:
output['mcr_xval'] = np.mean(output['yfit_xval'] == np.array(self.Y).flatten())
print('overall CV accuracy: %.2f' % output['mcr_xval'])
elif predictor_settings['prediction_type'] == 'prediction':
output['rmse_all'] = np.sqrt(np.mean((output['yfit_all']-output['Y'])**2))
output['r_all'] = np.corrcoef(output['Y'], output['yfit_all'])[0, 1]
print('overall Root Mean Squared Error: %.2f' % output['rmse_all'])
print('overall Correlation: %.2f' % output['r_all'])
if cv_dict is not None:
output['rmse_xval'] = np.sqrt(np.mean((output['yfit_xval']-output['Y'])**2))
output['r_xval'] = np.corrcoef(output['Y'],output['yfit_xval'])[0, 1]
print('overall CV Root Mean Squared Error: %.2f' % output['rmse_xval'])
print('overall CV Correlation: %.2f' % output['r_xval'])
# Plot
if plot:
if cv_dict is not None:
if predictor_settings['prediction_type'] == 'prediction':
scatterplot(pd.DataFrame({'Y': output['Y'], 'yfit_xval': output['yfit_xval']}))
elif predictor_settings['prediction_type'] == 'classification':
if predictor_settings['algorithm'] not in ['svm', 'ridgeClassifier', 'ridgeClassifierCV']:
output['roc'] = Roc(input_values=output['prob_xval'], binary_outcome=output['Y'].astype('bool'))
else:
output['roc'] = Roc(input_values=output['dist_from_hyperplane_xval'], binary_outcome=output['Y'].astype('bool'))
if predictor_settings['algorithm'] == 'svm' and predictor_cv.probability:
output['roc'] = Roc(input_values=output['prob_xval'], binary_outcome=output['Y'].astype('bool'))
output['roc'].plot()
output['weight_map'].plot()
return output
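# Illustrative usage sketch (hedged; not part of the class, `dat` is a
# hypothetical Brain_Data instance with outcomes in dat.Y). A 5-fold
# cross-validated ridge prediction might look like:
#
#     stats = dat.predict(algorithm='ridge',
#                         cv_dict={'type': 'kfolds', 'n_folds': 5},
#                         plot=False)
#     stats['weight_map']  # Brain_Data weight map from the overall fit
#     stats['yfit_xval']   # cross-validated predictions per image
#     stats['r_xval']      # cross-validated prediction-outcome correlation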
def apply_mask(self, mask):
""" Mask Brain_Data instance
Args:
mask: mask (Brain_Data or nifti object)
"""
if isinstance(mask, Brain_Data):
mask = mask.to_nifti() # convert to nibabel
if not isinstance(mask, nib.Nifti1Image):
if isinstance(mask, six.string_types):
if os.path.isfile(mask):
mask = nib.load(mask)
if not ((self.mask.get_affine() == mask.get_affine()).all()) & (self.mask.shape[0:3] == mask.shape[0:3]):
mask = resample_img(mask, target_affine=self.mask.get_affine(), target_shape=self.mask.shape)
else:
raise ValueError("Mask is not a nibabel instance, Brain_Data "
"instance, or a valid file name.")
masked = deepcopy(self)
nifti_masker = NiftiMasker(mask_img=mask)
masked.data = nifti_masker.fit_transform(self.to_nifti())
masked.nifti_masker = nifti_masker
if (len(masked.shape()) > 1) & (masked.shape()[0] == 1):
masked.data = masked.data.flatten()
return masked
def extract_roi(self, mask, method='mean'):
""" Extract activity from mask
Args:
mask: nibabel mask can be binary or numbered for different rois
method: type of extraction method (default=mean)
Returns:
out: mean within each ROI across images
"""
if not isinstance(mask, Brain_Data):
if isinstance(mask, nib.Nifti1Image):
mask = Brain_Data(mask)
else:
raise ValueError('Make sure mask is a Brain_Data or nibabel '
'instance')
ma = mask.copy()
if len(np.unique(ma.data)) == 2:
if method == 'mean':
out = np.mean(self.data[:, np.where(ma.data)].squeeze(), axis=1)
elif len(np.unique(ma.data)) > 2:
# make sure each ROI id is an integer
ma.data = np.round(ma.data).astype(int)
all_mask = expand_mask(ma)
out = []
for i in range(all_mask.shape()[0]):
if method == 'mean':
out.append(np.mean(self.data[:, np.where(all_mask[i].data)].squeeze(),axis=1))
out = np.array(out)
return out
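# Illustrative usage sketch (hedged; `dat` and `atlas` are hypothetical
# Brain_Data instances). With a numbered atlas mask, extract_roi returns
# an (n_rois, n_images) array of mean activations:
#
#     roi_means = dat.extract_roi(atlas, method='mean')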
def icc(self, icc_type='icc2'):
''' Calculate intraclass correlation coefficient for data within
Brain_Data class
ICC Formulas are based on:
Shrout, P. E., & Fleiss, J. L. (1979). Intraclass correlations: uses in
assessing rater reliability. Psychological bulletin, 86(2), 420.
icc1: x_ij = mu + beta_j + w_ij
icc2/3: x_ij = mu + alpha_i + beta_j + (ab)_ij + epsilon_ij
Code modified from nipype algorithms.icc
https://github.com/nipy/nipype/blob/master/nipype/algorithms/icc.py
Args:
icc_type: type of icc to calculate (icc1: voxel random effect,
icc2: voxel and column random effect, icc3: voxel and
column fixed effect)
Returns:
ICC: intraclass correlation coefficient
'''
Y = self.data.T
[n, k] = Y.shape
# Degrees of Freedom
dfc = k - 1
dfe = (n - 1) * (k-1)
dfr = n - 1
# Sum Square Total
mean_Y = np.mean(Y)
SST = ((Y - mean_Y) ** 2).sum()
# create the design matrix for the different levels
x = np.kron(np.eye(k), np.ones((n, 1))) # sessions
x0 = np.tile(np.eye(n), (k, 1)) # subjects
X = np.hstack([x, x0])
# Sum Square Error
predicted_Y = np.dot(np.dot(np.dot(X, np.linalg.pinv(np.dot(X.T, X))),
X.T), Y.flatten('F'))
residuals = Y.flatten('F') - predicted_Y
SSE = (residuals ** 2).sum()
MSE = SSE / dfe
# Sum square column effect - between colums
SSC = ((np.mean(Y, 0) - mean_Y) ** 2).sum() * n
MSC = SSC / dfc / n
# Sum Square subject effect - between rows/subjects
SSR = SST - SSC - SSE
MSR = SSR / dfr
if icc_type == 'icc1':
# ICC(1,1) = (mean square subject - mean square within) /
# (mean square subject + (k-1)*mean square within)
# ICC = (MSR - MSRW) / (MSR + (k-1) * MSRW)
raise NotImplementedError("This method isn't implemented yet.")
elif icc_type == 'icc2':
# ICC(2,1) = (mean square subject - mean square error) /
# (mean square subject + (k-1)*mean square error +
# k*(mean square columns - mean square error)/n)
ICC = (MSR - MSE) / (MSR + (k-1) * MSE + k * (MSC - MSE) / n)
elif icc_type == 'icc3':
# ICC(3,1) = (mean square subject - mean square error) /
# (mean square subject + (k-1)*mean square error)
ICC = (MSR - MSE) / (MSR + (k-1) * MSE)
return ICC
def detrend(self, method='linear'):
""" Remove linear trend from each voxel
Args:
method: {'linear','constant'}, optional
Returns:
out: detrended Brain_Data instance
"""
if len(self.shape()) == 1:
raise ValueError('Make sure there is more than one image in order '
'to detrend.')
out = deepcopy(self)
out.data = detrend(out.data, type=method, axis=0)
return out
def copy(self):
""" Create a copy of a Brain_Data instance. """
return deepcopy(self)
def upload_neurovault(self, access_token=None, collection_name=None,
collection_id=None, img_type=None, img_modality=None,
**kwargs):
""" Upload Data to Neurovault. Will add any columns in self.X to image
metadata. Index will be used as image name.
Args:
access_token: (Required) Neurovault api access token
collection_name: (Optional) name of new collection to create
collection_id: (Optional) neurovault collection_id if adding images
to existing collection
img_type: (Required) Neurovault map_type
img_modality: (Required) Neurovault image modality
Returns:
collection: neurovault collection information
"""
if access_token is None:
raise ValueError('You must supply a valid neurovault access token')
api = Client(access_token=access_token)
# Check if collection exists
if collection_id is not None:
collection = api.get_collection(collection_id)
else:
try:
collection = api.create_collection(collection_name)
except ValueError:
raise ValueError('Collection Name already exists. Pick a '
'different name or specify an existing collection id')
tmp_dir = os.path.join(tempfile.gettempdir(), str(os.times()[-1]))
os.makedirs(tmp_dir)
def add_image_to_collection(api, collection, dat, tmp_dir, index_id=0,
**kwargs):
'''Upload image to collection
Args:
api: pynv Client instance
collection: collection information
dat: Brain_Data instance to upload
tmp_dir: temporary directory
index_id: (int) index for file naming
'''
if (len(dat.shape()) > 1) & (dat.shape()[0] > 1):
raise ValueError('"dat" must be a single image.')
if not dat.X.empty:
if isinstance(dat.X.name, six.string_types):
img_name = dat.X.name
else:
img_name = collection['name'] + '_' + str(index_id) + '.nii.gz'
else:
img_name = collection['name'] + '_' + str(index_id) + '.nii.gz'
f_path = os.path.join(tmp_dir, img_name)
dat.write(f_path)
if not dat.X.empty:
kwargs.update(dict([(k, dat.X.loc[k]) for k in dat.X.keys()]))
api.add_image(collection['id'],
f_path,
name=img_name,
modality=img_modality,
map_type=img_type,
**kwargs)
if len(self.shape()) == 1:
add_image_to_collection(api, collection, self, tmp_dir, index_id=0,
**kwargs)
else:
for i, x in enumerate(self):
add_image_to_collection(api, collection, x, tmp_dir,
index_id=i, **kwargs)
shutil.rmtree(tmp_dir, ignore_errors=True)
return collection
def r_to_z(self):
''' Apply Fisher's r to z transformation to each element of the data
object.'''
out = self.copy()
out.data = fisher_r_to_z(out.data)
return out
def filter(self, sampling_freq=None, high_pass=None, low_pass=None, **kwargs):
''' Apply a 5th-order Butterworth filter to the data. Wraps nilearn functionality. Does not default to detrending and standardizing like the nilearn implementation, but this can be overridden using kwargs.
Args:
sampling_freq: sampling freq in hertz (i.e. 1 / TR)
high_pass: high pass cutoff frequency
low_pass: low pass cutoff frequency
kwargs: other keyword arguments to nilearn.signal.clean
Returns:
Brain_Data: Filtered Brain_Data instance
'''
if sampling_freq is None:
raise ValueError("Need to provide sampling rate (TR)!")
if high_pass is None and low_pass is None:
raise ValueError("high_pass and/or low_pass cutoff must be "
"provided!")
standardize = kwargs.get('standardize', False)
detrend = kwargs.get('detrend', False)
out = self.copy()
out.data = clean(out.data, t_r=1. / sampling_freq, detrend=detrend,
standardize=standardize, high_pass=high_pass,
low_pass=low_pass, **kwargs)
return out
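# Illustrative usage sketch (hedged; `dat` is a hypothetical Brain_Data
# time series acquired with a 2 s TR). Applies a 0.008 Hz high-pass
# filter via nilearn.signal.clean:
#
#     filtered = dat.filter(sampling_freq=1. / 2.0, high_pass=0.008)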
def dtype(self):
''' Get data type of Brain_Data.data.'''
return self.data.dtype
def astype(self, dtype):
''' Cast Brain_Data.data as type.
Args:
dtype: datatype to convert
Returns:
Brain_Data: Brain_Data instance with new datatype
'''
out = self.copy()
out.data = out.data.astype(dtype)
return out
def standardize(self, method='center'):
''' Standardize Brain_Data() instance.
Args:
method: ['center','zscore']
Returns:
Brain_Data Instance
'''
out = self.copy()
if method == 'center':
out.data = out.data - np.repeat(np.array([np.mean(out.data, axis=0)]).T, len(out), axis=1).T
elif method == 'zscore':
out.data = out.data - np.repeat(np.array([np.mean(out.data, axis=0)]).T, len(out), axis=1).T
out.data = out.data/np.repeat(np.array([np.std(out.data, axis=0)]).T, len(out), axis=1).T
else:
raise ValueError('method must be ["center","zscore"]')
return out
def groupby(self, mask):
'''Create groupby instance'''
return Groupby(self, mask)
def aggregate(self, mask, func):
'''Create new Brain_Data instance that aggregates func over mask'''
dat = self.groupby(mask)
values = dat.apply(func)
return dat.combine(values)
def threshold(self, upper=None, lower=None, binarize=False):
'''Threshold Brain_Data instance. Provide upper and lower values or
percentages to perform two-sided thresholding. Binarize will return
a mask image respecting thresholds if provided, otherwise respecting
every non-zero value.
Args:
upper: (float or str) Upper cutoff for thresholding. If string
will interpret as percentile; can be None for one-sided
thresholding.
lower: (float or str) Lower cutoff for thresholding. If string
will interpret as percentile; can be None for one-sided
thresholding.
binarize (bool): return binarized image respecting thresholds if
provided, otherwise binarize on every non-zero value;
default False
Returns:
Thresholded Brain_Data object.
'''
b = self.copy()
if isinstance(upper, six.string_types):
if upper[-1] == '%':
upper = np.percentile(b.data, float(upper[:-1]))
if isinstance(lower, six.string_types):
if lower[-1] == '%':
lower = np.percentile(b.data, float(lower[:-1]))
if upper and lower:
b.data[(b.data < upper) & (b.data > lower)] = 0
elif upper and not lower:
b.data[b.data < upper] = 0
elif lower and not upper:
b.data[b.data > lower] = 0
if binarize:
b.data[b.data != 0] = 1
return b
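# Illustrative usage sketch (hedged; `dat` is a hypothetical Brain_Data
# statistical map). Keep only the top 5% of values and binarize the
# result into a mask:
#
#     mask = dat.threshold(upper='95%', binarize=True)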
def regions(self, min_region_size=1350, extract_type='local_regions',
smoothing_fwhm=6, is_mask=False):
''' Extract brain connected regions into separate regions.
Args:
min_region_size (int): Minimum volume in mm3 for a region to be
kept.
extract_type (str): Type of extraction method
['connected_components', 'local_regions'].
If 'connected_components', each component/region
in the image is extracted automatically by
labelling each region based upon the presence of
unique features in their respective regions.
If 'local_regions', each component/region is
extracted based on their maximum peak value to
define a seed marker and then using random
walker segmentation algorithm on these
markers for region separation.
smoothing_fwhm (scalar): Smooth an image to extract sparser
regions. Only works for extract_type
'local_regions'.
is_mask (bool): Whether the Brain_Data instance should be treated
as a boolean mask and if so, calls
connected_label_regions instead.
Returns:
Brain_Data: Brain_Data instance with extracted ROIs as data.
'''
if is_mask:
regions = connected_label_regions(self.to_nifti())
else:
regions, _ = connected_regions(self.to_nifti(),
min_region_size, extract_type,
smoothing_fwhm)
return Brain_Data(regions, mask=self.mask)
def transform_pairwise(self):
''' Transform data into all pairwise comparisons between observations.
Args:
Returns:
Brain_Data: Brain_Data instance transformed into pairwise comparisons
'''
out = self.copy()
out.data, new_Y = transform_pairwise(self.data,self.Y)
out.Y = pd.DataFrame(new_Y)
out.Y.replace(-1,0,inplace=True)
return out
def bootstrap(self, function, n_samples=5000, save_weights=False,
n_jobs=-1, random_state=None, *args, **kwargs):
'''Bootstrap a Brain_Data method.
Example Usage:
b = dat.bootstrap('mean', n_samples=5000)
b = dat.bootstrap('predict', n_samples=5000, algorithm='ridge')
b = dat.bootstrap('predict', n_samples=5000, save_weights=True)
Args:
function: (str) method to apply to data for each bootstrap
n_samples: (int) number of samples to bootstrap with replacement
save_weights: (bool) Save each bootstrap iteration
(useful for aggregating many bootstraps on a cluster)
n_jobs: (int) The number of CPUs to use to do the computation.
-1 means all CPUs.
Returns:
output: summarized studentized bootstrap output
'''
random_state = check_random_state(random_state)
seeds = random_state.randint(MAX_INT, size=n_samples)
bootstrapped = Parallel(n_jobs=n_jobs)(
delayed(_bootstrap_apply_func)(self,
function, random_state=seeds[i], *args, **kwargs)
for i in range(n_samples))
if function == 'predict':
bootstrapped = [x['weight_map'] for x in bootstrapped]
bootstrapped = Brain_Data(bootstrapped)
return summarize_bootstrap(bootstrapped, save_weights=save_weights)
def decompose(self, algorithm='pca', axis='voxels', n_components=None,
*args, **kwargs):
''' Decompose Brain_Data object
Args:
algorithm: (str) Algorithm to perform decomposition
types=['pca','ica','nnmf','fa']
axis: dimension to decompose ['voxels','images']
n_components: (int) number of components. If None then retain
as many as possible.
Returns:
output: a dictionary of decomposition parameters
'''
out = {}
out['decomposition_object'] = set_decomposition_algorithm(
algorithm=algorithm,
n_components=n_components,
*args, **kwargs)
if axis == 'images':
out['decomposition_object'].fit(self.data.T)
out['components'] = self.empty()
out['components'].data = out['decomposition_object'].transform(
self.data.T).T
out['weights'] = out['decomposition_object'].components_.T
if axis == 'voxels':
out['decomposition_object'].fit(self.data)
out['weights'] = out['decomposition_object'].transform(self.data)
out['components'] = self.empty()
out['components'].data = out['decomposition_object'].components_
return out
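# Illustrative usage sketch (hedged; `dat` is a hypothetical Brain_Data
# instance). Extract 5 spatial components with PCA across images:
#
#     out = dat.decompose(algorithm='pca', axis='images', n_components=5)
#     out['components']  # Brain_Data instance holding the component maps
#     out['weights']     # per-image loadings on each component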
def align(self, target, method='procrustes', n_features=None, axis=0,
*args, **kwargs):
''' Align Brain_Data instance to target object
Can be used to hyperalign source data to target data using
Hyperalignment from Dartmouth (i.e., procrustes transformation; see
nltools.stats.procrustes) or Shared Response Model from Princeton (see
nltools.external.srm). (see nltools.stats.align for aligning many data
objects together). The common model is the shared response model or the
centered target data. Transformed data can be back-projected to the
original data using the transformation matrix.
Examples:
Hyperalign using procrustes transform:
out = data.align(target, method='procrustes')
Align using shared response model:
out = data.align(target, method='probabilistic_srm', n_features=None)
Project aligned data into original data:
original_data = np.dot(out['transformed'].data,out['transformation_matrix'].T)
Args:
target: (Brain_Data) object to align to.
method: (str) alignment method to use
['probabilistic_srm','deterministic_srm','procrustes']
n_features: (int) number of features to align to common space.
If None then will select number of voxels
axis: (int) axis to align on
Returns:
out: (dict) a dictionary containing transformed object,
transformation matrix, and the shared response matrix
'''
source = self.copy()
common = target.copy()
if not isinstance(target, Brain_Data):
raise ValueError("Target must be Brain_Data instance.")
if method not in ['probabilistic_srm', 'deterministic_srm','procrustes']:
raise ValueError("Method must be ['probabilistic_srm','deterministic_srm','procrustes']")
data1 = source.data.T
data2 = target.data.T
if axis == 1:
data1 = data1.T
data2 = data2.T
out = dict()
if method in ['deterministic_srm', 'probabilistic_srm']:
if n_features is None:
n_features = data1.shape[0]
if method == 'deterministic_srm':
srm = DetSRM(features=n_features, *args, **kwargs)
elif method == 'probabilistic_srm':
srm = SRM(features=n_features, *args, **kwargs)
srm.fit([data1, data2])
source.data = srm.transform([data1, data2])[0].T
common.data = srm.s_.T
out['transformed'] = source
out['common_model'] = common
out['transformation_matrix'] = srm.w_[0]
elif method == 'procrustes':
if n_features is not None:
raise NotImplementedError('Currently must use all voxels. '
'Eventually will add a PCA '
'reduction; must do this manually '
'for now.')
mtx1, mtx2, out['disparity'], t, out['scale'] = procrustes(data2.T,
data1.T)
source.data = mtx2
common.data = mtx1
out['transformed'] = source
out['common_model'] = common
out['transformation_matrix'] = t
if axis == 1:
out['transformed'].data = out['transformed'].data.T
out['common_model'].data = out['common_model'].data.T
return out
class Groupby(object):
def __init__(self, data, mask):
if not isinstance(data, Brain_Data):
raise ValueError('Groupby requires a Brain_Data instance.')
if not isinstance(mask, Brain_Data):
if isinstance(mask, nib.Nifti1Image):
mask = Brain_Data(mask)
else:
raise ValueError('mask must be a Brain_Data instance.')
mask.data = np.round(mask.data).astype(int)
if len(mask.shape()) <= 1:
if len(np.unique(mask.data)) > 2:
mask = expand_mask(mask)
else:
raise ValueError('mask does not have enough groups.')
self.mask = mask
self.split(data, mask)
def __repr__(self):
return '%s.%s(len=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
len(self),
)
def __len__(self):
return len(self.data)
def __iter__(self):
for x in self.data:
yield (x, self.data[x])
def __getitem__(self, index):
if isinstance(index, int):
return self.data[index]
else:
raise ValueError('Groupby currently only supports integer indexing')
def split(self, data, mask):
'''Split Brain_Data instance into separate masks and store as a
dictionary.
'''
self.data = {}
for i, m in enumerate(mask):
self.data[i] = data.apply_mask(m)
def apply(self, method):
'''Apply Brain_Data instance methods to each element of Groupby
object.
'''
return dict([(i, getattr(x, method)()) for i, x in self])
def combine(self, value_dict):
'''Combine value dictionary back into masks'''
out = self.mask.copy().astype(float)
for i in iter(value_dict.keys()):
if isinstance(value_dict[i], Brain_Data):
if value_dict[i].shape()[0] == np.sum(self.mask[i].data):
out.data[i, out.data[i, :] == 1] = value_dict[i].data
else:
raise ValueError('Brain_Data instances are different '
'shapes.')
elif isinstance(value_dict[i], (float, int, bool, np.number)):
out.data[i, :] = out.data[i, :]*value_dict[i]
else:
raise ValueError('No method for aggregation implemented for %s '
'yet.' % type(value_dict[i]))
return out.sum()
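# Illustrative usage sketch for Groupby/aggregate (hedged; `dat` is a
# hypothetical Brain_Data instance and `atlas` a numbered ROI mask).
# Average the data within each ROI and recombine into a single image:
#
#     gb = dat.groupby(atlas)       # split data by the expanded mask
#     means = gb.apply('mean')      # {roi_index: aggregated value}
#     combined = gb.combine(means)  # project values back into the mask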
| mit |
sharafcode/EM-Algorithm-for-Text-clustering-using-sickit-learn- | em_utilities.py | 1 | 5403 | from scipy.sparse import csr_matrix
from scipy.sparse import spdiags
from scipy.stats import multivariate_normal
#import graphlab
import numpy as np
import sys
import time
from copy import deepcopy
from sklearn.metrics import pairwise_distances
from sklearn.preprocessing import normalize, OneHotEncoder
def sframe_to_scipy(x, column_name):
'''
Convert a dictionary column of an SFrame into a sparse matrix format where
each (row_id, column_id, value) triple corresponds to the value of
x[row_id][column_id], where column_id is a key in the dictionary.
Example
>>> sparse_matrix, map_key_to_index = sframe_to_scipy(sframe, column_name)
'''
assert x[column_name].dtype() == dict, \
'The chosen column must be dict type, representing sparse data.'
# Create triples of (row_id, feature_id, count).
# 1. Add a row number.
x = x.add_row_number()
# 2. Stack will transform x to have a row for each unique (row, key) pair.
x = x.stack(column_name, ['feature', 'value'])
# Map words into integers using a OneHotEncoder feature transformation.
# NOTE (assumption): this follows the GraphLab Create
# feature_engineering.OneHotEncoder API (fit/transform on an SFrame and a
# 'feature_encoding' lookup below); scikit-learn's OneHotEncoder has no
# `features` argument, so this call will not run against sklearn as-is.
f = OneHotEncoder(features=['feature'])
# 1. Fit the transformer using the above data.
f.fit(x)
# 2. The transform takes 'feature' column and adds a new column 'feature_encoding'.
x = f.transform(x)
# 3. Get the feature mapping.
mapping = f['feature_encoding']
# 4. Get the feature id to use for each key.
x['feature_id'] = x['encoded_features'].dict_keys().apply(lambda x: x[0])
# Create numpy arrays that contain the data for the sparse matrix.
i = np.array(x['id'])
j = np.array(x['feature_id'])
v = np.array(x['value'])
width = x['id'].max() + 1
height = x['feature_id'].max() + 1
# Create a sparse matrix.
mat = csr_matrix((v, (i, j)), shape=(width, height))
return mat, mapping
def diag(array):
n = len(array)
return spdiags(array, 0, n, n)
def logpdf_diagonal_gaussian(x, mean, cov):
'''
Compute logpdf of a multivariate Gaussian distribution with diagonal covariance at a given point x.
A multivariate Gaussian distribution with a diagonal covariance is equivalent
to a collection of independent Gaussian random variables.
x should be a sparse matrix. The logpdf will be computed for each row of x.
mean and cov should be given as 1D numpy arrays
mean[i] : mean of i-th variable
cov[i] : variance of i-th variable'''
n = x.shape[0]
dim = x.shape[1]
assert(dim == len(mean) and dim == len(cov))
# multiply each i-th column of x by 1/(sqrt(2)*sigma_i), where sigma_i is sqrt of variance of i-th variable,
# so that squared Euclidean distances give SUM[(x_i - mean_i)^2/(2*sigma_i^2)]
scaled_x = x.dot( diag(1./np.sqrt(2*cov)) )
# multiply each i-th entry of mean by 1/(sqrt(2)*sigma_i)
scaled_mean = mean/np.sqrt(2*cov)
# sum of pairwise squared Euclidean distances gives SUM[(x_i - mean_i)^2/(2*sigma_i^2)]
return -np.sum(np.log(np.sqrt(2*np.pi*cov))) - pairwise_distances(scaled_x, [scaled_mean], 'euclidean').flatten()**2
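# Sanity-check sketch (hedged; values made up). With the 1/sqrt(2*cov)
# scaling above, each row matches the scipy reference:
#
#     x = csr_matrix(np.array([[0.5], [1.5]]))
#     logpdf_diagonal_gaussian(x, mean=np.array([1.]), cov=np.array([.25]))
#     # first entry should equal multivariate_normal.logpdf(0.5, 1., .25)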
def log_sum_exp(x, axis):
'''Compute the log of a sum of exponentials'''
x_max = np.max(x, axis=axis)
if axis == 1:
return x_max + np.log( np.sum(np.exp(x-x_max[:,np.newaxis]), axis=1) )
else:
return x_max + np.log( np.sum(np.exp(x-x_max), axis=0) )
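# Quick numeric sketch (hedged; values made up): log_sum_exp agrees with
# the naive computation but stays stable where np.exp would overflow:
#
#     x = np.array([[1000., 1001.], [3., 4.]])
#     log_sum_exp(x, axis=1)
#     # -> approx [1001.3133, 4.3133]; np.log(np.sum(np.exp(x), axis=1))
#     # returns inf for the first row instead.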
def EM_for_high_dimension(data, means, covs, weights, cov_smoothing=1e-5, maxiter=int(1e3), thresh=1e-4, verbose=False):
# cov_smoothing: specifies the default variance assigned to absent features in a cluster.
# If we were to assign zero variances to absent features, we would be overconfident,
# as we hastily conclude that those features would NEVER appear in the cluster.
# We'd like to leave a little bit of possibility for absent features to show up later.
n = data.shape[0]
dim = data.shape[1]
mu = deepcopy(means)
Sigma = deepcopy(covs)
K = len(mu)
weights = np.array(weights)
ll = None
ll_trace = []
for i in range(maxiter):
# E-step: compute responsibilities
logresp = np.zeros((n,K))
for k in range(K):
logresp[:,k] = np.log(weights[k]) + logpdf_diagonal_gaussian(data, mu[k], Sigma[k])
ll_new = np.sum(log_sum_exp(logresp, axis=1))
if verbose:
print(ll_new)
sys.stdout.flush()
logresp -= np.vstack(log_sum_exp(logresp, axis=1))
resp = np.exp(logresp)
counts = np.sum(resp, axis=0)
# M-step: update weights, means, covariances
weights = counts / np.sum(counts)
for k in range(K):
mu[k] = (diag(resp[:,k]).dot(data)).sum(axis=0)/counts[k]
mu[k] = mu[k].A1
Sigma[k] = diag(resp[:,k]).dot( data.multiply(data)-2*data.dot(diag(mu[k])) ).sum(axis=0) \
+ (mu[k]**2)*counts[k]
Sigma[k] = Sigma[k].A1 / counts[k] + cov_smoothing*np.ones(dim)
# check for convergence in log-likelihood
ll_trace.append(ll_new)
if ll is not None and (ll_new-ll) < thresh and ll_new > -np.inf:
ll = ll_new
break
else:
ll = ll_new
out = {'weights':weights,'means':mu,'covs':Sigma,'loglik':ll_trace,'resp':resp}
return out
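# Illustrative usage sketch (hedged; `tf_idf` is a hypothetical sparse
# document-term matrix). Seed the means from randomly chosen documents
# and the covariances from the overall data variance:
#
#     K = 10
#     chosen = np.random.choice(tf_idf.shape[0], K, replace=False)
#     means = [tf_idf[i].toarray().ravel() for i in chosen]
#     var = np.asarray(tf_idf.multiply(tf_idf).mean(axis=0)).ravel() \
#           - np.asarray(tf_idf.mean(axis=0)).ravel()**2
#     covs = [var.copy() for _ in range(K)]
#     weights = [1. / K] * K
#     out = EM_for_high_dimension(tf_idf, means, covs, weights)
#     out['loglik']  # log-likelihood trace across iterations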
| mit |
ltiao/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 67 | 9084 | import numpy as np
from sklearn.utils import check_array
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[np.newaxis, 1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[np.newaxis, 1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample[np.newaxis, :])
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_input():
n_components = 100
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
Xf = check_array(X, order='F')
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
a = sparse_encode(X, V, algorithm=algo)
b = sparse_encode(Xf, V, algorithm=algo)
assert_array_almost_equal(a, b)
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
openpathsampling/openpathsampling | openpathsampling/analysis/tis/flux.py | 3 | 14062 | import collections
import openpathsampling as paths
from openpathsampling.netcdfplus import StorableNamedObject
import pandas as pd
import numpy as np
from .core import MultiEnsembleSamplingAnalyzer
def flux_matrix_pd(flux_matrix, sort_method="default"):
"""Convert dict form of flux to a pandas.Series
Parameters
----------
flux_matrix : dict of {(state, interface): flux}
the output of a flux calculation; flux out of state and through
interface
sort_method : callable or str
method that takes a list of 2-tuple key from flux_matrix and returns
a sorted list. Strings can be used to select internally-defined
methods. Currently implemented: "default"
(:meth:`.default_flux_sort`).
Returns
-------
:class:`pandas.Series` :
The flux represented in a pandas series
"""
keys = list(flux_matrix.keys())
known_method_names = {
'default': default_flux_sort
}
if isinstance(sort_method, str):
try:
sort_method = known_method_names[sort_method.lower()]
except KeyError:
raise KeyError("Unknown sort_method name: " + str(sort_method))
if sort_method is not None:
ordered = sort_method(keys)
else:
ordered = keys
values = [flux_matrix[k] for k in ordered]
index_vals = [(k[0].name, k[1].name) for k in ordered]
index = pd.MultiIndex.from_tuples(list(index_vals),
names=["State", "Interface"])
return pd.Series(values, index=index, name="Flux")
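# Illustrative usage sketch (hedged; values made up). Any objects with a
# `.name` attribute can stand in for Volumes:
#
#     State = collections.namedtuple('State', ['name'])
#     series = flux_matrix_pd({(State('A'), State('A_0')): 0.021})
#     # -> pandas.Series with a ('State', 'Interface') MultiIndex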
def default_flux_sort(tuple_list):
"""Default sort for flux pairs.
Flux results are reported in terms of flux pairs like ``(state,
interface)``. This sorts them using the ``.name`` strings for the
volumes.
"""
name_to_volumes = {(t[0].name, t[1].name): t for t in tuple_list}
sorted_results = sorted(name_to_volumes.keys())
return [name_to_volumes[key] for key in sorted_results]
class MinusMoveFlux(MultiEnsembleSamplingAnalyzer):
"""
Calculating the flux from the minus move.
Raises
------
ValueError
if the number of interface sets per minus move is greater than one.
Cannot use Minus Move flux calculation with multiple interface set
TIS.
Parameters
----------
scheme: :class:`.MoveScheme`
move scheme that was used (includes information on the minus movers
and on the network)
flux_pairs: list of 2-tuple of :class:`.Volume`
pairs of (state, interface) for calculating the flux out of the
volume and through the state. Default is `None`, in which case the
state and innermost interface are used.
"""
def __init__(self, scheme, flux_pairs=None):
super(MinusMoveFlux, self).__init__()
# error string we'll re-use in a few places
mistis_err_str = ("Cannot use minus move flux with multiple "
+ "interface sets. ")
self.scheme = scheme
self.network = scheme.network
self.minus_movers = scheme.movers['minus']
for mover in self.minus_movers:
n_innermost = len(mover.innermost_ensembles)
if n_innermost != 1:
raise ValueError(
mistis_err_str + "Mover " + str(mover) + " does not "
+ "have exactly one innermost ensemble. Found "
+ str(len(mover.innermost_ensembles)) + ")."
)
if flux_pairs is None:
# get flux_pairs from network
flux_pairs = []
minus_ens_to_trans = self.network.special_ensembles['minus']
for minus_ens in self.network.minus_ensembles:
n_trans = len(minus_ens_to_trans[minus_ens])
if n_trans > 1: # pragma: no cover
# Should have been caught by the previous ValueError. If
# you hit this, something unexpected happened.
raise ValueError(mistis_err_str + "Ensemble "
+ repr(minus_ens) + " connects "
+ str(n_trans) + " transitions.")
trans = minus_ens_to_trans[minus_ens][0]
innermost = trans.interfaces[0]
state = trans.stateA
# a couple assertions as a sanity check
assert minus_ens.state_vol == state
assert minus_ens.innermost_vol == innermost
flux_pairs.append((state, innermost))
self.flux_pairs = flux_pairs
def _get_minus_steps(self, steps):
"""
Selects steps that used this object's minus movers
"""
return [s for s in steps
if s.change.canonical.mover in self.minus_movers
and s.change.accepted]
def trajectory_transition_flux_dict(self, minus_steps):
"""
Main minus move-based flux analysis routine.
Parameters
----------
minus_steps: list of :class:`.MCStep`
steps that used the minus movers
Returns
-------
dict of {(:class:`.Volume`, :class:`.Volume`): dict}
keys are (state, interface); values are the result dict from
:meth:`.TrajectoryTransitionAnalysis.analyze_flux` (keys are
strings 'in' and 'out', mapping to
:class:`.TrajectorySegmentContainer` with appropriate frames).
"""
# set up a few mappings that make it easier set up other things
flux_pair_to_transition = {
(trans.stateA, trans.interfaces[0]): trans
for trans in self.network.sampling_transitions
}
flux_pair_to_minus_mover = {
(m.minus_ensemble.state_vol, m.minus_ensemble.innermost_vol): m
for m in self.minus_movers
}
minus_mover_to_flux_pair = {flux_pair_to_minus_mover[k]: k
for k in flux_pair_to_minus_mover}
flux_pair_to_minus_ensemble = {
(minus_ens.state_vol, minus_ens.innermost_vol): minus_ens
for minus_ens in self.network.minus_ensembles
}
# sanity checks -- only run once per analysis, so keep them in
for pair in self.flux_pairs:
assert pair in flux_pair_to_transition.keys()
assert pair in flux_pair_to_minus_mover.keys()
assert len(self.flux_pairs) == len(minus_mover_to_flux_pair)
# organize the steps by mover used
mover_to_steps = collections.defaultdict(list)
for step in minus_steps:
mover_to_steps[step.change.canonical.mover].append(step)
# create the actual TrajectoryTransitionAnalysis objects to use
transition_flux_calculators = {
k: paths.TrajectoryTransitionAnalysis(
transition=flux_pair_to_transition[k],
dt=flux_pair_to_minus_mover[k].engine.snapshot_timestep
)
for k in self.flux_pairs
}
# do the analysis
results = {}
flux_pairs = self.progress(self.flux_pairs, desc="Flux")
for flux_pair in flux_pairs:
(state, innermost) = flux_pair
mover = flux_pair_to_minus_mover[flux_pair]
calculator = transition_flux_calculators[flux_pair]
minus_ens = flux_pair_to_minus_ensemble[flux_pair]
# TODO: this won't work for SR minus, I don't think
# (but neither would our old version)
trajectories = [s.active[minus_ens].trajectory
for s in mover_to_steps[mover]]
mover_trajs = self.progress(trajectories, leave=False)
results[flux_pair] = calculator.analyze_flux(
trajectories=mover_trajs,
state=state,
interface=innermost
)
return results
@staticmethod
def from_trajectory_transition_flux_dict(flux_dicts):
"""Load from existing TrajectoryTransitionAnalysis calculations.
Parameters
----------
flux_dicts: dict of {(:class:`.Volume`, :class:`.Volume`): dict}
keys are (state, interface); values are the result dict from
:meth:`.TrajectoryTransitionAnalysis.analyze_flux` (keys are
strings 'in' and 'out', mapping to
:class:`.TrajectorySegmentContainer` with appropriate frames).
Returns
-------
dict of {(:class:`.Volume`, :class:`.Volume`): float}
keys are (state, interface); values are the associated flux
"""
TTA = paths.TrajectoryTransitionAnalysis # readability on 80 col
return {k: TTA.flux_from_flux_dict(flux_dicts[k])
for k in flux_dicts}
def from_weighted_trajectories(self, input_dict):
"""Not implemented for flux calculation."""
# this can't be done, e.g., in the case of the single replica minus
# mover, where the minus trajectory isn't in the active samples
raise NotImplementedError(
"Can not calculate minus move from weighted trajectories."
)
def calculate(self, steps):
"""Perform the analysis, using `steps` as input.
Parameters
----------
steps : iterable of :class:`.MCStep`
the steps to use as input for this analysis
Returns
-------
dict of {(:class:`.Volume`, :class:`.Volume`): float}
keys are (state, interface); values are the associated flux
"""
intermediates = self.intermediates(steps)
return self.calculate_from_intermediates(*intermediates)
def intermediates(self, steps):
"""Calculate intermediates, using `steps` as input.
Parameters
----------
steps : iterable of :class:`.MCStep`
the steps to use as input for this analysis
Returns
-------
list (len 1) of dict of {(:class:`.Volume`, :class:`.Volume`): dict}
keys are (state, interface); values are the result dict from
:meth:`.TrajectoryTransitionAnalysis.analyze_flux` (keys are
strings 'in' and 'out', mapping to
:class:`.TrajectorySegmentContainer` with appropriate frames).
"""
minus_steps = self._get_minus_steps(steps)
return [self.trajectory_transition_flux_dict(minus_steps)]
def calculate_from_intermediates(self, *intermediates):
"""Perform the analysis, using intermediates as input.
Parameters
----------
intermediates :
output of :meth:`.intermediates`
Returns
-------
dict of {(:class:`.Volume`, :class:`.Volume`): float}
keys are (state, interface); values are the associated flux
"""
flux_dicts = intermediates[0]
return self.from_trajectory_transition_flux_dict(flux_dicts)
class DictFlux(MultiEnsembleSamplingAnalyzer):
"""Pre-calculated flux, provided as a dict.
Parameters
----------
flux_dict: dict of {(:class:`.Volume`, :class:`.Volume`): float}
keys are (state, interface) pairs; values are associated flux
"""
def __init__(self, flux_dict):
super(DictFlux, self).__init__()
self.flux_dict = flux_dict
def calculate(self, steps):
"""Perform the analysis, using `steps` as input.
Parameters
----------
steps : iterable of :class:`.MCStep`
the steps to use as input for this analysis
Returns
-------
dict of {(:class:`.Volume`, :class:`.Volume`): float}
keys are (state, interface); values are the associated flux
"""
return self.flux_dict
def from_weighted_trajectories(self, input_dict):
"""Calculate results from weighted trajectories dictionary.
For :class:`.DictFlux`, this ignores the input.
Parameters
----------
input_dict : dict of {:class:`.Ensemble`: collections.Counter}
ensemble as key, and a counter mapping each trajectory
associated with that ensemble to its counter of time spent in
the ensemble.
Returns
-------
dict of {(:class:`.Volume`, :class:`.Volume`): float}
keys are (state, interface); values are the associated flux
"""
return self.flux_dict
def intermediates(self, steps):
"""Calculate intermediates, using `steps` as input.
Parameters
----------
steps : iterable of :class:`.MCStep`
the steps to use as input for this analysis
Returns
-------
list
empty list; the method is a placeholder for this class
"""
return []
def calculate_from_intermediates(self, *intermediates):
"""Perform the analysis, using intermediates as input.
Parameters
----------
intermediates :
output of :meth:`.intermediates`
Returns
-------
dict of {(:class:`.Volume`, :class:`.Volume`): float}
keys are (state, interface); values are the associated flux
"""
return self.flux_dict
@staticmethod
def combine_results(result_1, result_2):
"""Combine two sets of results from this analysis.
For :class:`.DictFlux`, the results must be identical.
Parameters
----------
result_1 : dict of {(:class:`.Volume`, :class:`.Volume`): float}
first set of results from a flux calculation
result_2 : dict of {(:class:`.Volume`, :class:`.Volume`): float}
second set of results from a flux calculation
Returns
-------
dict of {(:class:`.Volume`, :class:`.Volume`): float}
keys are (state, interface); values are the associated flux
"""
if result_1 != result_2:
raise RuntimeError("Combining results from different DictFlux")
return result_1
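# Illustrative usage sketch (hedged; `state_A` and `interface_0` are
# hypothetical Volume objects). DictFlux wraps a pre-computed flux so
# downstream TIS analysis can consume it without recomputation:
#
#     flux_method = DictFlux({(state_A, interface_0): 0.021})
#     flux_method.calculate(steps)  # ignores `steps`, returns the dict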
| mit |
jforbess/pvlib-python | pvlib/solarposition.py | 2 | 24131 | """
Calculate the solar position using a variety of methods/packages.
"""
# Contributors:
# Rob Andrews (@Calama-Consulting), Calama Consulting, 2014
# Will Holmgren (@wholmgren), University of Arizona, 2014
# Tony Lorenzo (@alorenzo175), University of Arizona, 2015
from __future__ import division
import os
import logging
pvl_logger = logging.getLogger('pvlib')
import datetime as dt
try:
from importlib import reload
except ImportError:
try:
from imp import reload
except ImportError:
pass
import numpy as np
import pandas as pd
from pvlib.tools import localize_to_utc, datetime_to_djd, djd_to_datetime
def get_solarposition(time, location, method='nrel_numpy', pressure=101325,
temperature=12, **kwargs):
"""
A convenience wrapper for the solar position calculators.
Parameters
----------
time : pandas.DatetimeIndex
location : pvlib.Location object
method : string
'pyephem' uses the PyEphem package: :func:`pyephem`
'nrel_c' uses the NREL SPA C code [3]: :func:`spa_c`
'nrel_numpy' uses an implementation of the NREL SPA algorithm
described in [1] (default): :func:`spa_python`
'nrel_numba' uses an implementation of the NREL SPA algorithm
described in [1], but also compiles the code first: :func:`spa_python`
'ephemeris' uses the pvlib ephemeris code: :func:`ephemeris`
pressure : float
Pascals.
temperature : float
Degrees C.
Other keywords are passed to the underlying solar position function.
References
----------
[1] I. Reda and A. Andreas, Solar position algorithm for solar radiation
applications. Solar Energy, vol. 76, no. 5, pp. 577-589, 2004.
[2] I. Reda and A. Andreas, Corrigendum to Solar position algorithm for
solar radiation applications. Solar Energy, vol. 81, no. 6, p. 838, 2007.
[3] NREL SPA code: http://rredc.nrel.gov/solar/codesandalgorithms/spa/
"""
method = method.lower()
if isinstance(time, dt.datetime):
time = pd.DatetimeIndex([time, ])
if method == 'nrel_c':
ephem_df = spa_c(time, location, pressure, temperature, **kwargs)
elif method == 'nrel_numba':
ephem_df = spa_python(time, location, pressure, temperature,
how='numba', **kwargs)
elif method == 'nrel_numpy':
ephem_df = spa_python(time, location, pressure, temperature,
how='numpy', **kwargs)
elif method == 'pyephem':
ephem_df = pyephem(time, location, pressure, temperature, **kwargs)
elif method == 'ephemeris':
ephem_df = ephemeris(time, location, pressure, temperature, **kwargs)
else:
raise ValueError('Invalid solar position method')
return ephem_df
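# Illustrative usage sketch (hedged; the location values are made up and
# pvlib.location is assumed importable):
#
#     import pvlib
#     times = pd.date_range('2015-06-21', periods=24, freq='1H',
#                           tz='US/Arizona')
#     tus = pvlib.location.Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
#     solpos = get_solarposition(times, tus, method='nrel_numpy')
#     solpos[['apparent_zenith', 'azimuth']].head()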
def spa_c(time, location, pressure=101325, temperature=12, delta_t=67.0,
raw_spa_output=False):
"""
Calculate the solar position using the C implementation of the NREL
SPA code
The source files for this code are located in './spa_c_files/', along with
a README file which describes how the C code is wrapped in Python.
Due to license restrictions, the C code must be downloaded separately
and used in accordance with its license.
Parameters
----------
time : pandas.DatetimeIndex
location : pvlib.Location object
pressure : float
Pressure in Pascals
temperature : float
Temperature in C
delta_t : float
Difference between terrestrial time and UT1.
USNO has previous values and predictions.
raw_spa_output : bool
If true, returns the raw SPA output.
Returns
-------
DataFrame
The DataFrame will have the following columns:
elevation,
azimuth,
zenith,
apparent_elevation,
apparent_zenith.
References
----------
NREL SPA code: http://rredc.nrel.gov/solar/codesandalgorithms/spa/
USNO delta T: http://www.usno.navy.mil/USNO/earth-orientation/eo-products/long-term
See also
--------
pyephem, spa_python, ephemeris
"""
# Added by Rob Andrews (@Calama-Consulting), Calama Consulting, 2014
# Edited by Will Holmgren (@wholmgren), University of Arizona, 2014
# Edited by Tony Lorenzo (@alorenzo175), University of Arizona, 2015
try:
from pvlib.spa_c_files.spa_py import spa_calc
except ImportError:
raise ImportError('Could not import built-in SPA calculator. ' +
'You may need to recompile the SPA code.')
pvl_logger.debug('using built-in spa code to calculate solar position')
time_utc = localize_to_utc(time, location)
spa_out = []
for date in time_utc:
spa_out.append(spa_calc(year=date.year,
month=date.month,
day=date.day,
hour=date.hour,
minute=date.minute,
second=date.second,
timezone=0, # tz corrections handled above
latitude=location.latitude,
longitude=location.longitude,
elevation=location.altitude,
pressure=pressure / 100,
temperature=temperature,
delta_t=delta_t
))
spa_df = pd.DataFrame(spa_out, index=time_utc).tz_convert(location.tz)
if raw_spa_output:
return spa_df
else:
dfout = pd.DataFrame({'azimuth': spa_df['azimuth'],
'apparent_zenith': spa_df['zenith'],
'apparent_elevation': spa_df['e'],
'elevation': spa_df['e0'],
'zenith': 90 - spa_df['e0']})
return dfout
def _spa_python_import(how):
"""Compile spa.py appropriately"""
from pvlib import spa
# check to see if the spa module was compiled with numba
using_numba = spa.USE_NUMBA
if how == 'numpy' and using_numba:
# the spa module was compiled to numba code, so we need to
# reload the module without compiling
# the PVLIB_USE_NUMBA env variable is used to tell the module
# to not compile with numba
os.environ['PVLIB_USE_NUMBA'] = '0'
pvl_logger.debug('Reloading spa module without compiling')
spa = reload(spa)
del os.environ['PVLIB_USE_NUMBA']
elif how == 'numba' and not using_numba:
# The spa module was not compiled to numba code, so set
# PVLIB_USE_NUMBA so it does compile to numba on reload.
os.environ['PVLIB_USE_NUMBA'] = '1'
pvl_logger.debug('Reloading spa module, compiling with numba')
spa = reload(spa)
del os.environ['PVLIB_USE_NUMBA']
elif how != 'numba' and how != 'numpy':
raise ValueError("how must be either 'numba' or 'numpy'")
return spa
def spa_python(time, location, pressure=101325, temperature=12, delta_t=None,
atmos_refract=None, how='numpy', numthreads=4):
"""
Calculate the solar position using a python implementation of the
NREL SPA algorithm described in [1].
If numba is installed, the functions can be compiled to
machine code and the function can be multithreaded.
Without numba, the function evaluates via numpy with
a slight performance hit.
Parameters
----------
time : pandas.DatetimeIndex
location : pvlib.Location object
pressure : int or float, optional
avg. yearly air pressure in Pascals.
temperature : int or float, optional
avg. yearly air temperature in degrees C.
delta_t : float, optional
Difference between terrestrial time and UT1.
The USNO has historical and forecasted delta_t [3].
atmos_refract : float, optional
The approximate atmospheric refraction (in degrees)
at sunrise and sunset.
how : str, optional
Options are 'numpy' or 'numba'. If numba >= 0.17.0
is installed, how='numba' will compile the spa functions
to machine code and run them multithreaded.
numthreads : int, optional
Number of threads to use if how == 'numba'.
Returns
-------
DataFrame
The DataFrame will have the following columns:
apparent_zenith (degrees),
zenith (degrees),
apparent_elevation (degrees),
elevation (degrees),
azimuth (degrees),
equation_of_time (minutes).
References
----------
[1] I. Reda and A. Andreas, Solar position algorithm for solar
radiation applications. Solar Energy, vol. 76, no. 5, pp. 577-589, 2004.
[2] I. Reda and A. Andreas, Corrigendum to Solar position algorithm for
solar radiation applications. Solar Energy, vol. 81, no. 6, p. 838, 2007.
[3] USNO delta T: http://www.usno.navy.mil/USNO/earth-orientation/eo-products/long-term
See also
--------
pyephem, spa_c, ephemeris
"""
# Added by Tony Lorenzo (@alorenzo175), University of Arizona, 2015
pvl_logger.debug('Calculating solar position with spa_python code')
lat = location.latitude
lon = location.longitude
elev = location.altitude
pressure = pressure / 100 # pressure must be in millibars for calculation
delta_t = delta_t or 67.0
atmos_refract = atmos_refract or 0.5667
if not isinstance(time, pd.DatetimeIndex):
try:
time = pd.DatetimeIndex(time)
except (TypeError, ValueError):
time = pd.DatetimeIndex([time, ])
unixtime = localize_to_utc(time, location).astype(np.int64)/10**9
spa = _spa_python_import(how)
app_zenith, zenith, app_elevation, elevation, azimuth, eot = spa.solar_position(
unixtime, lat, lon, elev, pressure, temperature, delta_t,
atmos_refract, numthreads)
result = pd.DataFrame({'apparent_zenith': app_zenith, 'zenith': zenith,
'apparent_elevation': app_elevation,
'elevation': elevation, 'azimuth': azimuth,
'equation_of_time': eot},
index=time)
try:
result = result.tz_convert(location.tz)
except TypeError:
result = result.tz_localize(location.tz)
return result
def get_sun_rise_set_transit(time, location, how='numpy', delta_t=None,
numthreads=4):
"""
Calculate the sunrise, sunset, and sun transit times using the
NREL SPA algorithm described in [1].
If numba is installed, the functions can be compiled to
machine code and the function can be multithreaded.
Without numba, the function evaluates via numpy with
a slight performance hit.
Parameters
----------
time : pandas.DatetimeIndex
Only the date part is used
location : pvlib.Location object
delta_t : float, optional
Difference between terrestrial time and UT1.
By default, use USNO historical data and predictions
how : str, optional
Options are 'numpy' or 'numba'. If numba >= 0.17.0
is installed, how='numba' will compile the spa functions
to machine code and run them multithreaded.
numthreads : int, optional
Number of threads to use if how == 'numba'.
Returns
-------
DataFrame
The DataFrame will have the following columns:
sunrise, sunset, transit
References
----------
[1] Reda, I., Andreas, A., 2003. Solar position algorithm for solar
radiation applications. Technical report: NREL/TP-560- 34302. Golden,
USA, http://www.nrel.gov.
"""
# Added by Tony Lorenzo (@alorenzo175), University of Arizona, 2015
pvl_logger.debug('Calculating sunrise, set, transit with spa_python code')
lat = location.latitude
lon = location.longitude
delta_t = delta_t or 67.0
if not isinstance(time, pd.DatetimeIndex):
try:
time = pd.DatetimeIndex(time)
except (TypeError, ValueError):
time = pd.DatetimeIndex([time, ])
# must convert to midnight UTC on day of interest
utcday = pd.DatetimeIndex(time.date).tz_localize('UTC')
unixtime = utcday.astype(np.int64)/10**9
spa = _spa_python_import(how)
transit, sunrise, sunset = spa.transit_sunrise_sunset(
unixtime, lat, lon, delta_t, numthreads)
# arrays are in seconds since epoch format, need to convert to timestamps
transit = pd.to_datetime(transit, unit='s', utc=True).tz_convert(
location.tz).tolist()
sunrise = pd.to_datetime(sunrise, unit='s', utc=True).tz_convert(
location.tz).tolist()
sunset = pd.to_datetime(sunset, unit='s', utc=True).tz_convert(
location.tz).tolist()
result = pd.DataFrame({'transit': transit,
'sunrise': sunrise,
'sunset': sunset}, index=time)
try:
result = result.tz_convert(location.tz)
except TypeError:
result = result.tz_localize(location.tz)
return result
def _ephem_setup(location, pressure, temperature):
import ephem
# initialize a PyEphem observer
obs = ephem.Observer()
obs.lat = str(location.latitude)
obs.lon = str(location.longitude)
obs.elevation = location.altitude
obs.pressure = pressure / 100. # convert to mBar
obs.temp = temperature
# the PyEphem sun
sun = ephem.Sun()
return obs, sun
def pyephem(time, location, pressure=101325, temperature=12):
"""
Calculate the solar position using the PyEphem package.
Parameters
----------
time : pandas.DatetimeIndex
location : pvlib.Location object
pressure : int or float, optional
air pressure in Pascals.
temperature : int or float, optional
air temperature in degrees C.
Returns
-------
DataFrame
The DataFrame will have the following columns:
apparent_elevation, elevation,
apparent_azimuth, azimuth,
apparent_zenith, zenith.
See also
--------
spa_python, spa_c, ephemeris
"""
# Written by Will Holmgren (@wholmgren), University of Arizona, 2014
try:
import ephem
except ImportError:
raise ImportError('PyEphem must be installed')
pvl_logger.debug('using PyEphem to calculate solar position')
time_utc = localize_to_utc(time, location)
sun_coords = pd.DataFrame(index=time_utc)
obs, sun = _ephem_setup(location, pressure, temperature)
# make and fill lists of the sun's altitude and azimuth
# this is the pressure and temperature corrected apparent alt/az.
alts = []
azis = []
for thetime in sun_coords.index:
obs.date = ephem.Date(thetime)
sun.compute(obs)
alts.append(sun.alt)
azis.append(sun.az)
sun_coords['apparent_elevation'] = alts
sun_coords['apparent_azimuth'] = azis
# redo it for p=0 to get no atmosphere alt/az
obs.pressure = 0
alts = []
azis = []
for thetime in sun_coords.index:
obs.date = ephem.Date(thetime)
sun.compute(obs)
alts.append(sun.alt)
azis.append(sun.az)
sun_coords['elevation'] = alts
sun_coords['azimuth'] = azis
# convert to degrees. add zenith
sun_coords = np.rad2deg(sun_coords)
sun_coords['apparent_zenith'] = 90 - sun_coords['apparent_elevation']
sun_coords['zenith'] = 90 - sun_coords['elevation']
try:
return sun_coords.tz_convert(location.tz)
except TypeError:
return sun_coords.tz_localize(location.tz)
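# Illustrative usage sketch (assumes PyEphem is installed; the Location
# arguments are placeholders):
#
#     import pandas as pd
#     from pvlib.location import Location
#     loc = Location(32.2, -110.9, tz='US/Arizona')
#     times = pd.date_range('2015-06-01 06:00', periods=12, freq='H',
#                           tz=loc.tz)
#     solpos = pyephem(times, loc, pressure=101325, temperature=25)
#     print(solpos[['apparent_elevation', 'azimuth']].head())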
def ephemeris(time, location, pressure=101325, temperature=12):
"""
Python-native solar position calculator.
The accuracy of this code is not guaranteed.
Consider using the built-in spa_c code or the PyEphem library.
Parameters
----------
time : pandas.DatetimeIndex
location : pvlib.Location
pressure : float or Series
Ambient pressure (Pascals)
temperature : float or Series
Ambient temperature (C)
Returns
-------
DataFrame with the following columns:
* apparent_elevation : apparent sun elevation accounting for
atmospheric refraction.
* elevation : actual elevation (not accounting for refraction)
of the sun in decimal degrees, 0 = on horizon.
The complement of the zenith angle.
    * azimuth : Azimuth of the sun in decimal degrees East of North.
* apparent_zenith : apparent sun zenith accounting for atmospheric
refraction.
* zenith : Solar zenith angle
* solar_time : Solar time in decimal hours (solar noon is 12.00).
References
-----------
Grover Hughes' class and related class materials on Engineering
Astronomy at Sandia National Laboratories, 1985.
See also
--------
pyephem, spa_c, spa_python
"""
# Added by Rob Andrews (@Calama-Consulting), Calama Consulting, 2014
# Edited by Will Holmgren (@wholmgren), University of Arizona, 2014
# Most comments in this function are from PVLIB_MATLAB or from
# pvlib-python's attempt to understand and fix problems with the
# algorithm. The comments are *not* based on the reference material.
# This helps a little bit:
# http://www.cv.nrao.edu/~rfisher/Ephemerides/times.html
pvl_logger.debug('location={}, temperature={}, pressure={}'.format(
location, temperature, pressure))
# the inversion of longitude is due to the fact that this code was
# originally written for the convention that positive longitude were for
# locations west of the prime meridian. However, the correct convention (as
# of 2009) is to use negative longitudes for locations west of the prime
# meridian. Therefore, the user should input longitude values under the
# correct convention (e.g. Albuquerque is at -106 longitude), but it needs
# to be inverted for use in the code.
Latitude = location.latitude
Longitude = -1 * location.longitude
Abber = 20 / 3600.
LatR = np.radians(Latitude)
# the SPA algorithm needs time to be expressed in terms of
# decimal UTC hours of the day of the year.
# first convert to utc
time_utc = localize_to_utc(time, location)
# strip out the day of the year and calculate the decimal hour
DayOfYear = time_utc.dayofyear
DecHours = (time_utc.hour + time_utc.minute/60. + time_utc.second/3600. +
time_utc.microsecond/3600.e6)
UnivDate = DayOfYear
UnivHr = DecHours
Yr = time_utc.year - 1900
YrBegin = 365 * Yr + np.floor((Yr - 1) / 4.) - 0.5
Ezero = YrBegin + UnivDate
T = Ezero / 36525.
# Calculate Greenwich Mean Sidereal Time (GMST)
GMST0 = 6 / 24. + 38 / 1440. + (
45.836 + 8640184.542 * T + 0.0929 * T ** 2) / 86400.
GMST0 = 360 * (GMST0 - np.floor(GMST0))
GMSTi = np.mod(GMST0 + 360 * (1.0027379093 * UnivHr / 24.), 360)
# Local apparent sidereal time
LocAST = np.mod((360 + GMSTi - Longitude), 360)
EpochDate = Ezero + UnivHr / 24.
T1 = EpochDate / 36525.
ObliquityR = np.radians(
23.452294 - 0.0130125 * T1 - 1.64e-06 * T1 ** 2 + 5.03e-07 * T1 ** 3)
MlPerigee = 281.22083 + 4.70684e-05 * EpochDate + 0.000453 * T1 ** 2 + (
3e-06 * T1 ** 3)
MeanAnom = np.mod((358.47583 + 0.985600267 * EpochDate - 0.00015 *
T1 ** 2 - 3e-06 * T1 ** 3), 360)
Eccen = 0.01675104 - 4.18e-05 * T1 - 1.26e-07 * T1 ** 2
EccenAnom = MeanAnom
E = 0
while np.max(abs(EccenAnom - E)) > 0.0001:
E = EccenAnom
EccenAnom = MeanAnom + np.degrees(Eccen)*np.sin(np.radians(E))
TrueAnom = (
2 * np.mod(np.degrees(np.arctan2(((1 + Eccen) / (1 - Eccen)) ** 0.5 *
np.tan(np.radians(EccenAnom) / 2.), 1)), 360))
EcLon = np.mod(MlPerigee + TrueAnom, 360) - Abber
EcLonR = np.radians(EcLon)
DecR = np.arcsin(np.sin(ObliquityR)*np.sin(EcLonR))
RtAscen = np.degrees(np.arctan2(np.cos(ObliquityR)*np.sin(EcLonR),
np.cos(EcLonR)))
HrAngle = LocAST - RtAscen
HrAngleR = np.radians(HrAngle)
HrAngle = HrAngle - (360 * ((abs(HrAngle) > 180)))
SunAz = np.degrees(np.arctan2(-np.sin(HrAngleR),
np.cos(LatR)*np.tan(DecR) -
np.sin(LatR)*np.cos(HrAngleR)))
SunAz[SunAz < 0] += 360
SunEl = np.degrees(np.arcsin(
np.cos(LatR) * np.cos(DecR) * np.cos(HrAngleR) +
np.sin(LatR) * np.sin(DecR)))
SolarTime = (180 + HrAngle) / 15.
# Calculate refraction correction
Elevation = SunEl
TanEl = pd.Series(np.tan(np.radians(Elevation)), index=time_utc)
Refract = pd.Series(0, index=time_utc)
Refract[(Elevation > 5) & (Elevation <= 85)] = (
58.1/TanEl - 0.07/(TanEl**3) + 8.6e-05/(TanEl**5))
Refract[(Elevation > -0.575) & (Elevation <= 5)] = (
Elevation *
(-518.2 + Elevation*(103.4 + Elevation*(-12.79 + Elevation*0.711))) +
1735)
Refract[(Elevation > -1) & (Elevation <= -0.575)] = -20.774 / TanEl
Refract *= (283/(273. + temperature)) * (pressure/101325.) / 3600.
ApparentSunEl = SunEl + Refract
# make output DataFrame
DFOut = pd.DataFrame(index=time_utc).tz_convert(location.tz)
DFOut['apparent_elevation'] = ApparentSunEl
DFOut['elevation'] = SunEl
DFOut['azimuth'] = SunAz
DFOut['apparent_zenith'] = 90 - ApparentSunEl
DFOut['zenith'] = 90 - SunEl
DFOut['solar_time'] = SolarTime
return DFOut
def calc_time(lower_bound, upper_bound, location, attribute, value,
pressure=101325, temperature=12, xtol=1.0e-12):
"""
Calculate the time between lower_bound and upper_bound
where the attribute is equal to value. Uses PyEphem for
solar position calculations.
Parameters
----------
lower_bound : datetime.datetime
upper_bound : datetime.datetime
location : pvlib.Location object
attribute : str
The attribute of a pyephem.Sun object that
you want to solve for. Likely options are 'alt'
and 'az' (which must be given in radians).
value : int or float
The value of the attribute to solve for
pressure : int or float, optional
Air pressure in Pascals. Set to 0 for no
atmospheric correction.
temperature : int or float, optional
Air temperature in degrees C.
xtol : float, optional
The allowed error in the result from value
Returns
-------
datetime.datetime
Raises
------
ValueError
If the value is not contained between the bounds.
AttributeError
If the given attribute is not an attribute of a
PyEphem.Sun object.
"""
try:
import scipy.optimize as so
except ImportError:
raise ImportError('The calc_time function requires scipy')
obs, sun = _ephem_setup(location, pressure, temperature)
def compute_attr(thetime, target, attr):
obs.date = thetime
sun.compute(obs)
return getattr(sun, attr) - target
lb = datetime_to_djd(lower_bound)
ub = datetime_to_djd(upper_bound)
djd_root = so.brentq(compute_attr, lb, ub,
(value, attribute), xtol=xtol)
return djd_to_datetime(djd_root, location.tz)
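# Illustrative usage sketch (the bounds and the `loc` Location object are
# assumptions; 'alt' is a genuine attribute of ephem.Sun, expressed in
# radians as noted in the docstring):
#
#     import datetime
#     lb = datetime.datetime(2015, 6, 1, 4, 0)
#     ub = datetime.datetime(2015, 6, 1, 10, 0)
#     # time between lb and ub at which the sun crosses 0 rad altitude,
#     # i.e. an estimate of sunrise
#     sunrise = calc_time(lb, ub, loc, 'alt', 0.0)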
def pyephem_earthsun_distance(time):
"""
Calculates the distance from the earth to the sun using pyephem.
Parameters
----------
time : pd.DatetimeIndex
Returns
-------
pd.Series. Earth-sun distance in AU.
"""
pvl_logger.debug('solarposition.pyephem_earthsun_distance()')
import ephem
sun = ephem.Sun()
earthsun = []
for thetime in time:
sun.compute(ephem.Date(thetime))
earthsun.append(sun.earth_distance)
return pd.Series(earthsun, index=time)
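# Illustrative usage sketch: over a year the returned distance oscillates
# between roughly 0.983 AU (perihelion) and 1.017 AU (aphelion).
#
#     import pandas as pd
#     times = pd.date_range('2015-01-01', '2015-12-31', freq='D')
#     dist = pyephem_earthsun_distance(times)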
| bsd-3-clause |
dsm054/pandas | pandas/io/common.py | 1 | 19663 | """Common IO api utilities"""
import codecs
from contextlib import closing, contextmanager
import csv
import mmap
import os
import zipfile
import pandas.compat as compat
from pandas.compat import BytesIO, StringIO, string_types, text_type
from pandas.errors import ( # noqa
AbstractMethodError, DtypeWarning, EmptyDataError, ParserError,
ParserWarning)
from pandas.core.dtypes.common import is_file_like, is_number
from pandas.io.formats.printing import pprint_thing
# gh-12665: Alias for now and remove later.
CParserError = ParserError
# common NA values
# no longer excluding inf representations
# '1.#INF','-1.#INF', '1.#INF000000',
_NA_VALUES = {'-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A N/A', '#N/A',
'N/A', 'n/a', 'NA', '#NA', 'NULL', 'null', 'NaN', '-NaN', 'nan',
'-nan', ''}
if compat.PY3:
from urllib.request import urlopen, pathname2url
_urlopen = urlopen
from urllib.parse import urlparse as parse_url
from urllib.parse import (uses_relative, uses_netloc, uses_params,
urlencode, urljoin)
from urllib.error import URLError
from http.client import HTTPException # noqa
else:
from urllib2 import urlopen as _urlopen
from urllib import urlencode, pathname2url # noqa
from urlparse import urlparse as parse_url
from urlparse import uses_relative, uses_netloc, uses_params, urljoin
from urllib2 import URLError # noqa
from httplib import HTTPException # noqa
from contextlib import contextmanager, closing # noqa
from functools import wraps # noqa
# @wraps(_urlopen)
@contextmanager
def urlopen(*args, **kwargs):
with closing(_urlopen(*args, **kwargs)) as f:
yield f
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard('')
class BaseIterator(object):
"""Subclass this and provide a "__next__()" method to obtain an iterator.
Useful only when the object being iterated is non-reusable (e.g. OK for a
parser, not for an in-memory table, yes for its iterator)."""
def __iter__(self):
return self
def __next__(self):
raise AbstractMethodError(self)
if not compat.PY3:
BaseIterator.next = lambda self: self.__next__()
def _is_url(url):
"""Check to see if a URL has a valid protocol.
Parameters
----------
url : str or unicode
Returns
-------
isurl : bool
If `url` has a valid protocol return True otherwise False.
"""
try:
return parse_url(url).scheme in _VALID_URLS
except Exception:
return False
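# Doctest-style sketch of the expected behaviour:
#
#     >>> _is_url('https://example.com/data.csv')
#     True
#     >>> _is_url('/home/user/data.csv')
#     False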
def _expand_user(filepath_or_buffer):
"""Return the argument with an initial component of ~ or ~user
replaced by that user's home directory.
Parameters
----------
filepath_or_buffer : object to be converted if possible
Returns
-------
expanded_filepath_or_buffer : an expanded filepath or the
input if not expandable
"""
if isinstance(filepath_or_buffer, string_types):
return os.path.expanduser(filepath_or_buffer)
return filepath_or_buffer
def _validate_header_arg(header):
if isinstance(header, bool):
raise TypeError("Passing a bool to header is invalid. "
"Use header=None for no header or "
"header=int or list-like of ints to specify "
"the row(s) making up the column names")
def _stringify_path(filepath_or_buffer):
"""Attempt to convert a path-like object to a string.
Parameters
----------
filepath_or_buffer : object to be converted
Returns
-------
str_filepath_or_buffer : maybe a string version of the object
Notes
-----
Objects supporting the fspath protocol (python 3.6+) are coerced
according to its __fspath__ method.
For backwards compatibility with older pythons, pathlib.Path and
py.path objects are specially coerced.
Any other object is passed through unchanged, which includes bytes,
strings, buffers, or anything else that's not even path-like.
"""
try:
import pathlib
_PATHLIB_INSTALLED = True
except ImportError:
_PATHLIB_INSTALLED = False
try:
from py.path import local as LocalPath
_PY_PATH_INSTALLED = True
except ImportError:
_PY_PATH_INSTALLED = False
if hasattr(filepath_or_buffer, '__fspath__'):
return filepath_or_buffer.__fspath__()
if _PATHLIB_INSTALLED and isinstance(filepath_or_buffer, pathlib.Path):
return text_type(filepath_or_buffer)
if _PY_PATH_INSTALLED and isinstance(filepath_or_buffer, LocalPath):
return filepath_or_buffer.strpath
return filepath_or_buffer
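# Doctest-style sketch (POSIX path separator assumed):
#
#     >>> import pathlib
#     >>> _stringify_path(pathlib.Path('data') / 'file.csv')
#     'data/file.csv'
#     >>> _stringify_path('already-a-string.csv')
#     'already-a-string.csv'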
def is_s3_url(url):
"""Check for an s3, s3n, or s3a url"""
try:
return parse_url(url).scheme in ['s3', 's3n', 's3a']
except Exception:
return False
def is_gcs_url(url):
"""Check for a gcs url"""
try:
return parse_url(url).scheme in ['gcs', 'gs']
except Exception:
return False
def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
compression=None, mode=None):
"""
If the filepath_or_buffer is a url, translate and return the buffer.
Otherwise passthrough.
Parameters
----------
filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
or buffer
encoding : the encoding to use to decode py3 bytes, default is 'utf-8'
mode : str, optional
Returns
-------
    tuple of ({a filepath or buffer or S3File instance},
encoding, str,
compression, str,
should_close, bool)
"""
filepath_or_buffer = _stringify_path(filepath_or_buffer)
if _is_url(filepath_or_buffer):
req = _urlopen(filepath_or_buffer)
content_encoding = req.headers.get('Content-Encoding', None)
if content_encoding == 'gzip':
# Override compression based on Content-Encoding header
compression = 'gzip'
reader = BytesIO(req.read())
req.close()
return reader, encoding, compression, True
if is_s3_url(filepath_or_buffer):
from pandas.io import s3
return s3.get_filepath_or_buffer(filepath_or_buffer,
encoding=encoding,
compression=compression,
mode=mode)
if is_gcs_url(filepath_or_buffer):
from pandas.io import gcs
return gcs.get_filepath_or_buffer(filepath_or_buffer,
encoding=encoding,
compression=compression,
mode=mode)
if isinstance(filepath_or_buffer, (compat.string_types,
compat.binary_type,
mmap.mmap)):
return _expand_user(filepath_or_buffer), None, compression, False
if not is_file_like(filepath_or_buffer):
msg = "Invalid file path or buffer object type: {_type}"
raise ValueError(msg.format(_type=type(filepath_or_buffer)))
return filepath_or_buffer, None, compression, False
def file_path_to_url(path):
"""
converts an absolute native path to a FILE URL.
Parameters
----------
path : a path in native format
Returns
-------
a valid FILE URL
"""
return urljoin('file:', pathname2url(path))
_compression_to_extension = {
'gzip': '.gz',
'bz2': '.bz2',
'zip': '.zip',
'xz': '.xz',
}
def _infer_compression(filepath_or_buffer, compression):
"""
Get the compression method for filepath_or_buffer. If compression='infer',
the inferred compression method is returned. Otherwise, the input
compression method is returned unchanged, unless it's invalid, in which
case an error is raised.
Parameters
----------
filepath_or_buffer :
a path (str) or buffer
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
If 'infer' and `filepath_or_buffer` is path-like, then detect
compression from the following extensions: '.gz', '.bz2', '.zip',
or '.xz' (otherwise no compression).
Returns
-------
string or None :
compression method
Raises
------
ValueError on invalid compression specified
"""
# No compression has been explicitly specified
if compression is None:
return None
# Infer compression
if compression == 'infer':
# Convert all path types (e.g. pathlib.Path) to strings
filepath_or_buffer = _stringify_path(filepath_or_buffer)
if not isinstance(filepath_or_buffer, compat.string_types):
# Cannot infer compression of a buffer, assume no compression
return None
# Infer compression from the filename/URL extension
for compression, extension in _compression_to_extension.items():
if filepath_or_buffer.endswith(extension):
return compression
return None
# Compression has been specified. Check that it's valid
if compression in _compression_to_extension:
return compression
msg = 'Unrecognized compression type: {}'.format(compression)
valid = ['infer', None] + sorted(_compression_to_extension)
msg += '\nValid compression types are {}'.format(valid)
raise ValueError(msg)
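# Doctest-style sketch ('infer' maps filename extensions to methods;
# buffers and unmatched extensions yield None):
#
#     >>> _infer_compression('data.csv.gz', compression='infer')
#     'gzip'
#     >>> _infer_compression('data.csv', compression='infer') is None
#     True
#     >>> _infer_compression('data.csv', compression='bz2')
#     'bz2'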
def _get_handle(path_or_buf, mode, encoding=None, compression=None,
memory_map=False, is_text=True):
"""
Get file handle for given path/buffer and mode.
Parameters
----------
path_or_buf :
a path (str) or buffer
mode : str
mode to open path_or_buf with
encoding : str or None
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default None
If 'infer' and `filepath_or_buffer` is path-like, then detect
compression from the following extensions: '.gz', '.bz2', '.zip',
or '.xz' (otherwise no compression).
memory_map : boolean, default False
See parsers._parser_params for more information.
is_text : boolean, default True
whether file/buffer is in text format (csv, json, etc.), or in binary
mode (pickle, etc.)
Returns
-------
f : file-like
A file-like object
handles : list of file-like objects
A list of file-like object that were opened in this function.
"""
try:
from s3fs import S3File
need_text_wrapping = (BytesIO, S3File)
except ImportError:
need_text_wrapping = (BytesIO,)
handles = list()
f = path_or_buf
# Convert pathlib.Path/py.path.local or string
path_or_buf = _stringify_path(path_or_buf)
is_path = isinstance(path_or_buf, compat.string_types)
if is_path:
compression = _infer_compression(path_or_buf, compression)
if compression:
if compat.PY2 and not is_path and encoding:
msg = 'compression with encoding is not yet supported in Python 2'
raise ValueError(msg)
# GZ Compression
if compression == 'gzip':
import gzip
if is_path:
f = gzip.open(path_or_buf, mode)
else:
f = gzip.GzipFile(fileobj=path_or_buf)
# BZ Compression
elif compression == 'bz2':
import bz2
if is_path:
f = bz2.BZ2File(path_or_buf, mode)
elif compat.PY2:
# Python 2's bz2 module can't take file objects, so have to
# run through decompress manually
f = StringIO(bz2.decompress(path_or_buf.read()))
path_or_buf.close()
else:
f = bz2.BZ2File(path_or_buf)
# ZIP Compression
elif compression == 'zip':
zf = BytesZipFile(path_or_buf, mode)
# Ensure the container is closed as well.
handles.append(zf)
if zf.mode == 'w':
f = zf
elif zf.mode == 'r':
zip_names = zf.namelist()
if len(zip_names) == 1:
f = zf.open(zip_names.pop())
elif len(zip_names) == 0:
raise ValueError('Zero files found in ZIP file {}'
.format(path_or_buf))
else:
raise ValueError('Multiple files found in ZIP file.'
' Only one file per ZIP: {}'
.format(zip_names))
# XZ Compression
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.LZMAFile(path_or_buf, mode)
# Unrecognized Compression
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
handles.append(f)
elif is_path:
if compat.PY2:
# Python 2
mode = "wb" if mode == "w" else mode
f = open(path_or_buf, mode)
elif encoding:
# Python 3 and encoding
f = open(path_or_buf, mode, encoding=encoding, newline="")
elif is_text:
# Python 3 and no explicit encoding
f = open(path_or_buf, mode, errors='replace', newline="")
else:
# Python 3 and binary mode
f = open(path_or_buf, mode)
handles.append(f)
# in Python 3, convert BytesIO or fileobjects passed with an encoding
if (compat.PY3 and is_text and
(compression or isinstance(f, need_text_wrapping))):
from io import TextIOWrapper
f = TextIOWrapper(f, encoding=encoding)
handles.append(f)
if memory_map and hasattr(f, 'fileno'):
try:
g = MMapWrapper(f)
f.close()
f = g
except Exception:
# we catch any errors that may have occurred
# because that is consistent with the lower-level
# functionality of the C engine (pd.read_csv), so
# leave the file handler as is then
pass
return f, handles
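# Illustrative usage sketch (the caller owns the returned handles and
# should close them, mirroring how the pandas readers use this helper):
#
#     f, handles = _get_handle('data.csv.gz', 'r', encoding='utf-8',
#                              compression='infer')
#     try:
#         text = f.read()
#     finally:
#         for h in reversed(handles):
#             h.close()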
class BytesZipFile(zipfile.ZipFile, BytesIO):
"""
    Wrapper for the standard library class ZipFile that allows the returned
    file-like handle to accept byte strings via its `write` method.
    BytesIO provides the attributes of a file-like object, and ZipFile.writestr
    writes byte strings into a member of the archive.
"""
# GH 17778
def __init__(self, file, mode, compression=zipfile.ZIP_DEFLATED, **kwargs):
if mode in ['wb', 'rb']:
mode = mode.replace('b', '')
super(BytesZipFile, self).__init__(file, mode, compression, **kwargs)
def write(self, data):
super(BytesZipFile, self).writestr(self.filename, data)
@property
def closed(self):
return self.fp is None
class MMapWrapper(BaseIterator):
"""
    Wrapper for Python's mmap class so that it can be properly read in
by Python's csv.reader class.
Parameters
----------
f : file object
File object to be mapped onto memory. Must support the 'fileno'
method or have an equivalent attribute
"""
def __init__(self, f):
self.mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
def __getattr__(self, name):
return getattr(self.mmap, name)
def __iter__(self):
return self
def __next__(self):
newline = self.mmap.readline()
# readline returns bytes, not str, in Python 3,
# but Python's CSV reader expects str, so convert
# the output to str before continuing
if compat.PY3:
newline = compat.bytes_to_str(newline)
# mmap doesn't raise if reading past the allocated
# data but instead returns an empty string, so raise
# if that is returned
if newline == '':
raise StopIteration
return newline
if not compat.PY3:
MMapWrapper.next = lambda self: self.__next__()
class UTF8Recoder(BaseIterator):
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def read(self, bytes=-1):
return self.reader.read(bytes).encode("utf-8")
def readline(self):
return self.reader.readline().encode("utf-8")
def next(self):
return next(self.reader).encode("utf-8")
if compat.PY3: # pragma: no cover
def UnicodeReader(f, dialect=csv.excel, encoding="utf-8", **kwds):
# ignore encoding
return csv.reader(f, dialect=dialect, **kwds)
def UnicodeWriter(f, dialect=csv.excel, encoding="utf-8", **kwds):
return csv.writer(f, dialect=dialect, **kwds)
else:
class UnicodeReader(BaseIterator):
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
On Python 3, this is replaced (below) by csv.reader, which handles
unicode.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def __next__(self):
row = next(self.reader)
return [compat.text_type(s, "utf-8") for s in row]
class UnicodeWriter(object):
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
self.quoting = kwds.get("quoting", None)
def writerow(self, row):
def _check_as_is(x):
return (self.quoting == csv.QUOTE_NONNUMERIC and
is_number(x)) or isinstance(x, str)
row = [x if _check_as_is(x)
else pprint_thing(x).encode("utf-8") for x in row]
self.writer.writerow([s for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and re-encode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
def _check_as_is(x):
return (self.quoting == csv.QUOTE_NONNUMERIC and
is_number(x)) or isinstance(x, str)
for i, row in enumerate(rows):
rows[i] = [x if _check_as_is(x)
else pprint_thing(x).encode("utf-8") for x in row]
self.writer.writerows([[s for s in row] for row in rows])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and re-encode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
| bsd-3-clause |
apapadopoulos/MultiCoreMigrationSimulator | libs/Tests.py | 1 | 3591 | import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import sys
import libs.Process as proc
import libs.Controller as ctrl
import libs.Scheduler as sched
import libs.Utils as ut
def testInnerLoop(tFin):
G = proc.Process(ident=1,alpha=1,stdDev=0.01)
G.viewProcess()
R = ctrl.I(ident=G.getID(), Ki=0.25)
tauto = 0.5
vtauto = np.zeros((tFin,1))
vtaut = np.zeros((tFin,1))
for kk in xrange(1,tFin+1):
taut = G.getY()
u = R.computeU(tauto,taut)
G.setU(u)
# Store variables
vtauto[kk-1,0] = tauto
vtaut[kk-1,0] = taut
plt.plot(xrange(0,tFin),vtaut,'b')
plt.plot(xrange(0,tFin),vtauto,'k--')
plt.show()
def testSchedulerAddRemoveThreads(tFin,numThreads):
# Creating numThreads threads
Threads = []
alphas = []
for i in xrange(0,numThreads):
alpha = 0.1
Threads.append(proc.Process(ident=i,alpha=alpha, stdDev = 0.0))
Threads[i].viewProcess()
scheduler = sched.IplusPI(ident=0, Kiin=0.25, Kpout=2.0, Kiout=0.25)
tauro = 1
vtauro = np.zeros((tFin,1))
vtaur = np.zeros((tFin,1))
vtauto = []
vtaut = []
for kk in xrange(1,tFin+1):
if kk == 100:
print 'Adding a process...'
numThreads = ut.addProcess(Threads,alpha=0.5,ident=100)
if kk == 200:
print 'Removing a process...'
numThreads = ut.removeProcess(Threads,100)
if kk == 300:
print 'Adding a process...'
numThreads = ut.addProcess(Threads,alpha=0.6,ident=100,stdDev=0)
if kk == 400:
print 'Removing a process...'
numThreads = ut.removeProcess(Threads,90)
if kk == 410:
print 'Removing a process...'
numThreads = ut.removeProcess(Threads,100)
taur, taut, tauto = scheduler.schedule(Threads,tauro)
scheduler.viewUtilization()
# Store variables
vtauro[kk-1,0] = tauro
vtaur[kk-1,0] = taur
vtauto.append(tauto)
vtaut.append(taut)
plt.plot(xrange(0,tFin),vtaur,'b')
plt.plot(xrange(0,tFin),vtauro,'k--')
plt.show()
def testSchedulerWithInternalDataPlot(tFin,numThreads):
# Creating numThreads threads
Threads = []
alphas = []
for i in xrange(0,numThreads):
alpha = 1.0/(i+1)
Threads.append(proc.Process(ident=i,alpha=alpha, stdDev = 0.01))
Threads[i].viewProcess()
scheduler = sched.IplusPI(ident=0, Kiin=0.25, Kpout=2.0, Kiout=0.25)
tauro = 1
vtauro = np.zeros((tFin,1))
vtaur = np.zeros((tFin,1))
vtauto = np.zeros((tFin,numThreads))
vtaut = np.zeros((tFin,numThreads))
for kk in xrange(1,tFin+1):
taur, taut, tauto = scheduler.schedule(Threads,tauro)
scheduler.viewUtilization()
# Store variables
vtauro[kk-1,0] = tauro
vtaur[kk-1,0] = taur
vtauto[kk-1,:] = tauto
vtaut[kk-1,:] = taut
plt.figure(1)
plt.plot(xrange(0,tFin),vtaur,'b')
plt.plot(xrange(0,tFin),vtauro,'k--')
plt.figure(2)
plt.plot(xrange(0,tFin),vtaut)
plt.plot(xrange(0,tFin),vtauto,'--')
plt.show()
def testSchedulerNoThreads(tFin):
# Creating numThreads threads
Threads = []
alphas = []
scheduler = sched.IplusPI(ident=0, Kiin=0.25, Kpout=2.0, Kiout=0.25)
tauro = 1
vtauro = np.zeros((tFin,1))
vtaur = np.zeros((tFin,1))
vtauto = []
vtaut = []
for kk in xrange(1,tFin+1):
if kk == 100:
print 'Adding a process...'
numThreads = ut.addProcess(Threads,alpha=0.5,ident=100)
if kk == 200:
print 'Removing a process...'
numThreads = ut.removeProcess(Threads,100)
taur, taut, tauto = scheduler.schedule(Threads,tauro)
scheduler.viewUtilization()
# Store variables
vtauro[kk-1,0] = tauro
vtaur[kk-1,0] = taur
vtauto.append(tauto)
vtaut.append(taut)
plt.plot(xrange(0,tFin),vtaur,'b')
plt.plot(xrange(0,tFin),vtauro,'k--')
plt.show() | gpl-2.0 |
FrancoisRheaultUS/dipy | tools/make_examples.py | 3 | 5749 | #!/usr/bin/env python
"""Run the py->rst conversion and run all examples.
Steps are:
analyze example index file for example py filenames
check for any filenames in example directory not included
do py to rst conversion, writing into build directory
run the example scripts
"""
# -----------------------------------------------------------------------------
# Library imports
# -----------------------------------------------------------------------------
# Stdlib imports
import os
import os.path as op
import sys
import shutil
import io
from subprocess import check_call
from glob import glob
from time import time
# Third-party imports
# We must configure the mpl backend before making any further mpl imports
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib._pylab_helpers import Gcf
import dipy
# -----------------------------------------------------------------------------
# Function definitions
# -----------------------------------------------------------------------------
# These global variables let show() be called by the scripts in the usual
# manner, but when generating examples, we override it to write the figures to
# files with a known name (derived from the script name) plus a counter
figure_basename = None
# We must change the show command to save instead
def show():
allfm = Gcf.get_all_fig_managers()
for fcount, fm in enumerate(allfm):
fm.canvas.figure.savefig('%s_%02i.png' %
(figure_basename, fcount + 1))
_mpl_show = plt.show
plt.show = show
# -----------------------------------------------------------------------------
# Main script
# -----------------------------------------------------------------------------
# Where things are
DOC_PATH = op.abspath('..')
EG_INDEX_FNAME = op.join(DOC_PATH, 'examples_index.rst')
EG_SRC_DIR = op.join(DOC_PATH, 'examples')
# Work in examples directory
# os.chdir(op.join(DOC_PATH, 'examples_built'))
if not os.getcwd().endswith(op.join('doc', 'examples_built')):
raise OSError('This must be run from the doc directory')
# Copy the py files; check they are in the examples list and warn if not
with io.open(EG_INDEX_FNAME, 'rt', encoding="utf8") as f:
eg_index_contents = f.read()
# Here I am adding an extra step. The list of examples to be executed also
# needs to be added to the following file (valid_examples.txt). This helps
# with debugging the examples and the documentation a few examples at a
# time.
flist_name = op.join(op.dirname(os.getcwd()), 'examples',
'valid_examples.txt')
with io.open(flist_name, "r", encoding="utf8") as flist:
validated_examples = flist.readlines()
# Parse "#" in lines
validated_examples = [line.split("#", 1)[0] for line in validated_examples]
# Remove leading and trailing white space from example names
validated_examples = [line.strip() for line in validated_examples]
# Remove blank lines
validated_examples = list(filter(None, validated_examples))
for example in validated_examples:
fullpath = op.join(EG_SRC_DIR, example)
if not example.endswith(".py"):
print("%s not a python file, skipping." % example)
continue
elif not op.isfile(fullpath):
print("Cannot find file, %s, skipping." % example)
continue
shutil.copyfile(fullpath, example)
# Check that example file is included in the docs
file_root = example[:-3]
if file_root not in eg_index_contents:
msg = "Example, %s, not in index file %s."
msg = msg % (example, EG_INDEX_FNAME)
print(msg)
# Run the conversion from .py to rst file
check_call('{} ../../tools/ex2rst --project dipy --outdir . .'.format(sys.executable), shell=True)
# add the path so that scripts can import other scripts in the same directory
sys.path.insert(0, os.getcwd())
if not op.isdir('fig'):
os.mkdir('fig')
use_xvfb = os.environ.get('TEST_WITH_XVFB', False)
use_memprof = os.environ.get('TEST_WITH_MEMPROF', False)
if use_xvfb:
try:
from xvfbwrapper import Xvfb
except ImportError:
        raise RuntimeError("You are trying to run a documentation build "
                           "with 'TEST_WITH_XVFB' set to True, but "
                           "xvfbwrapper is not available. Please install "
                           "xvfbwrapper and try again")
display = Xvfb(width=1920, height=1080)
display.start()
if use_memprof:
try:
import memory_profiler
except ImportError:
        raise RuntimeError("You are trying to run a documentation build "
                           "with 'TEST_WITH_MEMPROF' set to True, but "
                           "memory_profiler is not available. Please install "
                           "memory_profiler and try again")
name = ''
def run_script():
namespace = {}
t1 = time()
with io.open(script, encoding="utf8") as f:
exec(f.read(), namespace)
t2 = time()
print("That took %.2f seconds to run" % (t2 - t1))
plt.close('all')
del namespace
# Execute each python script in the directory:
for script in validated_examples:
figure_basename = op.join('fig', op.splitext(script)[0])
if use_memprof:
print("memory profiling ", script)
memory_profiler.profile(run_script)()
else:
print('*************************************************************')
print(script)
print('*************************************************************')
run_script()
if use_xvfb:
display.stop()
# clean up stray images, pickles, npy files, etc
for globber in ('*.nii.gz', '*.dpy', '*.npy', '*.pkl', '*.mat', '*.img',
'*.hdr'):
for fname in glob(globber):
os.unlink(fname)
| bsd-3-clause |
lavakyan/mstm-spectrum | mstm_studio/mstm_spectrum.py | 1 | 34171 | # -*- coding: utf-8 -*-
#
# ----------------------------------------------------- #
# #
# This code is a part of T-matrix fitting project #
# Contributors: #
# L. Avakyan <[email protected]> #
# K. Yablunovskiy <[email protected]> #
# #
# ----------------------------------------------------- #
"""
Based on heavily rewritten MSTM-GUI code
<URL:https://github.com/dmayerich/mstm-gui>
<https://git.stim.ee.uh.edu/optics/mstm-gui.git>
by Dr. David Mayerich
Optimized for spectral calculations (for many wavelengths)
so that it can be used for fitting to experiment
"""
from __future__ import print_function
from __future__ import division
import numpy as np
from numpy.random import lognormal
from scipy import interpolate
import subprocess
import os # to delete files after calc.
import sys # to check whether running on Linux or Windows
import datetime
try:
import matplotlib.pyplot as plt
except ImportError:
pass
import time
import tempfile # to run mstm in temporary directory
# use input in both python2 and python3
try:
input = raw_input
except NameError:
pass
# use xrange in both python2 and python3
try:
xrange
except NameError:
xrange = range
class Profiler(object):
"""
This class for benchmarking is from
http://onesteptospace.blogspot.pt/2013/01/python.html
Usage:
>>> with Profiler() as p:
>>> // your code to be profiled here
"""
def __enter__(self):
self._startTime = time.time()
def __exit__(self, type, value, traceback):
print('Elapsed time: {:.3f} sec'.format(time.time() - self._startTime))
class SpheresOverlapError(Exception):
pass
class SPR(object):
"""
    Class for calculation of surface plasmon resonance (SPR),
    running the external MSTM code.
The MSTM executable should be set in MSTM_BIN environment
variable. Default is ~/bin/mstm.x
"""
environment_material = 'Air'
paramDict = {
'number_spheres': 0,
'sphere_position_file': '', # radius, X,Y,Z [nm], n ,k
'length_scale_factor': 1.0, # 2π/λ[nm]
'real_ref_index_scale_factor': 1.0, # multiplier for spheres
'imag_ref_index_scale_factor': 1.0,
'real_chiral_factor': 0.0, # chiral passive spheres
'imag_chiral_factor': 0.0,
'medium_real_ref_index': 1.0, # refraction index of the environment
'medium_imag_ref_index': 0.0,
'medium_real_chiral_factor': 0.0,
'medium_imag_chiral_factor': 0.0,
'target_euler_angles_deg': [0.0, 0.0, 0.0], # ignored for random orient. calc.
'mie_epsilon': 1.0E-12, # Convergence criterion for determining the number of orders
# in the Mie expansions. Negative value - number of orders.
'translation_epsilon': 1.0E-8, # Convergence criterion for estimating the maximum order of the cluster T matrix
'solution_epsilon': 1.0E-8, # Precision of linear equation system solution
't_matrix_convergence_epsilon': 1.0E-6,
        'plane_wave_epsilon': 1E-3,  # Precision of expansion of incident field (both for plane and gaussian waves)
'iterations_per_correction': 20, # ignored for big 'near_field_translation_distance'
'max_number_iterations': 2000, # with account of all iterations
'near_field_translation_distance': 1.0E6, # can be big real, small real or negative. TWEAK FOR PERFORMANCE
'store_translation_matrix': 0,
'fixed_or_random_orientation': 1, # 0 - fixed, 1 - random
'gaussian_beam_constant': 0, # CB = 1/(k ω0). CB = 0 - plane wave
'gaussian_beam_focal_point': [0.0, 0.0, 0.0], # does not alters results for plane wave and random orientations
        'run_print_file': '',  # if blank, stdout will be used
'write_sphere_data': 0, # 1 - detail, 0 - concise
'output_file': 'test.dat', # should change for each run
'incident_or_target_frame': 0, # Integer switch, relevant only for
# fixed orientation calculations
'min_scattering_angle_deg': 0.0,
'max_scattering_angle_deg': 180.0,
'min_scattering_plane_angle_deg': 0.0, # selects a plane for fixed orient.
'max_scattering_plane_angle_deg': 0.0, # selects a plane for fixed orient.
'delta_scattering_angle_deg': 1.0,
'calculate_near_field': 0, # no near field calculations
'calculate_t_matrix': 1, # 1 - new calc., 0 - use old, 2 - continue calc
't_matrix_file': 'tmatrix-temp.dat',
        'sm_number_processors': 10,  # actual number of processors is the
                                     # minimum of this value and that provided by MPI
}
local_keys = ['output_file', 'length_scale_factor',
'medium_real_ref_index', 'medium_imag_ref_index',
't_matrix_file']
def __init__(self, wavelengths):
'''
Parameter:
wavelengths: numpy array
Wavelegths in nm
'''
self.wavelengths = wavelengths
self.command = os.environ.get('MSTM_BIN', '~/bin/mstm.x')
def set_spheres(self, spheres):
self.spheres = spheres
# count spheres with positive radius:
self.paramDict['number_spheres'] = np.sum(self.spheres.a > 0)
def simulate(self, outfn=None):
'''
Start the simulation.
The inpuit parameters are read from object dictionary `paramDict`.
Routine will prepare input file `scriptParams.inp` in the temporary folder,
which will be deleted after calculation.
After calculation the result depends on the polarization setting.
For polarized light the object fields will be filled:
extinction_par, extinction_ort,
absorbtion_par, absorbtion_ort,
scattering_par, scattering_ort.
While for orientation-averaged calculation just:
extinction, absorbtion and scattering.
'''
        if self.paramDict['number_spheres'] == 0:  # no spheres
return self.wavelengths, np.zeros_like(self.wavelengths)
if self.spheres.check_overlap():
raise SpheresOverlapError('Spheres overlapping!')
if isinstance(self.environment_material, Material):
material = self.environment_material
else:
print(self.environment_material)
material = Material(self.environment_material)
with tempfile.TemporaryDirectory() as tmpdir:
print('Using temporary directory: %s' % tmpdir)
outFID = open(os.path.join(tmpdir, 'scriptParams.inp'), 'w')
outFID.write('begin_comment\n')
outFID.write('**********************************\n')
outFID.write(' MSTM input for SPR calculation\n')
outFID.write(' Generated by python script\n')
outFID.write(' %s\n' %
datetime.datetime.now().strftime('%Y-%m-%d %H:%M'))
outFID.write('**********************************\n')
outFID.write('end_comment\n')
for key in self.paramDict.keys():
if key not in self.local_keys:
outFID.write(key + '\n')
if isinstance(self.paramDict[key], str):
svalue = self.paramDict[key]
else:
if isinstance(self.paramDict[key], list):
svalue = ' '.join(map(str, self.paramDict[key]))
else:
svalue = str(self.paramDict[key])
# replace exponent symbol
svalue = svalue.replace('e', 'd', 1)
outFID.write('%s \n' % svalue)
for l in self.wavelengths:
outFID.write('begin_comment\n')
outFID.write('**********************************\n')
outFID.write(' Wavelength %.3f \n' % l)
outFID.write('**********************************\n')
outFID.write('end_comment\n')
outFID.write('output_file\n')
outFID.write('mstm_l%.0f.out\n' % (l * 1000))
outFID.write('length_scale_factor\n')
outFID.write(' %.6f\n' % (2.0 * 3.14159 / l))
outFID.write('medium_real_ref_index\n')
outFID.write(' %f\n' % material.get_n(l))
outFID.write('medium_imag_ref_index\n')
outFID.write(' %f\n' % material.get_k(l))
outFID.write('sphere_sizes_and_positions\n')
for i in xrange(len(self.spheres)):
a = self.spheres.a[i]
if a > 0: # consider only positive radii
x = self.spheres.x[i]
y = self.spheres.y[i]
z = self.spheres.z[i]
self.spheres.materials[i].D = 2 * a
n = self.spheres.materials[i].get_n(l)
k = self.spheres.materials[i].get_k(l)
outFID.write(' %.4f %.4f %.4f %.4f %.3f %.3f \n' %
(a, x, y, z, n, k))
outFID.write('new_run\n')
outFID.write('end_of_options\n')
outFID.close()
# run the binary
if sys.platform == 'win32':
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
subprocess.call('%s scriptParams.inp > NUL' % self.command,
shell=True, startupinfo=si, cwd=tmpdir)
else:
subprocess.call('%s scriptParams.inp > /dev/null' % self.command,
shell=True, cwd=tmpdir)
# parse the simulation results
if self.paramDict['fixed_or_random_orientation'] == 0: # fixed orientation
self.extinction_par = [] # parallel polarization (\hat \alpha)
self.absorbtion_par = []
self.scattering_par = []
self.extinction_ort = [] # perpendicular polarization (\hat \beta)
self.absorbtion_ort = []
self.scattering_ort = []
for l in self.wavelengths:
inFID = open(os.path.join(tmpdir,
'mstm_l%.0f.out' % (l * 1000)),
'r')
while True:
line = inFID.readline()
if 'scattering matrix elements' in line:
break
elif 'parallel total ext, abs, scat efficiencies' in line:
values = map(float,
inFID.readline().strip().split())
values = list(values)
self.extinction_par.append(float(values[0]))
self.absorbtion_par.append(float(values[1]))
self.scattering_par.append(float(values[2]))
elif 'perpendicular total ext' in line:
values = map(float,
inFID.readline().strip().split())
values = list(values)
self.extinction_ort.append(float(values[0]))
self.absorbtion_ort.append(float(values[1]))
self.scattering_ort.append(float(values[2]))
inFID.close()
os.remove(os.path.join(tmpdir,
'mstm_l%.0f.out' % (l * 1000)))
self.extinction_par = np.array(self.extinction_par)
self.absorbtion_par = np.array(self.absorbtion_par)
self.scattering_par = np.array(self.scattering_par)
self.extinction_ort = np.array(self.extinction_ort)
self.absorbtion_ort = np.array(self.absorbtion_ort)
self.scattering_ort = np.array(self.scattering_ort)
return (self.wavelengths,
(self.extinction_par + self.extinction_ort))
else: # random orientation
self.extinction = []
self.absorbtion = []
self.scattering = []
for l in self.wavelengths:
inFID = open(os.path.join(tmpdir,
'mstm_l%.0f.out' % (l * 1000)),
'r')
while True:
line = inFID.readline()
if 'scattering matrix elements' in line:
break
elif 'total ext, abs, scat efficiencies' in line:
values = map(float,
inFID.readline().strip().split())
values = list(values) # python3 is evil
self.extinction.append(float(values[0]))
self.absorbtion.append(float(values[1]))
self.scattering.append(float(values[2]))
inFID.close()
os.remove(os.path.join(tmpdir,
'mstm_l%.0f.out' % (l * 1000)))
self.extinction = np.array(self.extinction)
self.absorbtion = np.array(self.absorbtion)
self.scattering = np.array(self.scattering)
if outfn is not None:
self.write(outfn)
return self.wavelengths, self.extinction
def plot(self):
'''
Plot results with matplotlib.pyplot
'''
plt.plot(self.wavelengths, self.extinction, 'r-', label='extinction')
plt.show()
return plt
def write(self, filename):
'''
Save results to file
'''
if self.paramDict["fixed_or_random_orientation"] == 1: # random
fout = open(filename, 'w')
fout.write('#Wavel.\tExtinct.\n')
for i in range(len(self.wavelengths)):
fout.write('%.4f\t%.8f\r\n' % (self.wavelengths[i],
self.extinction[i]))
fout.close()
else: # fixed
fout = open(filename, 'w')
fout.write('#Wavel.\tExt_par\tExt_ort\n')
for i in range(len(self.wavelengths)):
fout.write('%.4f\t%.8f\t%.8f\r\n' % (self.wavelengths[i],
self.extinction_par[i], self.extinction_ort[i]))
fout.close()
def set_incident_field(self, fixed=False, azimuth_angle=0.0,
polar_angle=0.0, polarization_angle=0.0):
"""
Set the parameters of incident wave
Parameters:
fixed: bool
True - fixed orientation and polarized light
False - average over all orientations and polarizations
azimuth_angle, polar_angle: float (degrees)
polarization_angle: float (degrees)
                polarization angle relative to the `k-z` plane.
0 - X-polarized, 90 - Y-polarized (if `azimuth` and
`polar` angles are zero).
"""
if not fixed:
self.paramDict['fixed_or_random_orientation'] = 1 # random
else:
self.paramDict['fixed_or_random_orientation'] = 0 # fixed
            self.paramDict['incident_azimuth_angle_deg'] = azimuth_angle
self.paramDict['incident_polar_angle_deg'] = polar_angle
self.paramDict['polarization_angle_deg'] = polarization_angle
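# Illustrative usage sketch (continues the SPR workflow; for a fixed
# orientation the per-polarization attributes set in simulate() are
# available, e.g. extinction_par / extinction_ort):
#
#     spr = SPR(np.linspace(400, 800, 50))
#     spr.set_spheres(SingleSphere(0.0, 0.0, 0.0, 10.0, 'etaGold.txt'))
#     spr.set_incident_field(fixed=True, polarization_angle=90.0)
#     spr.simulate()
#     total_ext = spr.extinction_par + spr.extinction_ort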
class Material(object):
r"""
Material class.
Use `get_n()` and `get_k()` methods to obtain values of refraction
    index at arbitrary wavelength (in nm).
"""
def __init__(self, file_name, wls=None, nk=None, eps=None):
r"""
Parameters:
file_name:
1. complex value, written in numpy format or as string;
2. one of the predefined strings (air, water, glass);
3. filename with optical constants.
File header should state `lambda`, `n` and `k` columns
If either `nk= n + 1j*k` or `eps = re + 1j*im` arrays are
specified, then the data from one of them will be used
and filename content will be ignored.
wls: float array
array of wavelengths (in nm) used for data interpolation.
If None then ``np.linspace(300, 800, 500)`` will be used.
"""
if isinstance(file_name, str):
self.__name__ = 'Mat_%s' % os.path.basename(file_name)
else:
self.__name__ = 'Mat_%.3f' % file_name
if wls is None:
wl_min = 200 # 149.9
wl_max = 1200 # 950.1
wls = np.array([wl_min, wl_max])
k = np.array([0.0, 0.0])
if nk is not None:
n = np.real(nk)
k = np.imag(nk)
elif eps is not None:
mod = np.absolute(eps)
n = np.sqrt((mod + np.real(eps)) / 2)
k = np.sqrt((mod - np.real(eps)) / 2)
else:
try:
np.complex(file_name)
is_complex = True
except ValueError:
is_complex = False
if is_complex:
nk = np.complex(file_name)
n = np.array([np.real(nk), np.real(nk)])
k = np.array([np.imag(nk), np.imag(nk)])
else:
if file_name.lower() == 'air':
n = np.array([1.0, 1.0])
elif file_name.lower() == 'water':
n = np.array([1.33, 1.33])
elif file_name.lower() == 'glass':
n = np.array([1.66, 1.66])
else:
optical_constants = np.genfromtxt(file_name, names=True)
wls = optical_constants['lambda']
if np.max(wls) < 100: # wavelengths are in micrometers
wls = wls * 1000 # convert to nm
n = optical_constants['n']
k = optical_constants['k']
if wls[0] > wls[1]: # form bigger to smaller
wls = np.flipud(wls) # reverse order
n = np.flipud(n)
k = np.flipud(k)
n = n[wls > wl_min]
k = k[wls > wl_min]
wls = wls[wls > wl_min]
n = n[wls < wl_max]
k = k[wls < wl_max]
wls = wls[wls < wl_max]
wl_step = np.abs(wls[1] - wls[0])
if (wl_step > 1.1) and (wl_step < 500):
interp_kind = 'cubic' # cubic interpolation
else: # too dense or too sparse mesh, linear interpolation is needed
interp_kind = 'linear'
# print('Interpolation kind : %s'%interp_kind)
self._get_n_interp = interpolate.interp1d(wls, n, kind=interp_kind)
self._get_k_interp = interpolate.interp1d(wls, k, kind=interp_kind)
def get_n(self, wl):
return self._get_n_interp(wl)
def get_k(self, wl):
return self._get_k_interp(wl)
def __str__(self):
return self.__name__
def plot(self, wls=None, fig=None, axs=None):
r"""
plot ``n`` and ``k`` dependence from wavelength
Parameters:
wls: float array
array of wavelengths (in nm). If None then
``np.linspace(300, 800, 500)`` will be used.
fig: matplotlib figure
axs: matplotlib axes
Return:
filled/created fig and axs objects
"""
if wls is None:
wls = np.linspace(300, 800, 501)
flag = fig is None
if flag:
fig = plt.figure()
axs = fig.add_subplot(111)
axs.plot(wls, self.get_n(wls), label='Real')
axs.plot(wls, self.get_k(wls), label='Imag')
axs.set_ylabel('Refraction index')
axs.set_xlabel('Wavelength, nm')
axs.legend()
if flag:
plt.show()
return fig, axs
# class MaterialManager():
# """
# Cache for materials, to decrease file i/o
# """
# def __init__(self, wavelengths):
# self.materials = {}
class Spheres(object):
"""
Abstract collection of spheres
Object fields:
N: int
number of spheres
x, y, z: numpy arrays
coordinates of spheres centers
a: list or arrray
spheres radii
materials: numpy array
Material objects or strings
"""
def __init__(self):
"""
Creates empty collection of spheres. Use child classes for non-empty!
"""
self.N = 0
self.x = []
self.y = []
self.z = []
self.a = [] # radius
self.materials = []
def __len__(self):
return self.N
def check_overlap(self, eps=0.001):
"""
Check if spheres are overlapping
"""
result = False
n = len(self.x)
for i in xrange(n):
for j in xrange(i + 1, n):
dx = abs(self.x[j] - self.x[i])
dy = abs(self.y[j] - self.y[i])
dz = abs(self.z[j] - self.z[i])
Ri = self.a[i]
Rj = self.a[j]
dist = np.sqrt(dx * dx + dy * dy + dz * dz)
if dist < Ri + Rj + eps:
# distance between spheres is less than sum of thier radii
# but there still can be nested spheres, check it
if Ri > Rj:
result = Ri < dist + Rj + eps
else: # Rj < Ri
result = Rj < dist + Ri + eps
if result: # avoid unneeded steps
return True
return result
def append(self, sphere):
"""
Append by data from SingleSphere object
Parameter:
sphere: SingleSphere
"""
self.a = np.append(self.a, sphere.a[0])
self.x = np.append(self.x, sphere.x[0])
self.y = np.append(self.y, sphere.y[0])
self.z = np.append(self.z, sphere.z[0])
self.materials.append(sphere.materials[0])
self.N += 1
def delete(self, i):
"""
Delete element with index `i`
"""
self.a = np.delete(self.a, i)
self.x = np.delete(self.x, i)
self.y = np.delete(self.y, i)
self.z = np.delete(self.z, i)
self.materials.pop(i)
self.N -= 1
def extend(self, spheres):
"""
Append by all items from object `spheres`
"""
for i in xrange(len(spheres)):
self.append(SingleSphere(spheres.x[i], spheres.y[i],
spheres.z[i], spheres.a[i], spheres.materials[i]))
def get_center(self, method=''):
"""
calculate center of masses in assumption of uniform density
Parameter:
method: string {''|'mass'}
If method == 'mass' then center of masses
(strictly speaking, volumes) is calculated.
Otherwise all spheres are averaged evenly.
"""
weights = np.ones(self.N)
if method.lower() == 'mass':
weights = self.a**3
Xc = np.sum(np.dot(self.x, weights)) / np.sum(weights)
Yc = np.sum(np.dot(self.y, weights)) / np.sum(weights)
Zc = np.sum(np.dot(self.z, weights)) / np.sum(weights)
return np.array((Xc, Yc, Zc))
def load(self, filename, mat_filename='etaGold.txt', units='nm'):
"""
Reads spheres coordinates and radii from file.
Parameters:
filename: string
file to be read from
mat_filename: string
                all spheres will have this material (per-sphere material
                storage is not yet implemented)
units: string {'mum'|'nm'}
distance units.
If 'mum' then coordinated will be scaled (x1000)
"""
x = []
y = []
z = []
a = []
try:
f = open(filename, 'r')
text = f.readlines()
for line in text:
if line[0] != '#': # skip comment and header
words = [w.strip() for w in line.replace(',', '.').split()]
data = [float(w) for w in words]
a.append(data[0])
x.append(data[1])
y.append(data[2])
z.append(data[3])
f.close()
except Exception as err:
print('Load failed \n %s' % err)
self.N = len(a)
self.x = np.array(x)
self.y = np.array(y)
self.z = np.array(z)
self.a = np.array(a)
if units == 'mum':
self.x = self.x * 1000.0
self.y = self.y * 1000.0
self.z = self.z * 1000.0
self.a = self.a * 1000.0
self._set_material(mat_filename)
def save(self, filename):
"""
Saves spheres coordinates and radii to file.
Parameter:
filename: string
"""
try:
f = open(filename, 'w')
f.write('#radius\tx\ty\tz\tn\tk\r\n')
for i in xrange(self.N):
wl = 555
a = self.a[i]
x = self.x[i]
y = self.y[i]
z = self.z[i]
n = self.materials[i].get_n(wl)
k = self.materials[i].get_k(wl)
f.write('%f\t\t%f\t\t%f\t\t%f\t\t%f\t\t%f\r\n' %
(a, x, y, z, n, k))
except Exception as err:
print('Save failed \n %s' % err)
finally:
f.close()
class SingleSphere(Spheres):
"""
Collection of spheres with only one sphere
"""
def __init__(self, x, y, z, a, mat_filename='etaGold.txt'):
"""
Parameters:
x, y, z: float
coordinates of spheres centers
a: float
spheres radii
mat_filename: string, float, complex value or Material object
material specification
"""
self.N = 1
self.x = np.array([x])
self.y = np.array([y])
self.z = np.array([z])
self.a = np.array([a])
if isinstance(mat_filename, Material):
self.materials = [mat_filename]
else:
self.materials = [Material(mat_filename)]
class LogNormalSpheres(Spheres):
"""
The set of spheres positioned on the regular mesh
with random Log-Normal distributed sizes.
    If the spheres overlap, their sizes should be regenerated.
"""
def __init__(self, N, mu, sigma, d, mat_filename='etaGold.txt'):
"""
Parameters:
N: int
number of spheres
mu, sigma: floats
parameters of Log-Normal distribution
d: float
average empty space between spheres centers
mat_filename: string or Material object
specification of spheres material
"""
# estimate the box size:
a = mu # average sphere radius
A = (N**(1. / 3) + 1) * (d + 2 * a)
print('Box size estimated as: %.1f nm' % A)
# A = A*1.5
Xc = []
Yc = []
Zc = []
x = -A / 2.0
while x < A / 2.0:
y = -A / 2.0
while y < A / 2.0:
z = -A / 2.0
while z < A / 2.0:
if (x * x + y * y + z * z < A * A / 4.0):
Xc.append(x)
Yc.append(y)
Zc.append(z)
z = z + (2 * a + d)
y = y + (2 * a + d)
x = x + (2 * a + d)
print('Desired number of particles: %i' % N)
print('Number of particles in a box: %i' % len(Xc))
self.N = min([N, len(Xc)])
print('Resulted number of particles: %i' % self.N)
self.x = np.array(Xc)
self.y = np.array(Yc)
self.z = np.array(Zc)
        random_a = lognormal(np.log(mu), sigma, self.N)  # nm
self.a = np.array(random_a)
if isinstance(mat_filename, Material):
mat = mat_filename
else:
mat = Material(mat_filename)
self.materials = [mat for i in xrange(self.N)]
class ExplicitSpheres (Spheres):
def __init__(self, N=0, Xc=[], Yc=[], Zc=[], a=[],
mat_filename='etaGold.txt'):
"""
Create explicitely defined spheres
Parameters:
N: int
number of spheres
Xc, Yc, Zc: lists or numpy arrays
coordinates of the spheres centers
a: list or numpy array
radii of the spheres
mat_filename: string, list of strings, Material or list of
Materials specification of spheres material
            Note: If only the first array Xc is supplied, then all data is
            assumed zipped in it,
i.e.: `Xc = [X1, Y1, Z1, a1, ..., XN, YN, ZN, aN]`
"""
super(ExplicitSpheres, self).__init__()
self.N = N
if N == 0: # special case of empty object
self.x = []
self.y = []
self.z = []
self.a = []
return
if N < len(Xc): # data is zipped in Xc
assert(4 * N == len(Xc))
self.x = np.zeros(N)
self.y = np.zeros(N)
self.z = np.zeros(N)
self.a = np.zeros(N)
i = 0
while i < len(Xc):
self.x[i // 4] = Xc[i + 0]
self.y[i // 4] = Xc[i + 1]
self.z[i // 4] = Xc[i + 2]
self.a[i // 4] = abs(Xc[i + 3])
i = i + 4
else:
self.x = np.array(Xc)
self.y = np.array(Yc)
self.z = np.array(Zc)
self.a = np.abs(np.array(a))
if isinstance(mat_filename, (Material, str)):
# one material filename for all spheres
self._set_material(mat_filename)
elif isinstance(mat_filename, list):
# list of material filenames for all spheres
if len(mat_filename) == 1:
self._set_material(mat_filename[0])
else:
assert(len(mat_filename) == self.N)
for mat_fn in mat_filename:
# TODO: use material manager to avoid re-creating
# and extra file reads
if isinstance(mat_fn, Material):
self.materials.append(mat_fn)
else:
self.materials.append(Material(mat_fn))
else:
raise Exception('Bad material variable: %s' % str(mat_filename))
# if self.check_overlap():
# print('Warning: Spheres are overlapping!')
def _set_material(self, mat_filename):
if isinstance(mat_filename, Material):
mat = mat_filename
else:
mat = Material(mat_filename)
self.materials = [mat for i in xrange(self.N)]
if __name__ == '__main__':
print('Overlap tests')
spheres = Spheres()
print(' Test not overlapped... ')
spheres.x = [-5, 5]
spheres.y = [0, 0]
spheres.z = [0, 0]
spheres.a = [4, 4]
assert(not spheres.check_overlap())
print(' Test overlapped... ')
spheres.a = [5, 5]
assert(spheres.check_overlap())
print(' Test nested... ')
spheres.x = [0, 0]
spheres.a = [2, 5]
assert(not spheres.check_overlap())
spheres.a = [5, 3]
assert(not spheres.check_overlap())
# input('Press enter')
print('Materials test')
mat = Material(os.path.join('nk', 'etaGold.txt'))
# mat.plot()
mat1 = Material(os.path.join('nk', 'etaSilver.txt'))
mat3 = Material('glass')
mat5 = Material(1.5)
mat6 = Material('2.0+0.5j')
mat7 = Material('mat7', wls=np.linspace(300, 800, 100),
nk=np.linspace(-10, 5, 100) + 1j * np.linspace(0, 10, 100))
    mat8 = Material('mat8', wls=np.linspace(300, 800, 100),
eps=np.linspace(-10, 5, 100) + 1j * np.linspace(0, 10, 100))
print('etaGold ', mat.get_n(800))
print('etaSilver ', mat1.get_n(800))
print('Glass (constant) ', mat3.get_n(800), mat3.get_k(800))
print('n=1.5 material ', mat5.get_n(550))
print('n=2.0+0.5j material ', mat6.get_n(550), mat6.get_k(550))
print('nk material ', mat7.get_n(550), mat7.get_k(550))
print('eps material ', mat8.get_n(550), mat8.get_k(550))
# input('Press enter')
with Profiler() as p:
wls = np.linspace(300, 800, 100)
# create SPR object
spr = SPR(wls)
spr.environment_material = 'glass'
# spr.set_spheres(SingleSphere(0.0, 0.0, 0.0, 25.0, 'etaGold.txt'))
spheres = ExplicitSpheres(2, [0, 0, 0, 10, 0, 0, 0, 12],
mat_filename=['nk/etaGold.txt',
'nk/etaSilver.txt'])
# spheres = ExplicitSpheres(2, [0,0,0,20,0,0,0,21],
# mat_filename='etaGold.txt')
spr.set_spheres(spheres)
# spr.set_spheres(LogNormalSpheres(27, 0.020, 0.9, 0.050 ))
# calculate!
# spr.command = ''
spr.simulate()
spr.plot()
# ~ input('Press enter')
| gpl-3.0 |
glennq/scikit-learn | examples/manifold/plot_lle_digits.py | 138 | 8594 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation to which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble,
discriminant_analysis, random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
kashif/scikit-learn | sklearn/tests/test_kernel_approximation.py | 78 | 7586 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
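    # The additive chi-squared kernel being approximated here is
    #   k(x, y) = sum_i 2 * x_i * y_i / (x_i + y_i)
    # the broadcasting below evaluates it exactly for every sample pair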
# abbreviations for easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
    # test that SkewedChi2Sampler approximates kernel on random data
# compute exact kernel
c = 0.03
# abbreviations for easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
    # this array is n_samples_x x n_samples_y x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
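    # RBFSampler draws random Fourier features (Rahimi & Recht): roughly,
    # z(x) = sqrt(2 / n_components) * cos(W x + b) with W ~ N(0, 2*gamma*I)
    # and b ~ U[0, 2*pi], so that z(x) . z(y) approximates the RBF kernel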
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
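    # Nystroem maps phi(x) = K(x, basis) K_basis^{-1/2} for a sampled basis;
    # with every sample used as a basis point, phi(X) phi(X).T recovers K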
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
alexsavio/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 99 | 4163 |
"""
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get all 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
RayMick/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
msultan/msmbuilder | msmbuilder/project_templates/0-test-install.py | 9 | 2531 | """This script tests your python installation as it pertains to running project templates.
MSMBuilder supports Python 2.7 and 3.3+ and has some necessary dependencies
like numpy, scipy, and scikit-learn. This templated project enforces
some more stringent requirements to make sure all the users are more-or-less
on the same page and to allow developers to exploit more helper libraries.
You can modify the template scripts to work for your particular set-up,
but it's probably easier to install `conda` and get the packages we
recommend.
{{header}}
"""
import textwrap
# Show intro text
paragraphs = __doc__.split('\n\n')
for p in paragraphs:
print(textwrap.fill(p))
print()
warnings = 0
## Test for python 3.5
import sys
if sys.version_info < (3, 5):
print(textwrap.fill(
"These scripts were all developed on Python 3.5, "
"which is the current, stable release of Python. "
"In particular, we use subprocess.run "
"(and probably some other new features). "
"You can easily modify the scripts to work on older versions "
"of Python, but why not just upgrade? We like Continuum's "
"Anaconda Python distribution for a simple install (without root)."
))
print()
warnings += 1
## Test for matplotlib
try:
    import matplotlib
except ImportError:
print(textwrap.fill(
"These scripts try to make some mildly intesting plots. "
"That requires `matplotlib`."
))
print()
warnings += 1
## Test for seaborn
try:
import seaborn as sns
except ImportError:
print(textwrap.fill(
"The default matplotlib styling is a little ugly. "
"By default, these scripts try to use `seaborn` to make prettier "
"plots. You can remove all the seaborn imports if you don't want "
"to install this library, but why not just install it? Try "
"`conda install seaborn`"
))
print()
warnings += 1
## Test for xdg-open
try:
import subprocess
subprocess.check_call(['xdg-open', '--version'])
except (OSError, subprocess.CalledProcessError):
print(textwrap.fill(
"For convenience, the plotting scripts can try to use `xdg-open` "
"to pop up the result of the plot. Use the --display flag on "
"msmb TemplateProject to enable this behavior."
))
warnings += 1
## Report results
if warnings == 0:
print("I didn't find any problems with your installation! Good job.")
print()
else:
print("I found {} warnings, see above. Good luck!".format(warnings))
print()
| lgpl-2.1 |
spallavolu/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error, which is due to the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
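    # For y = f(x) + eps with Var(eps) = noise**2, the expected squared error
    # of an estimator y_hat fit on a random training set LS decomposes as
    #   E[(y - y_hat(x))^2] = (f(x) - E_LS[y_hat(x)])^2            # bias^2
    #                       + E_LS[(y_hat(x) - E_LS[y_hat(x)])^2]  # variance
    #                       + noise**2                             # irreducible
    # The loops below estimate each term from the n_repeat repetitions.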
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
    plt.plot(X_test, np.mean(y_predict, axis=1), "c",
             label=r"$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
    plt.plot(X_test, y_bias, "b", label="$bias^2(x)$")
    plt.plot(X_test, y_var, "g", label="$variance(x)$")
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
shrtCKT/simple-dnn | simple_dnn/dcgan_example.py | 1 | 3138 | # ---------------------------------------------------------------------------------------
# Example Usage
# ---------------------------------------------------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from generative.discriminator import DiscriminatorDC
from generative.gan import MultiClassGAN
from generative.generator import GeneratorDC
from util.format import UnitPosNegScale, reshape_pad
from util.sample_writer import ImageGridWriter
mnist = input_data.read_data_sets("../../data/MNIST_data/",
one_hot=True)
print(mnist.train.images.shape)
print(mnist.train.labels.shape)
discriminator = DiscriminatorDC(10, # y_dim
[16,32,64], # conv_units
hidden_units=None,
kernel_sizes=[5,5], strides=[2, 2], paddings='SAME',
d_activation_fn=tf.contrib.keras.layers.LeakyReLU,
f_activation_fns=tf.nn.relu,
dropout=False, keep_prob=0.5)
generator = GeneratorDC([32, 32],#x_dims
1, # x_ch
[64,32,16], # g_conv_units
g_kernel_sizes=[5,5], g_strides=[2, 2], g_paddings='SAME',
g_activation_fn=tf.nn.relu)
dcgan = MultiClassGAN([32, 32], # x_dim
1, # x_ch
10, # y_dim
z_dim=100,
generator=generator, # Generator Net
discriminator=discriminator, # Discriminator Net
x_reshape=reshape_pad([28,28], [32,32], 1, pad=True, pad_value=-1),
x_scale=UnitPosNegScale.scale,
x_inverse_scale=UnitPosNegScale.inverse_scale,
d_optimizer=tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5),
g_optimizer=tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5),
g_loss_fn='default',
d_label_smooth=0.75,
## Training config
batch_size=128,
iterations=5,
display_step=1,
save_step=500,
sample_writer= ImageGridWriter('../../data/figs/closed', grid_size=[6, 6],
img_dims=[32, 32])
# model_directory='../data/models/closed'
)
dcgan.fit(mnist.train.images, mnist.train.labels,
val_x=mnist.validation.images, val_y=mnist.validation.labels)
n_samples = 36
# ys_gen = np.zeros([n_samples, mnist.train.labels.shape[1] + 1])
# ys_gen[:, np.random.randint(0, mnist.train.labels.shape[1], size=n_samples)] = 1
gen_xs = dcgan.generate(n_samples)
gen_imgs = ImageGridWriter.merge_img(np.reshape(gen_xs[0:n_samples],[n_samples, dcgan.x_dims[0], dcgan.x_dims[1]]))
plt.imshow(gen_imgs, cmap='gray')
plt.show()
| gpl-3.0 |
pompiduskus/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 155 | 8058 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed, [])
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
| bsd-3-clause |
dcherian/tools | ROMS/pmacc/tools/post_tools/rompy/tags/rompy-0.1/rompy/utils.py | 1 | 15659 | import numpy as np
from matplotlib.mlab import griddata
__version__ = '0.1'
def interp_2d_latlon(lat,lon,data,lati,loni):
return griddata(lat.reshape(lat.size),lon.reshape(lon.size),data.reshape(data.size),lati,loni)
def interp_2d_xy(x,y,data,xi,yi):
try:
		return griddata(x.reshape(x.size),y.reshape(y.size),data.reshape(data.size),xi,yi)
	except TypeError:
		# griddata could not handle the inputs; fall back to the manual
		# bilinear interpolation below
		pass
if x.ndim ==2 and y.ndim ==2:
x_vec = x[0,:]
y_vec = y[:,0]
elif x.ndim == 3 and y.ndim == 3:
x_vec = x[0,0,:]
y_vec = y[0,:,0]
else:
x_vec = x
y_vec = y
xl = np.nonzero(x_vec <= xi)[0][-1]
xh = np.nonzero(xi <= x_vec)[0][0]
yl = np.nonzero(y_vec <= yi)[0][-1]
yh = np.nonzero(yi <= y_vec)[0][0]
if not x_vec[xl] == x_vec[xh]:
xd = (xi-x_vec[xh])/(x_vec[xl]-x_vec[xh])
else:
xd = 1.
if not y_vec[yl] == y_vec[yh]:
yd = (yi-y_vec[yh])/(y_vec[yl]-y_vec[yh])
else:
yd = 1.
w0 = data[yl,xl]*(1-yd) + data[yh,xl]*yd
w1 = data[yl,xh]*(1-yd) + data[yh,xh]*yd
di = w0*(1-xd) + w1*xd
return di
def interp_2d_point(x,y,d,xi,yi):
	if not x.ndim == 1 or not y.ndim == 1:
		raise TypeError('interp_2d_point needs x and y to be vectors')
	if not xi.size == 1 or not yi.size == 1:
		print(xi,yi)
		raise TypeError('interp_2d_point needs xi and yi to be single values')
try:
xl = np.nonzero(x <= xi)[0][-1]
xh = np.nonzero(xi <= x)[0][0]
yl = np.nonzero(y <= yi)[0][-1]
yh = np.nonzero(yi <= y)[0][0]
	except IndexError:
		print('x, xi')
		print(x,xi)
		print('y, yi')
		print(y,yi)
		raise
if not x[xl] == x[xh]:
xd = (xi-x[xh])/(x[xl]-x[xh])
else:
xd = 1.
if not y[yl] == y[yh]:
yd = (yi-y[yh])/(y[yl]-y[yh])
else:
yd = 1.
w0 = d[yl,xl]*(1-yd) + d[yh,xl]*yd
w1 = d[yl,xh]*(1-yd) + d[yh,xh]*yd
return w0*(1-xd) + w1*xd
def interp_2d_from_list_of_points(x,y,z,d,p_list):
	di = np.zeros(len(p_list))
for i in range(len(p_list)):
di[i] = interp_2d_point(x,y,d,p_list[i][0],p_list[i][1])
return di
def interp_2d(x,y,data,xi,yi):
if x.shape == y.shape and x.shape == data.shape:
# assume x and y are the same everywhere in their respective dimension
if x.ndim == 2:
x_vec = x[0,:]
else:
x_vec = x
if y.ndim == 2:
y_vec = y[:,0]
else:
y_vec = y
# assume xi and yi are vectors
if xi.ndim == 1 and yi.ndim == 1:
di = np.zeros((len(yi),len(xi)))
for i in range(len(xi)):
for j in range(len(yi)):
di[j,i] = interp_2d_point(x_vec,y_vec,data,xi[i],yi[j])
# if xi and yi are not vectors, then lets just do everything on a point by point basis.
elif xi.shape == yi.shape:
di = np.zeros(xi.shape)
for i in range(xi.size):
di.flat[i] = interp_2d_point(x_vec,y_vec,data,xi.flat[i],yi.flat[i])
return di
else:#elif (len(x),len(y)) == data.shape:
print('Do this the other way')
def interp_3d_point(x,y,z,d,xi,yi,zi):
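	"""Trilinear interpolation of d (indexed [z, y, x]) at one query point.

	x, y, and z must be 1-D coordinate vectors and xi, yi, zi scalars;
	the value is blended along z first, then y, then x.
	"""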
	if not x.ndim == 1 or not y.ndim == 1 or not z.ndim == 1:
		raise TypeError('interp_3d_point needs x, y, and z to be vectors')
	if not xi.size == 1 or not yi.size == 1 or not zi.size == 1:
		print(xi,yi,zi)
		raise TypeError('interp_3d_point needs xi, yi, and zi to be single values')
try:
xl = np.nonzero(x <= xi)[0][-1]
xh = np.nonzero(xi <= x)[0][0]
yl = np.nonzero(y <= yi)[0][-1]
yh = np.nonzero(yi <= y)[0][0]
zl = np.nonzero(z <= zi)[0][-1]
zh = np.nonzero(zi <= z)[0][0]
	except IndexError:
		print('x, xi')
		print(x,xi)
		print('y, yi')
		print(y,yi)
		print('z, zi')
		print(z,zi)
		raise
# print((xl,xi, xh),(yl, yi, yh),( zl, zi, zh))
if not x[xl] == x[xh]:
xd = (xi-x[xh])/(x[xl]-x[xh])
else:
xd = 1.
if not y[yl] == y[yh]:
yd = (yi-y[yh])/(y[yl]-y[yh])
else:
yd = 1.
if not z[zl] == z[zh]:
zd = (zi-z[zh])/(z[zl]-z[zh])
else:
zd = 1.
i0 = d[zl,yl,xl]*(1-zd) + d[zh,yl,xl]*zd
i1 = d[zl,yh,xl]*(1-zd) + d[zh,yh,xl]*zd
j0 = d[zl,yl,xh]*(1-zd) + d[zh,yl,xh]*zd
j1 = d[zl,yh,xh]*(1-zd) + d[zh,yh,xh]*zd
w0 = i0*(1-yd) + i1*yd
w1 = j0*(1-yd) + j1*yd
# cludge alert
if np.abs(w0) > 1.0e35 or np.abs(w1) > 1.0e35:
return np.nan
else:
return w0*(1-xd) + w1*xd
def interp_3d_from_list_of_points(x,y,z,d,p_list):
	di = np.zeros(len(p_list))
for i in range(len(p_list)):
di[i] = interp_3d_point(x,y,z,d,p_list[i][0],p_list[i][1],p_list[i][2])
return di
def interp_3d(x,y,z,data,xi,yi,zi):
# we make a lot of assumptions about the incoming data. this is not a universal interpn
if x.shape == y.shape and x.shape == z.shape and x.shape == data.shape:
#print('Do this the hard way')
# assume x, y, and z are the same everywhere in their respective dimension
if x.ndim == 3:
x_vec = x[0,0,:]
else:
x_vec = x
if y.ndim == 3:
y_vec = y[0,:,0]
else:
y_vec = y
if z.ndim == 3:
z_vec = z[:,0,0]
else:
z_vec = z
# assume xi, yi, and zi are vectors
if xi.ndim == 1 and yi.ndim == 1 and zi.ndim == 1:
di = np.zeros((len(zi), len(yi),len(xi)))
for i in range(len(xi)):
for j in range(len(yi)):
for k in range(len(zi)):
di[k,j,i] = interp_3d_point(x_vec,y_vec,z_vec,data,xi[i],yi[j],zi[k])
# if xi, yi, and zi are not vectors, then lets just do everything on a point by point basis.
elif xi.shape == yi.shape and xi.shape == zi.shape:
#print("I'm in the right place")
di = np.zeros(xi.shape)
for i in range(xi.size):
di.flat[i] = interp_3d_point(x_vec,y_vec,z_vec,data,xi.flat[i],yi.flat[i],zi.flat[i])
return di
elif (len(x),len(y),len(z)) == data.shape:
print('Do this the other way')
def meshgrid(*xi,**kwargs):
"""
Return coordinate matrices from one or more coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : 'xy' or 'ij' (optional)
cartesian ('xy', default) or matrix ('ij') indexing of output
sparse : True or False (default) (optional)
If True a sparse grid is returned in order to conserve memory.
copy : True (default) or False (optional)
If False a view into the original arrays are returned in order to
conserve memory
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` ,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> x = np.linspace(0,1,3) # coordinates along x axis
>>> y = np.linspace(0,1,2) # coordinates along y axis
>>> xv, yv = meshgrid(x,y) # extend x and y for a 2D xy grid
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x,y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
>>> meshgrid(x,y,sparse=True,indexing='ij') # change to matrix indexing
[array([[ 0. ],
[ 0.5],
[ 1. ]]), array([[ 0., 1.]])]
>>> meshgrid(x,y,indexing='ij')
[array([[ 0. , 0. ],
[ 0.5, 0.5],
[ 1. , 1. ]]),
array([[ 0., 1.],
[ 0., 1.],
[ 0., 1.]])]
>>> meshgrid(0,1,5) # just a 3D point
[array([[[0]]]), array([[[1]]]), array([[[5]]])]
>>> map(np.squeeze,meshgrid(0,1,5)) # just a 3D point
[array(0), array(1), array(5)]
>>> meshgrid(3)
array([3])
>>> meshgrid(y) # 1D grid; y is just returned
array([ 0., 1.])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2+yy**2)/(xx**2+yy**2)
"""
copy = kwargs.get('copy',True)
args = np.atleast_1d(*xi)
if not isinstance(args, list):
if args.size>0:
return args.copy() if copy else args
else:
			raise TypeError('meshgrid() takes 1 or more arguments (0 given)')
sparse = kwargs.get('sparse',False)
indexing = kwargs.get('indexing','xy') # 'ij'
ndim = len(args)
s0 = (1,)*ndim
output = [x.reshape(s0[:i]+(-1,)+s0[i+1::]) for i, x in enumerate(args)]
shape = [x.size for x in output]
if indexing == 'xy':
# switch first and second axis
output[0].shape = (1,-1) + (1,)*(ndim-2)
output[1].shape = (-1, 1) + (1,)*(ndim-2)
shape[0],shape[1] = shape[1],shape[0]
if sparse:
if copy:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy:
mult_fact = np.ones(shape,dtype=int)
return [x*mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
def ndgrid(*args,**kwargs):
"""
Same as calling meshgrid with indexing='ij' (see meshgrid for
documentation).
"""
kwargs['indexing'] = 'ij'
return meshgrid(*args,**kwargs)
def station_to_lat_lon(station_list):
lat_dict = {}
lon_dict = {}
lat_dict[4] = 48.2425
lon_dict[4] = -122.542
# lat_dict[6] = 47.925
# lon_dict[6] = -122.4685
lat_dict[6] = 47.9298
lon_dict[6] = -122.4932
lat_dict[7] = 47.9835
lon_dict[7] = -122.6201
lat_dict[7.5] = 47.9269
lon_dict[7.5] = -122.6418
lat_dict[8] = 47.8967
lon_dict[8] = -122.6053
lat_dict[8.5] = 47.8708
lon_dict[8.5] = -122.5848
lat_dict[9] = 47.8333
lon_dict[9] = -122.6673
lat_dict[10] = 47.8001
lon_dict[10] = -122.7198
lat_dict[11] = 47.3751
lon_dict[11] = -123.1375
lat_dict[11.1] = 47.36176418802982
lon_dict[11.1] = -123.063768617271
lat_dict[11.2] = 47.3550
lon_dict[11.2] = -123.0305
lat_dict[12] = 47.4272
lon_dict[12] = -123.1142
lat_dict[13] = 47.5471
lon_dict[13] = -123.008
lat_dict[14] = 47.6068
lon_dict[14] = -122.9399
lat_dict[15] = 47.6616
lon_dict[15] = -122.8601
# true station 16 lat lon
# lat_dict[16] = 47.6917
# lon_dict[16] = -122.6074
# fake station 16 lat lon
lat_dict[16] = 47.6793
lon_dict[16] = -122.7578
lat_dict[17] = 47.7356
lon_dict[17] = -122.7614
lat_dict[18] = 48.0303
lon_dict[18] = -122.6169
lat_dict[19] = 48.0915
lon_dict[19] = -122.6318
lat_dict[20] = 48.142
lon_dict[20] = -122.6848
lat_dict[21] = 48.1883
lon_dict[21] = -122.8504
lat_dict[22] = 48.2717
lon_dict[22] = -123.0189
lat_dict[24] = 48.3416
lon_dict[24] = -123.1249
# lat_dict[27] = 47.8133
# lon_dict[27] = -122.4050
lat_dict[27] = 47.7403
lon_dict[27] = -122.4103
lat_dict[28] = 47.7034
lon_dict[28] = -122.4544
lat_dict[29] = 47.5568
lon_dict[29] = -122.4433
lat_dict[30] = 47.4565
lon_dict[30] = -122.4084
# lat_dict[31] = 47.3937
# lon_dict[31] = -122.3601
lat_dict[31] = 47.3811
lon_dict[31] = -122.3487
lat_dict[32] = 47.3329
lon_dict[32] = -122.4427
lat_dict[33] = 47.3198
lon_dict[33] = -122.5008
lat_dict[33.5] = 47.3235
lon_dict[33.5] = -122.5621
lat_dict[34] = 47.28636086132807
lon_dict[34] = -122.5372204355572
lat_dict[34.5] = 47.19570992132258
lon_dict[34.5] = -122.6043449761104
lat_dict[35] = 47.1755994508588
lon_dict[35] = -122.6519213323306
lat_dict[35.5] = 47.1124942780185
lon_dict[35.5] = -122.6858130539733
lat_dict[35.6] = 47.11563089257614
lon_dict[35.6] = -122.7313463437406
lat_dict[36] = 47.20101267057353
lon_dict[36] = -122.825703220149
lat_dict[36.1] = 47.1670
lon_dict[36.1] = -122.8573269666242
lat_dict[36.2] = 47.14631182649271
lon_dict[36.2] = -122.9157302320745
lat_dict[36.3] = 47.07516793450768
lon_dict[36.3] = -122.9127057495704
lat_dict[401] = 47.49
lon_dict[401] = -123.0567
lat_dict[402] = 47.3635
lon_dict[402] = -123.0167366137608
lat_dict[402.1] = 47.37605785194047
lon_dict[402.1] = -123.0013100422375
lat_dict[403] = 47.40338787905921
lon_dict[403] = -122.9112721092632
lat_dict[403.1] = 47.42062136686801
lon_dict[403.1] = -122.8790276962233
lat_dict['admiralty inlet'] = 48.1734
lon_dict['admiralty inlet'] = -122.7557
lat = []
lon = []
for s in station_list:
lat.append(lat_dict[s])
lon.append(lon_dict[s])
return np.array(lon), np.array(lat)
def high_res_station_to_lat_lon(station_list,n=5):
lon, lat = station_to_lat_lon(station_list)
hr_lat = []
hr_lon = []
for i in range(len(lat)-1):
for j in range(n):
frac = float(j)/float(n)
hr_lat.append(lat[i]*(1-frac) + lat[i+1]*frac)
hr_lon.append(lon[i]*(1-frac) + lon[i+1]*frac)
hr_lat.append(lat[-1])
hr_lon.append(lon[-1])
return np.array(hr_lon), np.array(hr_lat)
def hood_canal_station_list():
# return [24,22,21,20,19,18,7,7.5,8,8.5,9,10,17,16,15,14,13,401,12,11,402]
return [7.5,8,8.5,9,10,17,16,15,14,13,401,12,11,11.1,11.2,402,402.1,403,403.1]
# return [11,11.1,11.2,402,402.1,403,403.1] # This is a close up of the nasty bits at the end of Hood Canal that are difficult to get right
def main_basin_station_list():
return [24,22,21,20,19,18,7,6,27,28,29,30,31,32,33,33.5,34,34.5,35,35.5,35.6,36, 36.1, 36.2, 36.3]
# return [7,6,27,28,29,30,31,32,33,33.5,34]
def hood_canal_xy():
return station_to_lat_lon(hood_canal_station_list())
def main_basin_xy():
return station_to_lat_lon(main_basin_station_list())
def high_res_hood_canal_xy(n=1):
if n == 1:
return station_to_lat_lon(hood_canal_station_list())
else:
return high_res_station_to_lat_lon(hood_canal_station_list(),n)
def high_res_main_basin_xy(n=1):
if n == 1:
return station_to_lat_lon(main_basin_station_list())
else:
return high_res_station_to_lat_lon(main_basin_station_list(),n)
def latlon_to_km(lat1,lon1,lat2,lon2):
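	"""Great-circle distance in km between two lat/lon points.

	Uses the haversine formula with an Earth radius of 6378 km.
	"""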
RADIUS = 6378.0
# d=2*asin(sqrt((sin((lat1-lat2)/2))^2 + cos(lat1)*cos(lat2)*(sin((lon1-lon2)/2))^2))
lat1r = np.radians(lat1)
lon1r = np.radians(lon1)
lat2r = np.radians(lat2)
lon2r = np.radians(lon2)
rads=2*(np.arcsin(np.sqrt(np.power(np.sin((lat1r-lat2r)/2.0),2.0) + np.cos(lat1r)*np.cos(lat2r)*np.power(np.sin((lon1r-lon2r)/2.0),2.0))))
d = RADIUS*rads
return d
def coords_to_km(coords):
lon_list = coords['xm']
lat_list = coords['ym']
km_list = [0.0]
for i in range(len(lon_list)-1):
km_list.append(latlon_to_km(lat_list[i+1],lon_list[i+1],lat_list[i],lon_list[i]) + km_list[i])
return km_list
def station_list_to_km(sl):
lat,lon = station_to_lat_lon(sl)
return coords_to_km({'xm':lon,'ym':lat})
def offset_calc(x1,y1,x2,y2,x3,y3,x4,y4):
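	"""Parameter t in [0, 1) where the segment from (x1, y1) to (x2, y2)
	crosses the line through (x3, y3) and (x4, y4), solved by Cramer's
	rule; returns None when the crossing lies outside the segment.
	"""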
a = x2 - x1
b = x3 - x4
c = y2 - y1
d = y3 - y4
e = x3 - x1
f = y3 - y1
t = (d*e - b*f)/(a*d - b*c)
if t >= 0.0 and t < 1.0:
return t
else:
return None
def offset_region(coords,region='Admiralty Inlet'):
	if region == 'Admiralty Inlet':
		lon3 = -122.7090
		lat3 = 48.1931
		lon4 = -122.7774
		lat4 = 48.1267
	else:
		raise ValueError('unknown region: %s' % str(region))
distances = coords_to_km(coords)
lat = coords['ym']
lon = coords['xm']
rval = 0.0
for i in range(len(lat)-1):
lat1 = lat[i]
lat2 = lat[i+1]
lon1 = lon[i]
lon2 = lon[i+1]
t = offset_calc(lon1,lat1,lon2,lat2,lon3,lat3,lon4,lat4)
		if t is not None:
rval = distances[i] + t*(distances[i+1] - distances[i])
return rval
| mit |
tjctw/PythonNote | thinkstat/hypothesis.py | 2 | 6341 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import Cdf
import cumulative
import math
import myplot
import random
import thinkstats
import matplotlib.pyplot as pyplot
def RunTest(root,
pool,
actual1,
actual2,
iters=1000,
trim=False,
partition=False):
"""Computes the distributions of delta under H0 and HA.
Args:
root: string filename root for the plots
pool: sequence of values from the pooled distribution
actual1: sequence of values in group 1
actual2: sequence of values in group 2
iters: how many resamples
trim: whether to trim the sequences
partition: whether to cross-validate by partitioning the data
"""
if trim:
pool.sort()
actual1.sort()
actual2.sort()
pool = thinkstats.Trim(pool)
actual1 = thinkstats.Trim(actual1)
actual2 = thinkstats.Trim(actual2)
if partition:
n = len(actual1)
m = len(actual2)
actual1, model1 = Partition(actual1, n/2)
actual2, model2 = Partition(actual2, m/2)
pool = model1 + model2
else:
model1 = actual1
model2 = actual2
# P(E|H0)
peh0 = Test(root + '_deltas_cdf',
actual1, actual2, pool, pool,
iters, plot=True)
# P(E|Ha)
peha = Test(root + '_deltas_ha_cdf',
actual1, actual2, model1, model2,
iters)
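    # Bayes' rule with equal prior weight on the two hypotheses:
    #   P(HA|E) = P(E|HA) P(HA) / (P(E|HA) P(HA) + P(E|H0) P(H0))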
prior = 0.5
pe = prior*peha + (1-prior)*peh0
posterior = prior*peha / pe
print 'Posterior', posterior
def Test(root, actual1, actual2, model1, model2, iters=1000, plot=False):
"""Estimates p-values based on differences in the mean.
Args:
root: string filename base for plots
actual1:
actual2: sequences of observed values for groups 1 and 2
model1:
model2: sequences of values from the hypothetical distributions
iters: how many resamples
plot: whether to plot the distribution of differences in the mean
"""
n = len(actual1)
m = len(actual2)
mu1, mu2, delta = DifferenceInMean(actual1, actual2)
delta = abs(delta)
cdf, pvalue = PValue(model1, model2, n, m, delta, iters)
print 'n:', n
print 'm:', m
print 'mu1', mu1
print 'mu2', mu2
print 'delta', delta
print 'p-value', pvalue
if plot:
PlotCdf(root, cdf, delta)
return pvalue
def DifferenceInMean(actual1, actual2):
"""Computes the difference in mean between two groups.
Args:
actual1: sequence of float
actual2: sequence of float
Returns:
tuple of (mu1, mu2, mu1-mu2)
"""
mu1 = thinkstats.Mean(actual1)
mu2 = thinkstats.Mean(actual2)
delta = mu1 - mu2
return mu1, mu2, delta
def PValue(model1, model2, n, m, delta, iters=1000):
"""Computes the distribution of deltas with the model distributions.
And the p-value of the observed delta.
Args:
model1:
model2: sequences of values from the hypothetical distributions
n: sample size from model1
m: sample size from model2
delta: the observed difference in the means
iters: how many samples to generate
"""
deltas = [Resample(model1, model2, n, m) for i in range(iters)]
mean_var = thinkstats.MeanVar(deltas)
print '(Mean, Var) of resampled deltas', mean_var
cdf = Cdf.MakeCdfFromList(deltas)
# compute the two tail probabilities
left = cdf.Prob(-delta)
right = 1.0 - cdf.Prob(delta)
pvalue = left + right
print 'Tails (left, right, total):', left, right, left+right
return cdf, pvalue
def PlotCdf(root, cdf, delta):
"""Draws a Cdf with vertical lines at the observed delta.
Args:
root: string used to generate filenames
cdf: Cdf object
delta: float observed difference in means
"""
def VertLine(x):
"""Draws a vertical line at x."""
xs = [x, x]
ys = [0, 1]
pyplot.plot(xs, ys, linewidth=2, color='0.7')
VertLine(-delta)
VertLine(delta)
xs, ys = cdf.Render()
pyplot.subplots_adjust(bottom=0.11)
pyplot.plot(xs, ys, linewidth=2, color='blue')
myplot.Save(root,
title='Resampled differences',
xlabel='difference in means (weeks)',
ylabel='CDF(x)',
legend=False)
def Resample(t1, t2, n, m):
"""Draws samples and computes the difference in mean.
Args:
t1: sequence of values
t2: sequence of values
n: size of the sample to draw from t1
m: size of the sample to draw from t2
"""
sample1 = SampleWithReplacement(t1, n)
sample2 = SampleWithReplacement(t2, m)
mu1, mu2, delta = DifferenceInMean(sample1, sample2)
return delta
def Partition(t, n):
"""Splits a sequence into two random partitions.
Side effect: shuffles t
Args:
t: sequence of values
n: size of the first partition
Returns:
two lists of values
"""
random.shuffle(t)
return t[:n], t[n:]
def SampleWithReplacement(t, n):
"""Generates a sample with replacement.
Args:
t: sequence of values
n: size of the sample
Returns:
list of values
"""
return [random.choice(t) for i in range(n)]
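# For contrast (illustrative note, not in the original source):
# SampleWithReplacement([1, 2, 3], 5) is valid and may repeat values, while
# SampleWithoutReplacement below requires n <= len(t) and never repeats.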
def SampleWithoutReplacement(t, n):
"""Generates a sample without replacement.
Args:
t: sequence of values
n: size of the sample
Returns:
list of values
"""
return random.sample(t, n)
def main():
random.seed(1)
# get the data
pool, firsts, others = cumulative.MakeTables()
mean_var = thinkstats.MeanVar(pool.lengths)
print '(Mean, Var) of pooled data', mean_var
# run the test
RunTest('length',
pool.lengths,
firsts.lengths,
others.lengths,
iters=1000,
trim=False,
partition=False)
if __name__ == "__main__":
main()
| cc0-1.0 |
jereze/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
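# Minimal usage sketch (illustrative only; mirrors the wrapper above): draw
# 3 distinct indices out of a population of 10 with a fixed seed:
#   sample_without_replacement(10, 3, method="reservoir_sampling",
#                              random_state=0)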
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
    # This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
    # This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
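# Note (added for clarity, not part of the original test): frozenset()
# discards sample order, so the loop above counts combinations rather than
# permutations. With n_population = 10 and n_samples = 2 that is
# combinations(10, 2, exact=True) == 45 distinct subsets to observe before
# the early break can fire.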
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
    # an array in classes and its class_probabilites entry have mismatched lengths
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
dongjoon-hyun/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_test.py | 30 | 60826 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
model_ops = dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN,
params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
wire_var = dnn_linear_combined._get_embedding_variable(
embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
for _ in range(2):
_, language_value, wire_value = sess.run(
[model_ops.train_op, language_var, wire_var])
initial_value = np.full_like(language_value, 0.1)
self.assertTrue(np.all(np.isclose(language_value, initial_value)))
self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
class ActivationFunctionTest(test.TestCase):
def _getModelForActivation(self, activation_fn):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
'activation_fn': activation_fn,
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
return dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testValidActivation(self):
_ = self._getModelForActivation('relu')
def testRaisesOnBadActivationName(self):
with self.assertRaisesRegexp(ValueError,
'Activation name should be one of'):
self._getModelForActivation('max_pool')
class DNNEstimatorTest(test.TestCase):
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNEstimator)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
dnn_estimator = dnn.DNNEstimator(
head=head_lib.multi_class_head(2, weight_column_name='w'),
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
dnn_estimator.fit(input_fn=_input_fn_train, steps=5)
scores = dnn_estimator.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
class DNNClassifierTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNClassifier)
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn.DNNClassifier(
feature_columns=[embedding_language],
hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
# Set num_ps_replica to be 10 and the min slice size to be extremely small,
    # so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertEqual(1, classifier.params['input_layer_min_slice_size'])
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertClassificationPredictions(
self, expected_len, n_classes, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, range(n_classes))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
fc_core.embedding_column(language_column, dimension=1),
fc_core.numeric_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predictions_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[10, 10],
label_keys=label_keys,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testPredict_AsIterableFalse(self):
"""Tests predict and predict_prob methods with as_iterable=False."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predicted_classes = classifier.predict_classes(
input_fn=_input_fn, as_iterable=False)
self._assertClassificationPredictions(3, n_classes, predicted_classes)
predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllEqual(predicted_classes, predictions)
probabilities = classifier.predict_proba(
input_fn=_input_fn, as_iterable=False)
self._assertProbabilities(3, n_classes, probabilities)
def testPredict_AsIterable(self):
"""Tests predict and predict_prob methods with as_iterable=True."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=300)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertClassificationPredictions(3, n_classes, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predicted_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, n_classes, predicted_proba)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn.DNNClassifier(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
model_dir = tempfile.mkdtemp()
classifier = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(
language, dimension=1)
]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=5)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertIn('dnn/multi_class_head/centered_bias_weight',
classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
class DNNRegressorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNRegressor(
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def _assertRegressionOutputs(
self, predictions, expected_shape):
predictions_nparray = np.array(predictions)
self.assertAllEqual(expected_shape, predictions_nparray.shape)
self.assertTrue(np.issubdtype(predictions_nparray.dtype, np.floating))
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self._assertRegressionOutputs(predicted_scores, [3])
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self._assertRegressionOutputs(predicted_scores, [3])
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
    # Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics that use MetricSpec."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[3, 3], config=config)
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertIn('dnn/regression_head/centered_bias_weight',
regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertNotIn('centered_bias_weight', regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
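# boston_input_fn above returns the full Boston housing set as a single
# batch: 13 real-valued features and one scalar target per example. The test
# below derives matching feature columns directly from this input function.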
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = dnn.DNNRegressor(feature_columns=feature_columns, hidden_units=[3, 3])
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
supertuxkart/stk-stats | userreport/views/usercount.py | 2 | 1862 | import logging
from userreport.models import UserReport
from django.http import HttpResponse
from django.views.decorators.cache import cache_page
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.dates import DateFormatter
import matplotlib.artist
LOG = logging.getLogger(__name__)
@cache_page(60 * 120)
def report_user_count(request):
reports = UserReport.objects.order_by('upload_date')
users_by_date = {}
for report in reports:
t = report.upload_date.date() # group by day
users_by_date.setdefault(t, set()).add(report.user_id_hash)
seen_users = set()
data_scatter = ([], [], [])
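# data_scatter collects, per day: the date, the number of distinct users seen
# that day, and the number of users never seen on any earlier day. seen_users
# accumulates every user hash observed so far, so the set difference below
# isolates first-time users.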
for date, users in sorted(users_by_date.items()):
data_scatter[0].append(date)
data_scatter[1].append(len(users))
data_scatter[2].append(len(users - seen_users))
seen_users |= users
fig = Figure(figsize=(12, 6))
ax = fig.add_subplot(111)
fig.subplots_adjust(left=0.08, right=0.95, top=0.95, bottom=0.2)
ax.plot(data_scatter[0], data_scatter[1], marker='o')
ax.plot(data_scatter[0], data_scatter[2], marker='o')
ax.legend(('Total users', 'New users'), 'upper left', frameon=False)
matplotlib.artist.setp(ax.get_legend().get_texts(), fontsize='small')
ax.set_ylabel('Number of users per day')
for label in ax.get_xticklabels():
label.set_rotation(90)
label.set_fontsize(9)
ax.xaxis.set_major_formatter(DateFormatter('%d-%m-%Y'))
canvas = FigureCanvas(fig)
response = HttpResponse(content_type='image/png')
try:
canvas.print_png(response, dpi=80)
except ValueError:
LOG.warning('Error while displaying usercount data (possibly empty stats)')
return HttpResponse('<h1>Warning: No stats data available</h1>')
return response
| mit |
shenzebang/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs needs cblas, fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
frank-tancf/scikit-learn | sklearn/cross_decomposition/pls_.py | 34 | 30531 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from distutils.version import LooseVersion
from sklearn.utils.extmath import svd_flip
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
import scipy
pinv2_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
# check_finite=False is an optimization available only in scipy >=0.12
pinv2_args = {'check_finite': False}
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
# We use slower pinv2 (same as np.linalg.pinv) for stability
# reasons
X_pinv = linalg.pinv2(X, **pinv2_args)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv2(Y, **pinv2_args) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
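# Illustrative cross-check (a sketch, not part of scikit-learn): on generic
# centered data, the NIPALS inner loop above converges to the same directions
# as the first singular vectors of X'Y returned by _svd_cross_product, up to
# a sign flip. The helper below is meant to be called manually and is never
# executed at import time.
def _example_nipals_matches_svd():  # pragma: no cover
    rng = np.random.RandomState(0)
    X = rng.randn(50, 5)
    Y = rng.randn(50, 3)
    # Center the blocks, as the PLS estimators do before the inner loop.
    X -= X.mean(axis=0)
    Y -= Y.mean(axis=0)
    x_weights, _, _ = _nipals_twoblocks_inner_loop(
        X, Y, mode="A", max_iter=1000, tol=1e-12)
    u, _ = _svd_cross_product(X, Y)
    # Both weight vectors are unit-normed; compare them up to sign.
    assert np.allclose(np.abs(x_weights.ravel()), np.abs(u.ravel()), atol=1e-3)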
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
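# Usage note (illustrative, assumed call pattern): the returned statistics
# let a caller undo the transformation later, e.g.
#     Xc, Yc, x_mean, y_mean, x_std, y_std = _center_scale_xy(X.copy(), Y.copy())
#     X_restored = Xc * x_std + x_mean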
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm; the constructor parameters
allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterates over components.
(ii) The inner loop estimates the weight vectors. This can be done
with two algorithms: (a) the inner loop of the original NIPALS algorithm,
or (b) an SVD on the residual cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contain the residual (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (
_center_scale_xy(X, Y, self.scale))
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# Forces sign stability of x_weights and y_weights
# Sign indeterminacy issue from svd if algorithm == "svd"
# and from platform dependent computation if algorithm == 'nipals'
x_weights, y_weights = svd_flip(x_weights, y_weights.T)
y_weights = y_weights.T
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
# A possible memory footprint reduction may be done here: in order to
# avoid the allocation of a data chunk for the rank-one
# approximations matrix which is then subtracted from Xk, we suggest
# performing a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
# U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv2(np.dot(self.x_loadings_.T, self.x_weights_),
**pinv2_args))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv2(np.dot(self.y_loadings_.T, self.y_weights_),
**pinv2_args))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy, dtype=FLOAT_DTYPES)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
Matrices::
T: x_scores_
U: y_scores_
W: x_weights_
C: y_weights_
P: x_loadings_
Q: y_loadings_
Are computed such that::
X = T P.T + Err and Y = U Q.T + Err
T[:, k] = Xk W[:, k] for k in range(n_components)
U[:, k] = Yk C[:, k] for k in range(n_components)
x_rotations_ = W (P.T W)^(-1)
y_rotations_ = C (Q.T C)^(-1)
where Xk and Yk are residual matrices at iteration k.
`Slides explaining PLS <http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`_
For each component k, find weights u, v that optimizes:
``max corr(Xk u, Yk v) * std(Xk u) std(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
This implementation provides the same results as 3 PLS packages
available in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
super(PLSRegression, self).__init__(
n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value set to True unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
Matrices::
T: x_scores_
U: y_scores_
W: x_weights_
C: y_weights_
P: x_loadings_
Q: y_loadings_
Are computed such that::
X = T P.T + Err and Y = U Q.T + Err
T[:, k] = Xk W[:, k] for k in range(n_components)
U[:, k] = Yk C[:, k] for k in range(n_components)
x_rotations_ = W (P.T W)^(-1)
y_rotations_ = C (Q.T C)^(-1)
where Xk and Yk are residual matrices at iteration k.
`Slides explaining PLS <http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`_
For each component k, find weights u, v that optimize::
max corr(Xk u, Yk v) * std(Xk u) std(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
regression, which is slightly different from CCA. This is mostly used
for modeling.
This implementation provides the same results as the "plspm" package
available in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
lies in the fact that the mixOmics implementation does not exactly implement
the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
super(PLSCanonical, self).__init__(
n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
Simply perform an SVD on the cross-covariance matrix X'Y.
There is no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contain the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (
_center_scale_xy(X, Y, self.scale))
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
# let's use arpacks to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
# Deterministic output
U, V = svd_flip(U, V)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
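# Illustrative usage sketch (not part of scikit-learn): PLSSVD factorizes
# only the cross-covariance matrix, so fitting amounts to a single SVD and
# the scores are plain projections of the centered/scaled blocks. The helper
# below is meant to be called manually.
def _example_plssvd_usage():  # pragma: no cover
    rng = np.random.RandomState(0)
    X = rng.randn(30, 5)
    Y = rng.randn(30, 4)
    plssvd = PLSSVD(n_components=2).fit(X, Y)
    x_scores, y_scores = plssvd.transform(X, Y)
    assert x_scores.shape == (30, 2)
    assert y_scores.shape == (30, 2)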
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Geneva_cont_Rot/Geneva_cont_Rot_6/fullgrid/UV2.py | 31 | 9339 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith("1.grd"):
gridfile1 = file
for file in os.listdir('.'):
if file.endswith("2.grd"):
gridfile2 = file
for file in os.listdir('.'):
if file.endswith("3.grd"):
gridfile3 = file
# ------------------------
for file in os.listdir('.'):
if file.endswith("1.txt"):
Elines1 = file
for file in os.listdir('.'):
if file.endswith("2.txt"):
Elines2 = file
for file in os.listdir('.'):
if file.endswith("3.txt"):
Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for other people's data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] = 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines here!
line = [18, #1549
19, #1640
20, #1665
21, #1671
23, #1750
24, #1860
25, #1888
26, #1907
27, #2297
28, #2321
29, #2471
30, #2326
31, #2335
32, #2665
33, #2798
34] #2803
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty UV Lines Continued", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Dusty_UV_Lines_cntd.pdf')
plt.clf()
print "figure saved"
| gpl-2.0 |
Ledoux/ShareYourSystem | Pythonlogy/draft/Simulaters/Brianer/draft/01_ExampleCell copy 4.py | 2 | 2372 |
#ImportModules
import ShareYourSystem as SYS
# Moniter is referenced below; it is assumed to live in the same package.
from ShareYourSystem.Specials.Simulaters import Populater,Brianer,Moniter
#Definition
MyBrianer=Brianer.BrianerClass(
).update(
{
#Set here the global net parameters
'StimulatingStepTimeFloat':0.1
}
).produce(
['E','I'],
Populater.PopulaterClass,
{
#Here are defined the brian classic shared arguments between pops
'brian.NeuronGroupInspectDict':SYS.InspectDict().update(
{
'LiargVariablesList':[
0,
'''
dv/dt = (ge+gi-(v+49*mV))/(20*ms) : volt
dge/dt = -ge/(5*ms) : volt
dgi/dt = -gi/(10*ms) : volt
'''
],
'KwargVariablesDict':
{
'threshold':'v>-50*mV',
'reset':'v=-60*mV'
}
}
),
#Here is the setup of the future brian monitors
'push':
{
'LiargVariablesList':
[
[
Moniter.MoniterClass.update(
{
'brian.SpikeMonitorInspectDict':SYS.InspectDict()
}
)
],
],
'KwargVariablesDict':{'CollectingCollectionStr':'Monitome'}
},
#Init conditions
'PopulatingInitDict':
{
'v':-60.
}
},
**{'CollectingCollectionStr':'Populatome'}
).__setitem__(
'Dis_<Populatome>',
#Here are defined the brian classic specific arguments for each pop
[
{
'Exec_NeuronGroupInspectDict["LiargVariablesList"][0]':3200,
'ConnectingGraspClueVariablesList':
[
SYS.GraspDictClass(
{
'HintVariable':'/NodePointDeriveNoder/<Populatome>IPopulater',
'SynapseArgumentVariable':
{
'pre':'ge+=1.62*mV',
'connect':{'p':0.02}
}
}
)
]
},
{
'Exec_NeuronGroupInspectDict["LiargVariablesList"][0]':800,
'ConnectingGraspClueVariablesList':
[
SYS.GraspDictClass(
{
'HintVariable':'/NodePointDeriveNoder/<Populatome>EPopulater',
'SynapseArgumentVariable':
{
'pre':'gi-=9*mV',
'connect':{'p':0.02}
}
}
)
]
}
]
).brian()
#Definition the AttestedStr
SYS._attest(
[
'MyBrianer is '+SYS._str(
MyBrianer,
**{
'RepresentingBaseKeyStrsList':False,
'RepresentingAlineaIsBool':False
}
),
]
)
#SYS._print(MyBrianer.BrianedMonitorsList[0].__dict__)
#SYS._print(
# MyBrianer.BrianedNeuronGroupsList[0].__dict__
#)
#import matplotlib
#plot(MyBrianer['<Connectome>FirstRater'].)
#Print
| mit |
ecell/bioimaging | scopyon/analysis/hmm.py | 1 | 18269 | import numpy
from hmmlearn.base import _BaseHMM
from hmmlearn.hmm import _check_and_set_gaussian_n_features
from hmmlearn import _utils
class FullPTHMM(_BaseHMM):
r"""Hidden Markov Model for Particle Tracking.
Args:
n_components (int): Number of states.
min_var (float, optional): Floor on the variance to prevent overfitting.
Defaults to 1e-5.
startprob_prior (array, optional):
shape (n_components, ). Parameters of the Dirichlet prior distribution for
:attr:`startprob_`.
transmat_prior (array, optional):
shape (n_components, n_components). Parameters of the Dirichlet prior distribution for each row
of the transition probabilities :attr:`transmat_`.
algorithm (string, optional):
Decoder algorithm. Must be one of "viterbi" or "map".
Defaults to "viterbi".
random_state (RandomState or an int seed, optional):
A random number generator instance.
n_iter (int, optional): Maximum number of iterations to perform.
tol (float, optional):
Convergence threshold. EM will stop if the gain in log-likelihood
is below this value.
verbose (bool, optional):
When ``True`` per-iteration convergence reports are printed
to :data:`sys.stderr`. You can diagnose convergence via the
:attr:`monitor_` attribute.
params (string, optional):
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'd' for diffusivities, 'm' for intensity means
and 'v' for intensity variances. Defaults to all parameters.
init_params (string, optional):
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for startprob,
't' for transmat, 'd' for diffusivities, 'm' for intensity means
and 'v' for intensity variances. Defaults to all parameters.
Attributes:
monitor\_ (ConvergenceMonitor):
Monitor object used to check the convergence of EM.
startprob\_ (array): shape (n_components, ).
Initial state occupation distribution.
transmat\_ (array): shape (n_components, n_components).
Matrix of transition probabilities between states.
diffusivities\_ (array): shape (n_components, 1).
Diffusion constants for each state.
intensity_means\_ (array): shape (n_components, 1).
Mean parameters of intensity distribution for each state.
intensity_vars\_ (array): shape (n_components, 1).
Variance parameters of intensity distribution for each state.
"""
def __init__(self, n_components=1,
min_var=1e-5,
startprob_prior=1.0, transmat_prior=1.0,
algorithm="viterbi", random_state=None,
n_iter=10, tol=1e-2, verbose=False,
params="stdmv", init_params="stdmv"):
_BaseHMM.__init__(self, n_components,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior, algorithm=algorithm,
random_state=random_state, n_iter=n_iter,
tol=tol, params=params, verbose=verbose,
init_params=init_params)
self.min_var = min_var
def _check(self):
super()._check()
self.diffusivities_ = numpy.asarray(self.diffusivities_)
assert self.diffusivities_.shape == (self.n_components, 1)
self.intensity_means_ = numpy.asarray(self.intensity_means_)
assert self.intensity_means_.shape == (self.n_components, 1)
self.intensity_vars_ = numpy.asarray(self.intensity_vars_)
assert self.intensity_vars_.shape == (self.n_components, 1)
self.n_features = 1
def _generate_sample_from_state(self, state, random_state=None):
D = self.diffusivities_[state]
mean = self.intensity_means_[state]
var = self.intensity_vars_[state]
return numpy.hstack([
numpy.sqrt(numpy.power(random_state.normal(scale=numpy.sqrt(2 * D), size=2), 2).sum(keepdims=True)),
random_state.normal(loc=mean, scale=numpy.sqrt(var), size=(1, )),
])
def _get_n_fit_scalars_per_param(self):
nc = self.n_components
nf = self.n_features
return {
"s": nc - 1,
"t": nc * (nc - 1),
"d": nc * nf,
"m": nc * nf,
"v": nc * nf,
}
def _init(self, X, lengths=None):
_check_and_set_gaussian_n_features(self, X)
super()._init(X, lengths=lengths)
_, n_features = X.shape
if hasattr(self, 'n_features') and self.n_features != n_features:
raise ValueError('Unexpected number of dimensions, got %s but '
'expected %s' % (n_features, self.n_features))
self.n_features = n_features
if 'd' in self.init_params or not hasattr(self, "diffusivities_"):
diffusivity_means = numpy.mean(X[:, [0]], axis=0) * 0.25
variations = numpy.arange(1, self.n_components + 1)
variations = variations / variations.sum()
self.diffusivities_ = diffusivity_means * variations[:, numpy.newaxis]
if 'm' in self.init_params or not hasattr(self, "intensity_means_"):
from sklearn import cluster
kmeans = cluster.KMeans(n_clusters=self.n_components,
random_state=self.random_state)
kmeans.fit(X[:, [1]])
self.intensity_means_ = kmeans.cluster_centers_
if 'v' in self.init_params or not hasattr(self, "intensity_vars_"):
var = numpy.var(X[:, [1]].T) + self.min_var
self.intensity_vars_ = numpy.tile([var], (self.n_components, 1))
def _initialize_sufficient_statistics(self):
stats = super()._initialize_sufficient_statistics()
stats['post'] = numpy.zeros(self.n_components)
stats['obs1**2'] = numpy.zeros((self.n_components, 1))
stats['obs2'] = numpy.zeros((self.n_components, 1))
stats['obs2**2'] = numpy.zeros((self.n_components, 1))
return stats
def _compute_log_likelihood(self, X):
D = self.diffusivities_
mean = self.intensity_means_
var = self.intensity_vars_
# print("D=", D)
# print("mean=", mean)
# print("var=", var)
if not all(var > 0):
raise ValueError(f'Variance must be positive [{var}]')
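# q1 is the log-density of the 2-D displacement r over one time step,
# p(r) = r / (2 D) * exp(-r**2 / (4 D)); q2 is the log Gaussian density
# of the spot intensity.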
q1 = numpy.log(X[:, [0]] / (2 * D[:, 0])) - (X[:, [0]] ** 2 / (4 * D[:, 0]))
q2 = -0.5 * numpy.log(2 * numpy.pi * var[:, 0]) - (X[:, [1]] - mean[:, 0]) ** 2 / (2 * var[:, 0])
return q1 + q2
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice):
super()._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice)
if any(param in self.params for param in 'dmv'):
stats['post'] += posteriors.sum(axis=0)
if 'd' in self.params:
stats['obs1**2'] += numpy.dot(posteriors.T, obs[:, [0]] ** 2)
if 'm' in self.params:
stats['obs2'] += numpy.dot(posteriors.T, obs[:, [1]])
if 'v' in self.params:
stats['obs2**2'] += numpy.dot(posteriors.T, obs[:, [1]] ** 2)
def _do_mstep(self, stats):
super()._do_mstep(stats)
denom = stats['post'][:, numpy.newaxis]
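# For 2-D diffusion the mean squared displacement per step is 4 * D, so the
# M-step estimate of D below is a quarter of the posterior-weighted average
# of the squared displacements.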
if 'd' in self.params:
self.diffusivities_ = 0.25 * stats['obs1**2'] / denom
if 'm' in self.params:
self.intensity_means_ = stats['obs2'] / denom
if 'v' in self.params:
self.intensity_vars_ = (
stats['obs2**2'] - 2 * self.intensity_means_ * stats['obs2'] + self.intensity_means_ ** 2 * denom) / denom
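# Illustrative usage sketch (an assumed workflow, not part of the scopyon
# API): each observation is a (displacement, intensity) pair, so a fully
# parameterized model can generate a synthetic trajectory on which a fresh
# model is then fit. Meant to be called manually, never at import time.
def _example_full_pthmm():  # pragma: no cover
    model = FullPTHMM(n_components=2)
    model.startprob_ = numpy.array([0.6, 0.4])
    model.transmat_ = numpy.array([[0.9, 0.1], [0.2, 0.8]])
    model.diffusivities_ = numpy.array([[0.1], [1.0]])
    model.intensity_means_ = numpy.array([[100.0], [200.0]])
    model.intensity_vars_ = numpy.array([[10.0], [10.0]])
    X, states = model.sample(500, random_state=0)
    fitted = FullPTHMM(n_components=2, n_iter=100).fit(X)
    return fitted, states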
class PTHMM(_BaseHMM):
r"""Hidden Markov Model for Particle Tracking.
Args:
n_diffusivities (int): Number of diffusivity states.
n_oligomers (int): Number of oligomeric states.
n_components is equal to (n_diffusivities * n_oligomers).
min_var (float, optional): Floor on the variance to prevent overfitting.
Defaults to 1e-5.
startprob_prior (array, optional):
shape (n_components, ). Parameters of the Dirichlet prior distribution for
:attr:`startprob_`.
transmat_prior (array, optional):
shape (n_components, n_components). Parameters of the Dirichlet prior distribution for each row
of the transition probabilities :attr:`transmat_`.
algorithm (string, optional):
Decoder algorithm. Must be one of "viterbi" or "map".
Defaults to "viterbi".
random_state (RandomState or an int seed, optional):
A random number generator instance.
n_iter (int, optional): Maximum number of iterations to perform.
tol (float, optional):
Convergence threshold. EM will stop if the gain in log-likelihood
is below this value.
verbose (bool, optional):
When ``True`` per-iteration convergence reports are printed
to :data:`sys.stderr`. You can diagnose convergence via the
:attr:`monitor_` attribute.
params (string, optional):
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'd' for diffusivities, 'm' for intensity means
and 'v' for intensity variances. Defaults to all parameters.
init_params (string, optional):
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for startprob,
't' for transmat, 'd' for diffusivities, 'm' for intensity means
and 'v' for intensity variances. Defaults to all parameters.
Attributes:
monitor\_ (ConvergenceMonitor):
Monitor object used to check the convergence of EM.
startprob\_ (array): shape (n_components, ).
Initial state occupation distribution.
transmat\_ (array): shape (n_components, n_components).
Matrix of transition probabilities between states.
diffusivities\_ (array): shape (n_diffusivities, 1).
Diffusion constants for each state.
intensity_means\_ (array): shape (1, 1).
Base mean parameter of intensity distributions.
intensity_vars\_ (array): shape (1, 1).
Base variance parameter of intensity distributions.
"""
def __init__(self, n_diffusivities=3, n_oligomers=4,
min_var=1e-5,
startprob_prior=1.0, transmat_prior=1.0,
algorithm="viterbi", random_state=None,
n_iter=10, tol=1e-2, verbose=False,
params="stdmv", init_params="stdmv"):
_BaseHMM.__init__(self, n_diffusivities * n_oligomers,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior, algorithm=algorithm,
random_state=random_state, n_iter=n_iter,
tol=tol, params=params, verbose=verbose,
init_params=init_params)
self.min_var = min_var
self.n_diffusivities = n_diffusivities
self.n_oligomers = n_oligomers
assert self.n_components == self.n_diffusivities * self.n_oligomers
def _check(self):
super()._check()
self.diffusivities_ = numpy.asarray(self.diffusivities_)
assert self.diffusivities_.shape == (self.n_diffusivities, 1)
self.intensity_means_ = numpy.asarray(self.intensity_means_)
assert self.intensity_means_.shape == (1, 1)
self.intensity_vars_ = numpy.asarray(self.intensity_vars_)
assert self.intensity_vars_.shape == (1, 1)
self.n_features = 2
def _generate_sample_from_state(self, state, random_state=None):
m = state // self.n_oligomers
n = state % self.n_oligomers
mean = self.intensity_means_[0] * (n + 1)
var = self.intensity_vars_[0] * (n + 1)
D = self.diffusivities_[m]
return numpy.hstack([
numpy.sqrt(numpy.power(random_state.normal(scale=numpy.sqrt(2 * D), size=2), 2).sum(keepdims=True)),
random_state.normal(loc=mean, scale=numpy.sqrt(var), size=(1, )),
])
def _get_n_fit_scalars_per_param(self):
return {
"s": self.n_components - 1,
"t": self.n_components * (self.n_components - 1),
"d": self.n_diffusivities,
"m": 1,
"v": 1,
}
def _init(self, X, lengths=None):
_check_and_set_gaussian_n_features(self, X)
super()._init(X, lengths=lengths)
_, n_features = X.shape
assert n_features == 2
if hasattr(self, 'n_features') and self.n_features != n_features:
raise ValueError('Unexpected number of dimensions, got %s but '
'expected %s' % (n_features, self.n_features))
self.n_features = n_features
if 'd' in self.init_params or not hasattr(self, "diffusivities_"):
diffusivity_means = numpy.mean(X[:, [0]], axis=0) * 0.25
variations = numpy.arange(1, self.n_diffusivities + 1)
variations = variations / variations.sum()
self.diffusivities_ = diffusivity_means * variations[:, numpy.newaxis]
if 'm' in self.init_params or not hasattr(self, "intensity_means_"):
# kmeans = cluster.KMeans(n_clusters=self.n_components,
# random_state=self.random_state)
# kmeans.fit(X[:, [1]])
# self.intensity_means_ = kmeans.cluster_centers_
self.intensity_means_ = numpy.array([[numpy.average(X[:, 1]) * 0.5]])
if 'v' in self.init_params or not hasattr(self, "intensity_vars_"):
var = numpy.var(X[:, [1]].T) + self.min_var
self.intensity_vars_ = numpy.array([[var]])
def _initialize_sufficient_statistics(self):
stats = super()._initialize_sufficient_statistics()
stats['post'] = numpy.zeros(self.n_components)
stats['obs1**2'] = numpy.zeros((self.n_components, 1))
stats['obs2'] = numpy.zeros((self.n_components, 1))
stats['obs2**2'] = numpy.zeros((self.n_components, 1))
return stats
def _compute_log_likelihood(self, X):
# D = self.diffusivities_
D = numpy.repeat(self.diffusivities_, self.n_oligomers, axis=0)
mean = self.intensity_means_[0, 0]
mean *= numpy.tile(numpy.arange(1, self.n_oligomers + 1), (1, self.n_diffusivities)).T
var = self.intensity_vars_[0, 0]
var *= numpy.tile(numpy.arange(1, self.n_oligomers + 1), (1, self.n_diffusivities)).T
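# Both the mean and the variance of the intensity scale linearly with the
# oligomer size n + 1, i.e. an oligomer's intensity behaves like the sum of
# n + 1 i.i.d. monomer intensities.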
if any(var <= 0.0):
raise ValueError(f'Variance must be positive [{var}]')
q1 = numpy.log(X[:, [0]] / (2 * D[:, 0])) - (X[:, [0]] ** 2 / (4 * D[:, 0]))
q2 = -0.5 * numpy.log(2 * numpy.pi * var[:, 0]) - (X[:, [1]] - mean[:, 0]) ** 2 / (2 * var[:, 0])
# print("mean=", mean)
# print("var=", var)
# print("self.intensity_means_.shape=", self.intensity_means_.shape)
# print("self.intensity_vars_.shape=", self.intensity_vars_.shape)
# print("q1.shape=", q1.shape)
# print("q2.shape=", q2.shape)
return q1 + q2
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice):
super()._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice)
if any(param in self.params for param in 'dmv'):
stats['post'] += posteriors.sum(axis=0)
if 'd' in self.params:
stats['obs1**2'] += numpy.dot(posteriors.T, obs[:, [0]] ** 2)
if 'm' in self.params:
stats['obs2'] += numpy.dot(posteriors.T, obs[:, [1]])
if 'v' in self.params:
stats['obs2**2'] += numpy.dot(posteriors.T, obs[:, [1]] ** 2)
# print("posteriors=", posteriors.shape)
# print("obs=", obs.shape)
# print("stats['post']=", stats['post'].shape)
# print("stats['obs1**2']=", stats['obs1**2'].shape)
# print("stats['obs2']=", stats['obs2'].shape)
# print("stats['obs2**2']=", stats['obs2**2'].shape)
# assert False
def _do_mstep(self, stats):
super()._do_mstep(stats)
denom = stats['post'][:, numpy.newaxis]
# print("denom=", denom.shape)
# print("stats['post']=", stats['post'].shape)
# print("stats['obs1**2']=", stats['obs1**2'].shape)
# print("stats['obs2']=", stats['obs2'].shape)
# print("stats['obs2**2']=", stats['obs2**2'].shape)
# print("diffusivities_=", self.diffusivities_)
# print("intensity_means_=", self.intensity_means_)
# print("intensity_vars_=", self.intensity_vars_)
if 'd' in self.params:
k = numpy.repeat(numpy.identity(self.n_diffusivities), self.n_oligomers, axis=1)
self.diffusivities_ = 0.25 * numpy.dot(k, stats['obs1**2']) / numpy.dot(k, denom)
if 'm' in self.params:
post = denom
x = stats['obs2']
k = numpy.tile(numpy.arange(1, self.n_oligomers + 1), (1, self.n_diffusivities))
self.intensity_means_ = x.sum(axis=0) / numpy.dot(k, post)
if 'v' in self.params:
post = denom
x = stats['obs2']
x2 = stats['obs2**2']
mu = self.intensity_means_
k = numpy.tile(numpy.arange(1, self.n_oligomers + 1), (1, self.n_diffusivities))
self.intensity_vars_ = (numpy.dot(1 / k, x2) - 2 * mu * x.sum(axis=0) + mu ** 2 * numpy.dot(k, post)) / post.sum(axis=0)
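# Illustrative sketch (not part of the scopyon API): PTHMM flattens each
# (diffusivity, oligomer) pair into a single state index, which the emission
# model unpacks as below; an (n + 1)-mer uses (n + 1) times the base
# intensity mean and variance.
def _example_pthmm_state_layout(n_diffusivities=3, n_oligomers=4):  # pragma: no cover
    for state in range(n_diffusivities * n_oligomers):
        m = state // n_oligomers  # index into diffusivities_
        n = state % n_oligomers   # oligomer index; the oligomer size is n + 1
        print("state {} -> diffusivity {}, {}-mer".format(state, m, n + 1))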
| bsd-3-clause |
jcchin/Hyperloop_v2 | paper/images/trade_scripts/boundary_layer_length_plot.py | 4 | 1027 | import numpy as np
import matplotlib.pyplot as plt
L_pod = np.loadtxt('../data_files/boundary_layer_length_trades/L_pod.txt', delimiter = '\t')
A_tube = np.loadtxt('../data_files/boundary_layer_length_trades/A_tube.txt', delimiter = '\t')
fig = plt.figure(figsize = (3.25,3.5), tight_layout = True)
ax = plt.axes()
plt.setp(ax.get_xticklabels(), fontsize=8)
plt.setp(ax.get_yticklabels(), fontsize=8)
line1, = plt.plot(L_pod, A_tube[0,:], 'b-', linewidth = 2.0, label = 'A_pod = 2.0 $m^2$')
line2, = plt.plot(L_pod, A_tube[1,:], 'r-', linewidth = 2.0, label = 'A_pod = 2.5 $m^2$')
line3, = plt.plot(L_pod, A_tube[2,:], 'g-', linewidth = 2.0, label = 'A_pod = 3.0 $m^2$')
plt.xlabel('Pod Length (m)', fontsize = 10, fontweight = 'bold')
plt.ylabel('Tube Area ($m^2$)', fontsize = 10, fontweight = 'bold')
plt.ylim([15,45])
plt.legend(handles = [line1, line2, line3], loc = 2, fontsize = 8)
plt.grid(True)
plt.savefig('../graphs/boundary_layer_length_trades/Tube_Area_vs_pod_length.png', format = 'png', dpi = 300)
plt.show()
| apache-2.0 |
theflofly/tensorflow | tensorflow/python/client/notebook.py | 61 | 4779 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import socket
import sys
from tensorflow.python.platform import app
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
FLAGS = None
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
sys.argv = ORIG_ARGV
if not IS_KERNEL:
# Drop all flags.
sys.argv = [sys.argv[0]]
# NOTE(sadovsky): For some reason, putting this import at the top level
# breaks inline plotting. It's probably a bug in the stone-age version of
# matplotlib.
from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top
notebookapp = NotebookApp.instance()
notebookapp.open_browser = True
# password functionality adopted from quality/ranklab/main/tools/notebook.py
# add options to run with "password"
if FLAGS.password:
from IPython.lib import passwd # pylint: disable=g-import-not-at-top
notebookapp.ip = "0.0.0.0"
notebookapp.password = passwd(FLAGS.password)
else:
print("\nNo password specified; Notebook server will only be available"
" on the local machine.\n")
notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
if notebookapp.ip == "0.0.0.0":
proto = "https" if notebookapp.certfile else "http"
url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
notebookapp.base_project_url)
print("\nNotebook server will be publicly available at: %s\n" % url)
notebookapp.start()
return
# Drop the --flagfile flag so that notebook doesn't complain about an
# "unrecognized alias" when parsing sys.argv.
sys.argv = ([sys.argv[0]] +
[z for z in sys.argv[1:] if not z.startswith("--flagfile")])
from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top
kernelapp = IPKernelApp.instance()
kernelapp.initialize()
# Enable inline plotting. Equivalent to running "%matplotlib inline".
ipshell = kernelapp.shell
ipshell.enable_matplotlib("inline")
kernelapp.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--password",
type=str,
default=None,
help="""\
Password to require. If set, the server will allow public access. Only
used if notebook config file does not exist.\
""")
parser.add_argument(
"--notebook_dir",
type=str,
default="experimental/brain/notebooks",
help="root location where to store notebooks")
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = (
[sys.argv[0]] + [x for x in sys.argv[1:] if x.startswith("--flagfile")])
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
radajin/naver_news | web/recommend.py | 1 | 6032 | from flask import Flask, render_template, jsonify, session, request
import pickle, operator, itertools
import numpy as np
import scipy as sp
from scipy import spatial
import pandas as pd
app = Flask(__name__)
def load_datas(date="2016-06-01"):
# load article df
with open("../data/article_" + date + ".plk", 'rb') as f:
article_df = pickle.load(f)
article_df = article_df[np.invert(article_df.duplicated(subset="newsid"))] # remove duplication
article_df = article_df[article_df["comment"] > 500]
# load comment df
with open("../data/comment_" + date + ".plk", 'rb') as f:
comment_df = pickle.load(f)
comment_df = comment_df[(comment_df["good"] > 0) & (comment_df["bad"] > 0)].reset_index(drop=True) # remove rows with good=0 and bad=0
comment_df = comment_df[comment_df["userIdNo"].str.len() < 10] # keep only user ids shorter than 10 chars
comment_df["aid"] = comment_df["aid"].apply(int) # cast article id to int
return article_df, comment_df
def analytics_comments(comments):
category_dict = {"0":0,"1":0,"2":0,"3":0,"4":0,"5":0}
classification_model = pickle.load(open("./models/classification_model.plk", "rb"))
category_list = []
for comment in comments:
category = str(classification_model.predict([comment])[0])
category_list.append(category)
category_dict[category] += 1
max_category = max(category_dict.items(), key=operator.itemgetter(1))[0]
return category_dict, max_category, category_list
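# Hypothetical usage sketch (the comment strings and the 6-category label
# set are assumptions taken from the dict above, not from the model file):
#
# counts, top, labels = analytics_comments(["good point", "fake news"])
# counts -> per-category tallies, e.g. {"0": 1, "1": 0, ..., "3": 1, ...}
# top    -> the category id (str) with the highest count
# labels -> the predicted category id for each comment, in input order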
def category_recommend(category):
article_df, comment_df = load_datas()
return article_df[article_df["category"] == int(category)].sort_values(by="comment", ascending=False)
def recommend(userId):
def remove_duplicate(list1, list2):
# drop every element of list1 that also appears in list2 (order preserved)
exclude = set(list2)
return [x for x in list1 if x not in exclude]
# load model & aritcle, comment dataframe
recommend_model = pickle.load(open("./models/recommend_model.plk", "rb"))
article_df, comment_df = load_datas()
# set data from model
unique_user = recommend_model['unique_user']
article_list = recommend_model['article_list']
datas = recommend_model['datas']
predict = recommend_model['predict']
# find user index
idx = list(unique_user).index(userId)
# set recommend article & recommend predict point
recomend_article = article_list[datas[idx, :] == 0]
recomend_predict = predict[idx, :][datas[idx, :] == 0]
recomend_article = recomend_article[recomend_predict > 0]
recomend_predict = recomend_predict[recomend_predict > 0]
# set return datas
recommend_article_list = []
comments = list(comment_df[comment_df["userIdNo"] == userId]["contents"])
category_dict, max_category, category_list = analytics_comments(comments)
# article list
aritcle_list = list(category_recommend(max_category)["newsid"])
# comment list
tmp_df = comment_df[comment_df["userIdNo"] == userId]
comment_list = list(set(tmp_df["aid"]))
# remove duplication
aritcle_list = remove_duplicate(aritcle_list, comment_list)[:5]
if len(recomend_article) != 0:
# recommend article sorting
result_list = []
for i in range(len(recomend_article)):
result_list.append((recomend_article[i], recomend_predict[i]))
sorted_recommend_article = sorted(result_list, key=lambda tup: tup[1])
recommend_aritcle_list, dist_list = zip(*sorted_recommend_article)
recommend_aritcle_list = recommend_aritcle_list[::-1]
# remove duplicate
aritcle_list = remove_duplicate(aritcle_list, recommend_aritcle_list)
# concat recommend_article + category_recommend_article
aritcle_list = list(recommend_aritcle_list) + list(aritcle_list) # append the category-based fallback list, not the raw model article ids
aritcle_list = aritcle_list[:5]
else:
print("No Recomend")
# set result recomend article list
for aritcle in aritcle_list:
article = article_df[article_df["newsid"] == int(aritcle)]
recommend_dict = {
'newspaper': article['newspaper'].values[0],
'title': article['title'].values[0],
'link': article['link'].values[0],
'content': article['content'].values[0],
}
recommend_article_list.append(recommend_dict)
return recommend_article_list, comments, category_dict, max_category, category_list
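# Note on the data layout assumed above (inferred from this function, not
# documented in the repo): recommend_model['datas'] is a user x article
# matrix of interaction scores (0 = the user never commented on that
# article) and recommend_model['predict'] holds reconstructed scores for
# every (user, article) pair, e.g. from a matrix factorization. A minimal
# commented sketch of producing such a predict matrix with numpy:
#
# U = np.random.rand(n_users, n_factors)    # user latent factors
# V = np.random.rand(n_articles, n_factors) # article latent factors
# predict = U.dot(V.T)                      # score for each pair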
def userList():
recommend_model = pickle.load(open("./models/recommend_model.plk", "rb"))
# set data from model
return list(recommend_model['unique_user'])
# userList = ['28qA1', '7G80r', '85fbU', '3EQjn', 'Iqis', 'jE62', '5UM3g', '6j7iu', '3Bpiw', '6ij6t']
# return userList
def mae_mean():
def mae(data, predict):
delta = data[data > 0] - predict[data > 0]
return np.absolute(delta).sum()/len(delta)
recommend_model = pickle.load(open("./models/recommend_model.plk", "rb"))
# set data from model
datas = recommend_model['datas']
predict = recommend_model['predict']
unique_user = recommend_model['unique_user']
article_list = recommend_model['article_list']
mae_list = []
for idx in range(len(datas)):
result_mae = mae(datas[idx,:], predict[idx,:])
mae_list.append(result_mae)
return np.array(mae_list).mean(), len(unique_user), len(article_list)
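# The helper above scores the reconstruction only on observed entries:
#
#   MAE = (1 / |O|) * sum_{i in O} |data_i - predict_i|,
#   O = {i : data_i > 0}
#
# and mae_mean() returns the average of this per-user MAE over all users,
# together with the user and article counts.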
# HTML webpage
@app.route('/')
def user():
return render_template('index.html')
# returns a piece of data in JSON format
@app.route('/api/<command>', methods=['GET', 'POST'])
def api(command):
result = {}
# recommend
if command == "recommend":
userId = request.args.get('userId', '')
recommend_article_list, comments, category_dict, max_category, category_list = recommend(userId)
result = {
'recommend_article_list': recommend_article_list,
'comments':comments,
'category_dict':category_dict,
'max_category':max_category,
'category_list':category_list,
'status_code': 200,
}
elif command == "userList":
result = {
'user': userList(),
'status_code': 200,
}
elif command == "evaluation":
mae, user_num, article_num = mae_mean()
result = {
'mae_mean' : mae,
'article': article_num,
'user': user_num,
'status_code': 200,
}
return jsonify(result)
if __name__ == '__main__':
app.run(host='127.0.0.1', port=80, debug=True)
| mit |
petroniocandido/pyFTS | pyFTS/tests/transformations.py | 1 | 5449 | #!/usr/bin/python
# -*- coding: utf8 -*-
import os
import numpy as np
import matplotlib.pylab as plt
import pandas as pd
from pyFTS.common import Util as cUtil, FuzzySet
from pyFTS.partitioners import Grid, Entropy, Util as pUtil, Simple
from pyFTS.benchmarks import benchmarks as bchmk, Measures
from pyFTS.models import chen, yu, cheng, ismailefendi, hofts, pwfts, tsaur, song, sadaei, ifts
from pyFTS.models.ensemble import ensemble
from pyFTS.common import Membership, Util
from pyFTS.benchmarks import arima, quantreg, BSTS, gaussianproc, knn
from pyFTS.common import Transformations
tdiff = Transformations.Differential(1)
boxcox = Transformations.BoxCox(0)
from pyFTS.data import Enrollments, AirPassengers
'''
data = AirPassengers.get_data()
roi = Transformations.ROI()
#plt.plot(data)
_roi = roi.apply(data)
#plt.plot(_roi)
plt.plot(roi.inverse(_roi, data))
'''
'''
data = AirPassengers.get_dataframe()
data['Month'] = pd.to_datetime(data['Month'], format='%Y-%m')
trend = Transformations.LinearTrend(data_field='Passengers', index_field='Month',
index_type='datetime', datetime_mask='%Y-%d')
trend.train(data)
plt.plot(data['Passengers'].values)
plt.plot(trend.trend(data))
detrend = trend.apply(data)
plt.plot(trend.inverse(detrend, data, date_offset=pd.DateOffset(months=1)))
'''
'''
data = Enrollments.get_dataframe()
trend = Transformations.LinearTrend(data_field='Enrollments', index_field='Year')
trend.train(data)
plt.plot(data['Enrollments'].values)
plt.plot(trend.trend(data)) #)
detrend = trend.apply(data)
plt.plot(trend.inverse(detrend, data))
'''
#dataset = pd.read_csv('https://query.data.world/s/nxst4hzhjrqld4bxhbpn6twmjbwqk7')
#dataset['data'] = pd.to_datetime([str(y)+'-'+str(m) for y,m in zip(dataset['Ano'].values, dataset['Mes'].values)],
# format='%Y-%m')
roi = Transformations.ROI()
'''
train = dataset['Total'].values[:30]
test = dataset['Total'].values[30:]
fs = Grid.GridPartitioner(data=train, npart=5, transformation=roi)
from pyFTS.models import hofts, pwfts
model = pwfts.ProbabilisticWeightedFTS(partitioner=fs, order=2)
#model = hofts.WeightedHighOrderFTS(partitioner=fs, order=1)
model.append_transformation(roi)
model.fit(train)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=[10,5])
ax.plot(test)
'''
'''
train = dataset.iloc[:30]
test = dataset.iloc[30:]
from pyFTS.models.multivariate import common, variable, mvfts, wmvfts, granular
from pyFTS.partitioners import Grid, Entropy
from pyFTS.models.seasonal.common import DateTime
from pyFTS.models.seasonal import partitioner as seasonal
sp = {'seasonality': DateTime.month , 'names': ['Jan','Fev','Mar','Abr','Mai','Jun','Jul', 'Ago','Set','Out','Nov','Dez']}
vmonth = variable.Variable("Month", data_label="data", partitioner=seasonal.TimeGridPartitioner, npart=12,
data=train, partitioner_specific=sp)
vtur = variable.Variable("Turistas", data_label="Total", alias='tur',
partitioner=Grid.GridPartitioner, npart=20, transformation=roi,
data=train)
#model = wmvfts.WeightedMVFTS(explanatory_variables=[vmonth, vtur], target_variable=vtur)
model = granular.GranularWMVFTS(explanatory_variables=[vmonth, vtur], target_variable=vtur, order=2, knn=1)
model.fit(train)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=[10,5])
ax.plot(test['Total'].values)
forecast = model.predict(test)
for k in np.arange(model.order):
forecast.insert(0,None)
ax.plot(forecast)
plt.show()
print(dataset)
'''
eto = pd.read_csv('https://raw.githubusercontent.com/PatriciaLucas/Evapotranspiracao/master/ETo_setelagoas.csv', sep=',')
eto['Data'] = pd.to_datetime(eto["Data"], format='%Y-%m-%d')
from pyFTS.models.multivariate import common, variable, mvfts, wmvfts, granular
from pyFTS.models import hofts, pwfts
from pyFTS.partitioners import Grid, Entropy
from pyFTS.common import Membership
from pyFTS.models.seasonal.common import DateTime
from pyFTS.models.seasonal import partitioner as seasonal
from pyFTS.benchmarks import Measures
from pyFTS.benchmarks import arima, quantreg, knn, benchmarks as bchmk
variables = {
"Month": dict(data_label="Data", partitioner=seasonal.TimeGridPartitioner, npart=6),
"Eto": dict(data_label="Eto", alias='eto',
partitioner=Grid.GridPartitioner, npart=50)
}
methods = [mvfts.MVFTS, wmvfts.WeightedMVFTS, granular.GranularWMVFTS]
time_generator = lambda x : pd.to_datetime(x) + pd.to_timedelta(1, unit='d')
parameters = [
{},{},
dict(fts_method=pwfts.ProbabilisticWeightedFTS, fuzzyfy_mode='both',
order=1, knn=3)
]
bchmk.multivariate_sliding_window_benchmarks2(eto, 2000, train=0.8, inc=0.2,
methods=methods,
methods_parameters=parameters,
variables=variables,
target_variable='Eto',
type='point',
steps_ahead=[7],
file="hyperparam.db", dataset='Eto',
tag="experiments",
generators= {'Data': time_generator}
)
| gpl-3.0 |
Cophy08/ggplot | tests.py | 13 | 1099 | #!/usr/bin/env python
#
# This allows running the ggplot tests from the command line: e.g.
#
# $ python tests.py -v -d
#
# The arguments are identical to the arguments accepted by nosetests.
#
# See https://nose.readthedocs.org/ for a detailed description of
# these options.
import os
import time
import matplotlib
matplotlib.use('agg')
import nose
from matplotlib.testing.noseclasses import KnownFailure
from matplotlib import font_manager
# Make sure the font caches are created before starting any possibly
# parallel tests
if font_manager._fmcache is not None:
while not os.path.exists(font_manager._fmcache):
time.sleep(0.5)
plugins = [KnownFailure]
# Nose doesn't automatically instantiate all of the plugins in the
# child processes, so we have to provide the multiprocess plugin with
# a list.
from nose.plugins import multiprocess
multiprocess._instantiate_plugins = plugins
from ggplot.tests import default_test_modules
def run():
nose.main(addplugins=[x() for x in plugins],
defaultTest=default_test_modules)
if __name__ == '__main__':
run()
| bsd-2-clause |
weissercn/MLTools | Dalitz_simplified/optimisation/dt/classifier_eval_wrapper.py | 1 | 1299 | import numpy as np
import math
import sys
sys.path.insert(0,'../..')
import os
import classifier_eval_simplified
from sklearn import tree
# Write a function like this called 'main'
def main(job_id, params):
print 'Anything printed here will end up in the output directory for job #%d' % job_id
print params
comp_file_list=[(os.environ['MLToolsDir']+"/Dalitz/dpmodel/data/data_optimisation.0.0.txt",os.environ['MLToolsDir']+"/Dalitz/dpmodel/data/data_optimisation.200.1.txt")]
#comp_file_list=[(os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/higher_dimensional_gauss/gauss_data/data_high4Dgauss_optimisation_10000_0.5_0.1_0.0_1.txt",os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/higher_dimensional_gauss/gauss_data/data_high4Dgauss_optimisation_10000_0.5_0.1_0.01_1.txt")]
clf = tree.DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=params['max_depth'], min_samples_split=params['min_samples_split'], min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None)
args=["dalitz","particle","antiparticle",100,comp_file_list,2,clf,np.logspace(-2, 10, 13),np.logspace(-9, 3, 13)]
result= classifier_eval_simplified.classifier_eval(2,0,args)
with open("dt_optimisation_values.txt", "a") as myfile:
myfile.write(str(params['max_depth'][0])+"\t"+ str(params['min_samples_split'][0])+"\t"+str(result)+"\n")
return result
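# Hypothetical direct invocation, mimicking how a Spearmint-style
# optimiser would call main(). The parameter names match the lookups
# above; wrapping values in length-1 numpy arrays is an assumption about
# the optimiser's parameter format (the code indexes params[...][0]):
#
# import numpy as np
# main(1, {'max_depth': np.array([5]), 'min_samples_split': np.array([4])})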
| mit |
RomainBrault/scikit-learn | sklearn/tests/test_cross_validation.py | 79 | 47914 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be 3d or higher')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of the train and test splits covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
# Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 3]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
# though all the classes are not necessarily represented on each
# side of the split at each iteration
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Check that errors are raised if all n_labels for individual
# classes are less than n_folds.
y = [3, 3, -1, -1, 2]
assert_raises(ValueError, cval.StratifiedKFold, y, 3)
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
error_string = ("k-fold cross validation requires at least one"
" train / test split")
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 0)
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups' segment locations
# for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than the non-shuffling
# variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
test_size = np.ceil(0.33 * len(y))
train_size = len(y) - test_size
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train],
return_inverse=True)[1]) /
float(len(y[train])))
p_test = (np.bincount(np.unique(y[test],
return_inverse=True)[1]) /
float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(len(train) + len(test), y.size)
assert_equal(len(train), train_size)
assert_equal(len(test), test_size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test that StratifiedShuffleSplit draws indices with equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
labels = [0, 1, 2, 3] * 3 + [4, 5] * 5
splits = cval.StratifiedShuffleSplit(labels, n_iter=1,
test_size=0.5, random_state=0)
train, test = next(iter(splits))
assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
# check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (classes are balanced, so f1_score should be equal to the
# zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
neg_mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="neg_mean_squared_error")
expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
with ignore_warnings(category=ConvergenceWarning):
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
with ignore_warnings(category=ConvergenceWarning):
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
# test with 3d X and 1d y
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check that cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
agarciamontoro/TFG | Software/Raytracer/raytracer.py | 1 | 14968 | from .universe import universe
from .Utils.logging_utils import LoggingClass
import os
import numpy as np
from numpy import pi as Pi
from matplotlib import pyplot as plt
from matplotlib.patches import Circle
import mpl_toolkits.mplot3d.art3d as art3d
from pycuda import driver, compiler, gpuarray
import jinja2
# When importing this module we are initializing the device.
# Now, we can call the device and send information using
# the appropriate tools in the pycuda module.
import pycuda.autoinit
__logmodule__ = True
# Set directories for correct handling of paths
selfDir = os.path.dirname(os.path.abspath(__file__))
softwareDir = os.path.abspath(os.path.join(selfDir, os.pardir))
def spher2cart(points):
# Retrieve the actual data
r = points[:, 0]
theta = points[:, 1]
phi = points[:, 2]
cosT = np.cos(theta)
sinT = np.sin(theta)
cosP = np.cos(phi)
sinP = np.sin(phi)
x = r * sinT * cosP
y = r * sinT * sinP
z = r * cosT
return x, y, z
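# Quick sanity check for spher2cart (illustrative, not part of the pipeline):
# a point on the equator (theta = pi/2) at phi = 0 should map to the +x axis.
#   pts = np.array([[2.0, Pi / 2.0, 0.0]])   # (r, theta, phi)
#   x, y, z = spher2cart(pts)                # -> x ~ 2.0, y ~ 0.0, z ~ 0.0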
SPHERE = 0
DISK = 1
HORIZON = 2
STRAIGHT = 3
class RayTracer(metaclass=LoggingClass):
"""Relativistic spacetime ray tracer.
This class generates images of what an observer would see near a rotating
black hole.
This is an abstraction layer over the CUDA kernel that integrates the ODE
system specified in equations (A.15) of Thorne's paper. It integrates,
backwards in time, a set of rays near a Kerr black hole, computing its
trajectories from the focal point of a camera located near the black hole.
The RayTracer class hides all the black magic behind the CUDA code, giving
a nice and simple interface to the user that just wants some really cool,
and scientifically accurate, images.
Given a scene composed by a camera, a Kerr metric and a black hole, the
RayTracer just expects a time :math:`x_{end}` to solve the system.
Example:
Define the characteristics of the black hole and build it::
spin = 0.9999999999
innerDiskRadius = 9
outerDiskRadius = 20
blackHole = BlackHole(spin, innerDiskRadius, outerDiskRadius)
Define the specifications of the camera and build it::
camR = 30
camTheta = 1.511
camPhi = 0
camFocalLength = 3
camSensorShape = (1000, 1000) # (Rows, Columns)
camSensorSize = (2, 2) # (Height, Width)
camera = Camera(camR, camTheta, camPhi,
camFocalLength, camSensorShape, camSensorSize)
Create a Kerr metric with the previous two objects::
kerr = KerrMetric(camera, blackHole)
Set the speed of the camera once the Kerr metric and the black hole are
created: it needs some info from both of these objects::
camera.setSpeed(kerr, blackHole)
Finally, build the raytracer with the camera, the metric and the black
hole...::
rayTracer = RayTracer(camera, kerr, blackHole)
...and generate the image!::
rayTracer.rayTrace(-90)
rayTracer.synchronise()
rayTracer.plotImage()
"""
def __init__(self, camera, debug=False):
self.debug = debug
self.systemSize = 5
# Set up the necessary objects
self.camera = camera
# Get the number of rows and columns of the final image
self.imageRows = self.camera.sensorShape[0]
self.imageCols = self.camera.sensorShape[1]
self.numPixels = self.imageRows * self.imageCols
# Compute the block and grid sizes: given a fixed block dimension of 64
# threads (in an 8x8 shape), the number of blocks is computed to get
# at least as many threads as pixels
# Fixed size block dimension: 8x8x1
self.blockDimCols = 8
self.blockDimRows = 8
self.blockDim = (self.blockDimCols, self.blockDimRows, 1)
# Grid dimension computed to cover all the pixels with a thread (there
# will be some idle threads)
self.gridDimCols = int(((self.imageCols - 1) / self.blockDimCols) + 1)
self.gridDimRows = int(((self.imageRows - 1) / self.blockDimRows) + 1)
self.gridDim = (self.gridDimCols, self.gridDimRows, 1)
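# Worked example of the ceiling division above (illustrative): for a
# 1000x1000 sensor and 8x8 blocks, gridDimCols = int((1000 - 1) / 8) + 1
# = 125, so the 125x125 grid launches exactly 1000x1000 threads; for
# sizes that are not multiples of 8, the excess threads simply idle.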
print(self.blockDim, self.gridDim)
# Render the kernel
self._kernelRendering()
# Compute the initial conditions
self._setUpInitCond()
# Create two timers to measure the time
self.start = driver.Event()
self.end = driver.Event()
# Initialise a variable to store the total time of computation across
# all calls
self.totalTime = 0.
def _kernelRendering(self):
# We must construct a FileSystemLoader object to load templates off
# the filesystem
templateLoader = jinja2.FileSystemLoader(searchpath=selfDir)
# An environment provides the data necessary to read and
# parse our templates. We pass in the loader object here.
templateEnv = jinja2.Environment(loader=templateLoader)
# Read the template file using the environment object.
# This also constructs our Template object.
templatePath = os.path.join('Kernel', 'common.jj')
template = templateEnv.get_template(templatePath)
codeType = "double"
# Specify any input variables to the template as a dictionary.
templateVars = {
"IMG_ROWS": self.imageRows,
"IMG_COLS": self.imageCols,
"NUM_PIXELS": self.imageRows*self.imageCols,
# Camera constants
"D": self.camera.focalLength,
"CAM_R": self.camera.r,
"CAM_THETA": self.camera.theta,
"CAM_PHI": self.camera.phi,
"CAM_BETA": self.camera.speed,
# Black hole constants
"SPIN": universe.spin,
"SPIN2": universe.spinSquared,
"B1": universe.b1,
"B2": universe.b2,
"HORIZON_RADIUS": universe.horizonRadius,
"INNER_DISK_RADIUS": universe.accretionDisk.innerRadius,
"OUTER_DISK_RADIUS": universe.accretionDisk.outerRadius,
# Kerr metric constants
"RO": self.camera.metric.ro,
"DELTA": self.camera.metric.delta,
"POMEGA": self.camera.metric.pomega,
"ALPHA": self.camera.metric.alpha,
"OMEGA": self.camera.metric.omega,
# Camera rotation angles
"PITCH": np.float64(self.camera.pitch),
"ROLL": np.float64(self.camera.roll),
"YAW": np.float64(self.camera.yaw),
# RK45 solver constants
"R_TOL_I": 1e-6,
"A_TOL_I": 1e-12,
"SAFE": 0.9,
"SAFE_INV": 1/0.9,
"FAC_1": 0.2,
"FAC_1_INV": 1 / 0.2,
"FAC_2": 10.0,
"FAC_2_INV": 1 / 10.0,
"BETA": 0.04,
"UROUND": 2.3e-16,
"MIN_RESOL": -0.1,
"MAX_RESOL": -2.0,
# Constants for the alternative version of the solver
"SOLVER_DELTA": 0.03125,
"SOLVER_EPSILON": 1e-6,
# Convention for ray status
"SPHERE": SPHERE, # A ray that has not yet collide with anything.
"DISK": DISK, # A ray that has collided with the disk.
"HORIZON": HORIZON, # A ray that has collided with the black hole.
# Data type
"REAL": codeType,
# Number of equations
"SYSTEM_SIZE": self.systemSize,
"DATA_SIZE": 2,
# Debug switch
"DEBUG": "#define DEBUG" if self.debug else ""
}
# Finally, process the template to produce our final text.
kernel = template.render(templateVars)
# Store it in the file that will be included by all the other compiled
# files
filePath = os.path.join(selfDir, 'Kernel', 'common.cu')
with open(filePath, 'w') as outputFile:
outputFile.write(kernel)
# ======================= KERNEL COMPILATION ======================= #
# Compile the kernel code using pycuda.compiler
kernelFile = os.path.join(selfDir, "Kernel", "raytracer.cu")
mod = compiler.SourceModule(open(kernelFile, "r").read(),
include_dirs=[selfDir, softwareDir])
# Get the initial kernel function from the compiled module
self._setInitialConditions = mod.get_function("setInitialConditions")
# Get the solver function from the compiled module
self._solve = mod.get_function("kernel")
# Get the image generation function from the compiled module
self.generateImage = mod.get_function("generate_image")
# # Get the collision detection function from the compiled module
# self._detectCollisions = mod.get_function("detectCollisions")
def _setUpInitCond(self):
# Array to compute the ray's initial conditions
self.systemState = np.empty((self.imageRows, self.imageCols,
self.systemSize))
# Array to compute the ray's constants
self.constants = np.empty((self.imageRows, self.imageCols, 2))
# Array to store the ray statuses (see the SPHERE/DISK/HORIZON constants):
# 0: A ray that has not yet collided with anything.
# 1: A ray that has collided with the accretion disk.
# 2: A ray that has collided with the black hole horizon.
self.rayStatus = np.zeros((self.imageRows, self.imageCols),
dtype=np.int32)
# Send them to the GPU
self.systemStateGPU = gpuarray.to_gpu(self.systemState)
self.constantsGPU = gpuarray.to_gpu(self.constants)
self.rayStatusGPU = gpuarray.to_gpu(self.rayStatus)
# Compute the initial conditions
self._setInitialConditions(
self.systemStateGPU,
self.constantsGPU,
np.float64(self.camera.pixelWidth),
np.float64(self.camera.pixelHeight),
# Grid definition -> number of blocks x number of blocks.
# Each block computes the direction of one pixel
grid=self.gridDim,
# Block definition -> number of threads x number of threads
# Each thread in the block computes one RK4 step for one equation
block=self.blockDim
)
# TODO: Remove this copy, inefficient!
# Retrieve the computed initial conditions
self.systemState = self.systemStateGPU.get()
self.constants = self.constantsGPU.get()
def callKernel(self, x, xEnd):
self._solve(
np.float64(x),
np.float64(xEnd),
self.systemStateGPU,
np.float64(-0.001),
np.float64(xEnd - x),
self.constantsGPU,
self.rayStatusGPU,
# Grid definition -> number of blocks x number of blocks.
# Each block computes the direction of one pixel
grid=self.gridDim,
# Block definition -> number of threads x number of threads
# Each thread in the block computes one RK4 step for one
# equation
block=self.blockDim
)
def rayTrace(self, xEnd, kernelCalls=1):
"""
Args:
xEnd (float): Time in which the system will be integrated. After
this method finishes, the value of the rays at t=xEnd will be
known
stepsPerKernel (integer): The number of steps each kernel call will
compute; i.e., the host will call the kernel
xEnd / (resolution*stepsPerKernel) times.
resolution (float): The size of the interval that will be used to
compute one solver step between successive calls to the
collision detection method.
"""
# Initialize current time
x = np.float64(0)
# Compute iteration interval
interval = xEnd / kernelCalls
# Send the rays to the outer space!
for _ in range(kernelCalls):
print(x, x+interval)
# Start timing
self.start.record()
# Call the kernel!
self.callKernel(x, x + interval)
# Update time
x += interval
# End timing
self.end.record()
self.end.synchronize()
# Calculate the run length
self.totalTime += self.start.time_till(self.end)*1e-3
self.synchronise()
return self.rayStatus, self.systemState
def slicedRayTrace(self, xEnd, numSteps=100):
stepSize = xEnd / numSteps
# Initialize plotData with the initial position of the rays
self.plotData = np.zeros((self.imageRows, self.imageCols,
3, numSteps+1))
self.plotData[:, :, :, 0] = self.systemState[:, :, :3]
# Initialize plotStatus with a status matrix (first slice set to zero)
self.plotStatus = np.empty((self.imageRows, self.imageCols,
numSteps+1), dtype=np.int32)
self.plotStatus[:, :, 0] = 0
x = 0
for step in range(numSteps):
# Solve the system
self.callKernel(x, x + stepSize)
# Advance the step and synchronise
x += stepSize
self.synchronise()
# Get the data and store it for future plot
self.plotData[:, :, :, step + 1] = self.systemState[:, :, :3]
self.plotStatus[:, :, step + 1] = self.rayStatus
return self.plotStatus, self.plotData
def synchronise(self):
self.rayStatus = self.rayStatusGPU.get()
self.systemState = self.systemStateGPU.get()
def texturedImage(self, disk, sphere):
"""Image should be a 2D array where each entry is a 3-tuple of Reals
between 0.0 and 1.0
"""
diskGPU = gpuarray.to_gpu(disk)
sphereGPU = gpuarray.to_gpu(sphere)
self.image = np.empty((self.imageRows, self.imageCols, 3),
dtype=np.float64)
imageGPU = gpuarray.to_gpu(self.image)
self.generateImage(
self.systemStateGPU,
self.rayStatusGPU,
diskGPU,
np.int32(disk.shape[0]),
np.int32(disk.shape[1]),
sphereGPU,
np.int32(sphere.shape[0]),
np.int32(sphere.shape[1]),
imageGPU,
# Grid definition -> number of blocks x number of blocks.
# Each block computes the direction of one pixel
grid=self.gridDim,
# Block definition -> number of threads x number of threads
# Each thread in the block computes one RK4 step for one equation
block=self.blockDim
)
self.image = imageGPU.get()
return self.image
| gpl-2.0 |
mjudsp/Tsallis | sklearn/ensemble/tests/test_voting_classifier.py | 25 | 8160 | """Testing for the VotingClassifier (sklearn.ensemble.voting_classifier)."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn import datasets
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_estimator_init():
eclf = VotingClassifier(estimators=[])
msg = ('Invalid `estimators` attribute, `estimators` should be'
' a list of (string, estimator) tuples')
assert_raise_message(AttributeError, msg, eclf.fit, X, y)
clf = LogisticRegression(random_state=1)
eclf = VotingClassifier(estimators=[('lr', clf)], voting='error')
msg = ('Voting must be \'soft\' or \'hard\'; got (voting=\'error\')')
assert_raise_message(ValueError, msg, eclf.fit, X, y)
eclf = VotingClassifier(estimators=[('lr', clf)], weights=[1, 2])
msg = ('Number of classifiers and weights must be equal'
'; got 2 weights, 1 estimators')
assert_raise_message(ValueError, msg, eclf.fit, X, y)
def test_predictproba_hardvoting():
eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
('lr2', LogisticRegression())],
voting='hard')
msg = "predict_proba is not available when voting='hard'"
assert_raise_message(AttributeError, msg, eclf.predict_proba, X)
def test_notfitted():
eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
('lr2', LogisticRegression())],
voting='soft')
msg = ("This VotingClassifier instance is not fitted yet. Call \'fit\'"
" with appropriate arguments before using this method.")
assert_raise_message(NotFittedError, msg, eclf.predict_proba, X)
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
assert_equal(all(clf1.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf2.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf3.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
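# The expected soft-voting probabilities above are the weighted averages of
# the per-classifier probabilities, e.g. for sample 0 and class 0:
#   p = (w_lr * p_lr + w_rf * p_rf + w_gnb * p_gnb) / (w_lr + w_rf + w_gnb)
# With weights [2, 1, 1] this is exactly what the (2*... + ... + ...) / 4
# terms compute.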
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
| bsd-3-clause |
muxiaobai/CourseExercises | python/tianchi/20180201yancheng/201802/model20180223.py | 1 | 2398 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#https://www.kaggle.com/dansbecker/selecting-and-filtering-in-pandas
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn import cross_validation
from sklearn import svm
from sklearn.learning_curve import learning_curve
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.externals import joblib
train = pd.read_table('../train_20171215.txt')
test= pd.read_table('../test_A_20171225.txt')
#test= pd.read_table('../answer_top_A_20180225.txt')
#print train_data.describe()
actions1 = train.groupby(['date','day_of_week'], as_index=False)['cnt'].agg({'count1':np.sum})
df_train_target = actions1['count1'].values
df_train_data = actions1.drop(['count1'],axis = 1).values
# Split the data into training and test sets
cv = cross_validation.ShuffleSplit(len(df_train_data), n_iter=5,test_size=0.2,random_state=0)
'''
print "GradientBoostingRegressor"
for train, test in cv:
gbdt = GradientBoostingRegressor().fit(df_train_data[train], df_train_target[train])
result1 = gbdt.predict(df_train_data[test])
print(mean_squared_error(result1,df_train_target[test]))
print '......'
'''
predict_cons = ['date','day_of_week']
X = train[predict_cons]
y = train.cnt
train_x,val_x,train_y,val_y = train_test_split(X,y,test_size = 0.2,random_state= 0)
print "GradientBoostingRegressor"
gbdt = GradientBoostingRegressor(n_estimators = 1000,max_leaf_nodes = 400)
gbdt.fit(X, y)#17083
#RandomForestRegressor 93 16938
#GradientBoostingRegressor 90 16866
print mean_absolute_error(val_y,gbdt.predict(val_x))
print(mean_squared_error(val_y,gbdt.predict(val_x)))
# predict and save output
#print ("The predictions are")
predicted_test_prices = gbdt.predict(test[predict_cons])
int_cnt = np.around(predicted_test_prices)
my_submission = pd.DataFrame({'date':test.date,'cnt':int_cnt.astype(int)})
my_submission.to_csv('submission20180223.csv',index = False,header = False,columns = ['date','cnt'])
my_submission.to_csv('result20180223.txt',index=False,header=False,columns = ['date','cnt'],sep='\t')
| gpl-2.0 |
pablocarderam/genetargeter | gRNAScores/Rule_Set_2_scoring_v1/analysis/models/ensembles.py | 1 | 6362 | from __future__ import print_function
from __future__ import division
from builtins import range
import numpy as np
import scipy as sp
import sklearn
import sklearn.linear_model
import sklearn.ensemble as en
import sklearn.tree as tree
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LinearRegression
from sklearn import svm
from past.utils import old_div  # used by pairwise_majority_voting below
from gRNAScores.Rule_Set_2_scoring_v1.analysis.models.regression import linreg_on_fold
def spearman_scoring(clf, X, y):
y_pred = clf.predict(X).flatten()
return sp.stats.spearmanr(y_pred, y.flatten())[0]
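# spearman_scoring follows the scikit-learn scorer signature (estimator, X, y),
# so it can be passed directly as `scoring=` to GridSearchCV, e.g. (sketch):
#   clf = GridSearchCV(est, param_grid, scoring=spearman_scoring, cv=cv)
# Higher Spearman rank correlation between predictions and targets is better.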
def adaboost_on_fold(feature_sets, train, test, y, y_all, X, dim, dimsum, learn_options):
'''
AdaBoostRegressor from scikitlearn.
'''
if learn_options['adaboost_version'] == 'python':
if not learn_options['adaboost_CV']:
clf = en.GradientBoostingRegressor(loss=learn_options['adaboost_loss'], learning_rate=learn_options['adaboost_learning_rate'],
n_estimators=learn_options['adaboost_n_estimators'],
alpha=learn_options['adaboost_alpha'],
subsample=1.0, min_samples_split=2, min_samples_leaf=1, max_depth=learn_options['adaboost_max_depth'],
init=None, random_state=None, max_features=None,
verbose=0, max_leaf_nodes=None, warm_start=False)
clf.fit(X[train], y[train].flatten())
y_pred = clf.predict(X[test])[:, None]
else:
print("Adaboost with GridSearch")
from sklearn.grid_search import GridSearchCV
param_grid = {'learning_rate': [0.1, 0.05, 0.01],
'max_depth': [4, 5, 6, 7],
'min_samples_leaf': [5, 7, 10, 12, 15],
'max_features': [1.0, 0.5, 0.3, 0.1]}
label_encoder = sklearn.preprocessing.LabelEncoder()
label_encoder.fit(y_all['Target gene'].values[train])
gene_classes = label_encoder.transform(y_all['Target gene'].values[train])
n_folds = len(np.unique(gene_classes))
cv = sklearn.cross_validation.StratifiedKFold(gene_classes, n_folds=n_folds, shuffle=True)
est = en.GradientBoostingRegressor(loss=learn_options['adaboost_loss'], n_estimators=learn_options['adaboost_n_estimators'])
clf = GridSearchCV(est, param_grid, n_jobs=20, verbose=1, cv=cv, scoring=spearman_scoring, iid=False).fit(X[train], y[train].flatten())
print(clf.best_params_)
y_pred = clf.predict(X[test])[:, None]
else:
raise NotImplementedError
return y_pred, clf
def LASSOs_ensemble_on_fold(feature_sets, train, test, y, y_all, X, dim, dimsum, learn_options):
train_indices = np.where(train)[0]
sel = int(len(train_indices) * 0.10)  # hold out 10% of train for validation
permuted_ind = np.random.permutation(train_indices)
valid_indices = permuted_ind[:sel]
train_indices = permuted_ind[sel:]
train_sub = np.zeros_like(train, dtype=bool)
valid_sub = np.zeros_like(train, dtype=bool)
train_sub[train_indices] = True
valid_sub[valid_indices] = True
validations = np.zeros((len(valid_indices), len(list(feature_sets.keys()))))
predictions = np.zeros((test.sum(), len(list(feature_sets.keys()))))
for i, feature_name in enumerate(feature_sets.keys()):
X_feature = feature_sets[feature_name].values
y_pred, m = linreg_on_fold(feature_sets, train_sub, valid_sub, y, y_all, X_feature, dim, dimsum, learn_options)
predictions[:, i] = m.predict(X_feature[test]).flatten()
validations[:, i] = y_pred.flatten()
clf = LinearRegression()
clf.fit(validations, y[valid_sub])
y_pred = clf.predict(predictions)
return y_pred, None
def randomforest_on_fold(feature_sets, train, test, y, y_all, X, dim, dimsum, learn_options):
'''
RandomForestRegressor from scikitlearn.
'''
clf = en.RandomForestRegressor(oob_score=True)
clf.fit(X[train], y[train][:, 0])
y_pred = clf.predict(X[test])[:, None]
return y_pred, clf
def decisiontree_on_fold(feature_sets, train, test, y, y_all, X, dim, dimsum, learn_options):
'''
DecisionTreeRegressor from scikitlearn.
'''
clf = tree.DecisionTreeRegressor()
clf.fit(X[train], y[train][:, 0])
y_pred = clf.predict(X[test])[:, None]
return y_pred, clf
def linear_stacking(y_train, X_train, X_test):
clf = sklearn.linear_model.LinearRegression()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
return y_pred.flatten()
def pairwise_majority_voting(y):
N = y.shape[0]
y_pred = np.zeros((N, N))
for i in range(N):
for j in range(N):
if i == j:
continue
y_pred[i, j] = (y[i] > y[j]).sum() > old_div(y.shape[1],2)
return old_div(y_pred.sum(1),y_pred.sum(1).max())
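# Illustrative trace (rows = candidates, columns = per-model scores): with
# y = [[0.9, 0.8], [0.2, 0.1]], candidate 0 beats candidate 1 under both
# models, so y_pred[0, 1] = 1 while y_pred[1, 0] = 0; the row sums [1, 0]
# are then rescaled by their maximum, giving final scores [1.0, 0.0].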
def median(y):
return np.median(y, axis=1)
def GBR_stacking(y_train, X_train, X_test):
param_grid = {'learning_rate': [0.1, 0.05, 0.01],
'max_depth': [2, 3, 4, 5], # [2, 3, 4, 6],
'min_samples_leaf': [1, 2, 3], # ,5, 7],
'max_features': [1.0, 0.5, 0.3, 0.1]}
est = en.GradientBoostingRegressor(loss='ls', n_estimators=100)
clf = GridSearchCV(est, param_grid, n_jobs=3, verbose=1, cv=20, scoring=spearman_scoring).fit(X_train, y_train.flatten())
# clf.fit(X_train, y_train.flatten())
return clf.predict(X_test)
def GP_stacking(y_train, X_train, X_test):
import GPy
m = GPy.models.SparseGPRegression(X_train, y_train, num_inducing=20, kernel=GPy.kern.RBF(X_train.shape[1]))
m.optimize('bfgs', messages=0)
y_pred = m.predict(X_test)[0]
return y_pred.flatten()
def SVM_stacking(y_train, X_train, X_test):
parameters = {'kernel': ('linear', 'rbf'), 'C': np.linspace(1, 10, 10), 'gamma': np.linspace(1e-3, 1., 10)}
svr = svm.SVR()
clf = GridSearchCV(svr, parameters, n_jobs=3, verbose=1, cv=10, scoring=spearman_scoring)
clf.fit(X_train, y_train.flatten())
return clf.predict(X_test)
| mit |
TomAugspurger/pandas | doc/make.py | 1 | 11462 | #!/usr/bin/env python3
"""
Python script for building documentation.
To build the docs you must have all optional dependencies for pandas
installed. See the installation instructions for a list of these.
Usage
-----
$ python make.py clean
$ python make.py html
$ python make.py latex
"""
import argparse
import csv
import importlib
import os
import shutil
import subprocess
import sys
import webbrowser
import docutils
import docutils.parsers.rst
DOC_PATH = os.path.dirname(os.path.abspath(__file__))
SOURCE_PATH = os.path.join(DOC_PATH, "source")
BUILD_PATH = os.path.join(DOC_PATH, "build")
REDIRECTS_FILE = os.path.join(DOC_PATH, "redirects.csv")
class DocBuilder:
"""
Class to wrap the different commands of this script.
All public methods of this class can be called as parameters of the
script.
"""
def __init__(
self,
num_jobs=0,
include_api=True,
single_doc=None,
verbosity=0,
warnings_are_errors=False,
):
self.num_jobs = num_jobs
self.verbosity = verbosity
self.warnings_are_errors = warnings_are_errors
if single_doc:
single_doc = self._process_single_doc(single_doc)
include_api = False
os.environ["SPHINX_PATTERN"] = single_doc
elif not include_api:
os.environ["SPHINX_PATTERN"] = "-api"
self.single_doc_html = None
if single_doc and single_doc.endswith(".rst"):
self.single_doc_html = os.path.splitext(single_doc)[0] + ".html"
elif single_doc:
self.single_doc_html = f"reference/api/pandas.{single_doc}.html"
def _process_single_doc(self, single_doc):
"""
Make sure the provided value for --single is a path to an existing
.rst/.ipynb file, or a pandas object that can be imported.
For example, categorial.rst or pandas.DataFrame.head. For the latter,
return the corresponding file path
(e.g. reference/api/pandas.DataFrame.head.rst).
"""
base_name, extension = os.path.splitext(single_doc)
if extension in (".rst", ".ipynb"):
if os.path.exists(os.path.join(SOURCE_PATH, single_doc)):
return single_doc
else:
raise FileNotFoundError(f"File {single_doc} not found")
elif single_doc.startswith("pandas."):
try:
obj = pandas # noqa: F821
for name in single_doc.split("."):
obj = getattr(obj, name)
except AttributeError as err:
raise ImportError(f"Could not import {single_doc}") from err
else:
return single_doc[len("pandas.") :]
else:
raise ValueError(
f"--single={single_doc} not understood. "
"Value should be a valid path to a .rst or .ipynb file, "
"or a valid pandas object "
"(e.g. categorical.rst or pandas.DataFrame.head)"
)
@staticmethod
def _run_os(*args):
"""
Execute a command as a OS terminal.
Parameters
----------
*args : list of str
Command and parameters to be executed
Examples
--------
>>> DocBuilder()._run_os('python', '--version')
"""
subprocess.check_call(args, stdout=sys.stdout, stderr=sys.stderr)
def _sphinx_build(self, kind: str):
"""
Call sphinx to build documentation.
Attribute `num_jobs` from the class is used.
Parameters
----------
kind : {'html', 'latex'}
Examples
--------
>>> DocBuilder(num_jobs=4)._sphinx_build('html')
"""
if kind not in ("html", "latex"):
raise ValueError(f"kind must be html or latex, not {kind}")
cmd = ["sphinx-build", "-b", kind]
if self.num_jobs:
cmd += ["-j", str(self.num_jobs)]
if self.warnings_are_errors:
cmd += ["-W", "--keep-going"]
if self.verbosity:
cmd.append(f"-{'v' * self.verbosity}")
cmd += [
"-d",
os.path.join(BUILD_PATH, "doctrees"),
SOURCE_PATH,
os.path.join(BUILD_PATH, kind),
]
return subprocess.call(cmd)
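# For reference, the assembled command for an HTML build with 4 jobs looks
# roughly like (paths abbreviated):
#   sphinx-build -b html -j 4 -d build/doctrees source build/html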
def _open_browser(self, single_doc_html):
"""
Open a browser tab showing single
"""
url = os.path.join("file://", DOC_PATH, "build", "html", single_doc_html)
webbrowser.open(url, new=2)
def _get_page_title(self, page):
"""
Open the rst file `page` and extract its title.
"""
fname = os.path.join(SOURCE_PATH, f"{page}.rst")
option_parser = docutils.frontend.OptionParser(
components=(docutils.parsers.rst.Parser,)
)
doc = docutils.utils.new_document("<doc>", option_parser.get_default_values())
with open(fname) as f:
data = f.read()
parser = docutils.parsers.rst.Parser()
# do not generate any warning when parsing the rst
with open(os.devnull, "a") as f:
doc.reporter.stream = f
parser.parse(data, doc)
section = next(
node for node in doc.children if isinstance(node, docutils.nodes.section)
)
title = next(
node for node in section.children if isinstance(node, docutils.nodes.title)
)
return title.astext()
def _add_redirects(self):
"""
Create in the build directory an html file with a redirect,
for every row in REDIRECTS_FILE.
"""
with open(REDIRECTS_FILE) as mapping_fd:
reader = csv.reader(mapping_fd)
for row in reader:
if not row or row[0].strip().startswith("#"):
continue
path = os.path.join(BUILD_PATH, "html", *row[0].split("/")) + ".html"
try:
title = self._get_page_title(row[1])
except Exception:
# the file can be an ipynb and not an rst, or docutils
# may not be able to read the rst because it has some
# sphinx specific stuff
title = "this page"
if os.path.exists(path):
raise RuntimeError(
f"Redirection would overwrite an existing file: {path}"
)
with open(path, "w") as moved_page_fd:
html = f"""\
<html>
<head>
<meta http-equiv="refresh" content="0;URL={row[1]}.html"/>
</head>
<body>
<p>
The page has been moved to <a href="{row[1]}.html">{title}</a>
</p>
</body>
<html>"""
moved_page_fd.write(html)
def html(self):
"""
Build HTML documentation.
"""
ret_code = self._sphinx_build("html")
zip_fname = os.path.join(BUILD_PATH, "html", "pandas.zip")
if os.path.exists(zip_fname):
os.remove(zip_fname)
if ret_code == 0:
if self.single_doc_html is not None:
self._open_browser(self.single_doc_html)
else:
self._add_redirects()
return ret_code
def latex(self, force=False):
"""
Build PDF documentation.
"""
if sys.platform == "win32":
sys.stderr.write("latex build has not been tested on windows\n")
else:
ret_code = self._sphinx_build("latex")
os.chdir(os.path.join(BUILD_PATH, "latex"))
if force:
for i in range(3):
self._run_os("pdflatex", "-interaction=nonstopmode", "pandas.tex")
raise SystemExit(
"You should check the file "
'"build/latex/pandas.pdf" for problems.'
)
else:
self._run_os("make")
return ret_code
def latex_forced(self):
"""
Build PDF documentation with retries to find missing references.
"""
return self.latex(force=True)
@staticmethod
def clean():
"""
Clean documentation generated files.
"""
shutil.rmtree(BUILD_PATH, ignore_errors=True)
shutil.rmtree(os.path.join(SOURCE_PATH, "reference", "api"), ignore_errors=True)
def zip_html(self):
"""
Compress HTML documentation into a zip file.
"""
zip_fname = os.path.join(BUILD_PATH, "html", "pandas.zip")
if os.path.exists(zip_fname):
os.remove(zip_fname)
dirname = os.path.join(BUILD_PATH, "html")
fnames = os.listdir(dirname)
os.chdir(dirname)
self._run_os("zip", zip_fname, "-r", "-q", *fnames)
def main():
cmds = [method for method in dir(DocBuilder) if not method.startswith("_")]
joined = ",".join(cmds)
argparser = argparse.ArgumentParser(
description="pandas documentation builder", epilog=f"Commands: {joined}",
)
joined = ", ".join(cmds)
argparser.add_argument(
"command", nargs="?", default="html", help=f"command to run: {joined}",
)
argparser.add_argument(
"--num-jobs", type=int, default=0, help="number of jobs used by sphinx-build"
)
argparser.add_argument(
"--no-api", default=False, help="omit api and autosummary", action="store_true"
)
argparser.add_argument(
"--single",
metavar="FILENAME",
type=str,
default=None,
help=(
"filename (relative to the 'source' folder) of section or method name to "
"compile, e.g. 'development/contributing.rst', "
"'ecosystem.rst', 'pandas.DataFrame.join'"
),
)
argparser.add_argument(
"--python-path", type=str, default=os.path.dirname(DOC_PATH), help="path"
)
argparser.add_argument(
"-v",
action="count",
dest="verbosity",
default=0,
help=(
"increase verbosity (can be repeated), "
"passed to the sphinx build command"
),
)
argparser.add_argument(
"--warnings-are-errors",
"-W",
action="store_true",
help="fail if warnings are raised",
)
args = argparser.parse_args()
if args.command not in cmds:
joined = ", ".join(cmds)
raise ValueError(f"Unknown command {args.command}. Available options: {joined}")
# Below we update both os.environ and sys.path. The former is used by
# external libraries (namely Sphinx) to compile this module and resolve
# the import of `python_path` correctly. The latter is used to resolve
# the import within the module, injecting it into the global namespace
os.environ["PYTHONPATH"] = args.python_path
sys.path.insert(0, args.python_path)
globals()["pandas"] = importlib.import_module("pandas")
# Set the matplotlib backend to the non-interactive Agg backend for all
# child processes.
os.environ["MPLBACKEND"] = "module://matplotlib.backends.backend_agg"
builder = DocBuilder(
args.num_jobs,
not args.no_api,
args.single,
args.verbosity,
args.warnings_are_errors,
)
return getattr(builder, args.command)()
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause |
kagayakidan/scikit-learn | examples/plot_multioutput_face_completion.py | 330 | 3019 | """
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :int(np.ceil(0.5 * n_pixels))]  # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # Lower half of the faces
X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
| bsd-3-clause |
roshantha9/AbstractManycoreSim | src/libApplicationModel/WorkflowGenerator.py | 1 | 22269 | import pprint
import sys
import math, random
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import scipy.stats
from matplotlib.colors import ListedColormap, NoNorm
from matplotlib import mlab
from itertools import cycle # for automatic markers
import matplotlib.cm as cm
from matplotlib.font_manager import FontProperties
import pickle
## local imports
import Task
import libBuffer.Buffer
from SimParams import SimParams
from TaskSet import TaskSet
from Task import TaskModel
class WorkflowGenerator():
def __init__(self, env, max_wf,
min_videos_per_wf, max_videos_per_wf,
min_gops_per_video, max_gops_per_video,
min_inter_video_gap, max_inter_video_gap,
min_interarrival, max_interarrival):
self.env = env
self.max_wf = max_wf
self.min_videos = min_videos_per_wf
self.max_videos = max_videos_per_wf
self.min_gops_per_video = min_gops_per_video
self.max_gops_per_video = max_gops_per_video
self.max_inter_video_gap = max_inter_video_gap
self.min_inter_video_gap = min_inter_video_gap
self.min_interarrival = min_interarrival
self.max_interarrival = max_interarrival
self.workflows = {}
# for later use
self.max_task_priority = None
self.used_pri_values = []
self.workflows_summary = {}
##################
# getters/setters
##################
def get_max_task_priority(self):
return self.max_task_priority
def get_used_pri_values(self):
return self.used_pri_values
def generate_workflows(self):
task_start_id = 0
unique_job_start_id = 0
priority_offset = 0
#print "random.randint(2,100000) : " + str(random.randint(2,100000))
#print "np.random.randint(2,100000) : " + str(np.random.randint(2,100000))
for each_wf_id in xrange(self.max_wf):
num_vids = random.randint(self.min_videos, self.max_videos) # determine number of videos for this workflow
#initial_gap = random.uniform(SimParams.TASKDISPATCHER_RESET_DELAY, self.max_inter_video_gap*1.5) # initially we want a gap, we don't want all streams to start at once
#initial_gap = 0.00001
if(SimParams.WFGEN_INITIAL_VID_EQUAL_FOR_ALL_VIDS == True): # all wfs have the same initial start rules
initial_gap = 0.00001 + random.uniform(SimParams.WFGEN_INITIAL_VID_GAP_MIN, SimParams.WFGEN_INITIAL_VID_GAP_MAX)
else:
if(each_wf_id == 0): # wfs have a sequential start offset
initial_gap = 0.00001 + random.uniform(SimParams.WFGEN_INITIAL_VID_GAP_MIN, SimParams.WFGEN_INITIAL_VID_GAP_MAX)
else:
# offset = pre wf initial start time
offset = self.workflows_summary[each_wf_id-1][0]['starttime']
initial_gap = offset + random.uniform(SimParams.WFGEN_INITIAL_VID_GAP_MIN, SimParams.WFGEN_INITIAL_VID_GAP_MAX)
jobs_start_time = initial_gap
self.workflows[each_wf_id] = []
self.workflows_summary[each_wf_id] = {}
for each_vid in xrange(num_vids):
# determine video stream resolution
if(SimParams.DVB_RESOLUTIONS_SELECTED_RANDOM == True):
#pprint.pprint(SimParams.DVB_RESOLUTIONS)
ridx = np.random.choice(range(len(SimParams.DVB_RESOLUTIONS)))
resolution = SimParams.DVB_RESOLUTIONS[ridx]
else:
if(self.max_wf <= len(SimParams.DVB_RESOLUTIONS_FIXED)): # only when there is one vid per wf
resolution = SimParams.DVB_RESOLUTIONS_FIXED[each_wf_id]
else:
print self.max_wf
print len(SimParams.DVB_RESOLUTIONS_FIXED)
sys.exit('Error: generate_workflows: Error - not enough elements in SimParams.DVB_RESOLUTIONS_FIXED')
# determine frame rate for the video
if SimParams.USE_VIDSTRM_SPECIFIC_FRAMERATE == True:
res_total_pixels = resolution[0]*resolution[1]
if res_total_pixels in SimParams.RESOLUTION_SPECIFIC_FRAME_RATE:
frame_rate = np.random.choice(SimParams.RESOLUTION_SPECIFIC_FRAME_RATE[res_total_pixels])
else:
sys.exit("Error - resolution not in SimParams.RESOLUTION_SPECIFIC_FRAME_RATE:" + pprint.pformat(resolution))
else:
frame_rate = SimParams.FRAME_RATE
# generate jobs/gops for the video stream
job_start_id = 0
(jobs_list, job_endtime, num_jobs, avg_dt, min_dt) = self._generate_jobs(job_start_id, unique_job_start_id,
task_start_id,
self.min_gops_per_video,
self.max_gops_per_video,
each_vid, each_wf_id,
SimParams.GOP_STRUCTURE,
jobs_start_time,
resolution[1],
resolution[0],
frame_rate
)
print str(resolution[1]) + "x" + str(resolution[0])
temp_frames = {}
temp_gops = []
for each_task in jobs_list:
if(each_task.get_unique_gop_id() not in temp_frames):
temp_frames[each_task.get_unique_gop_id()] = [each_task.get_id()]
temp_gops.append(each_task.get_unique_gop_id())
else:
temp_frames[each_task.get_unique_gop_id()].append(each_task.get_id())
self.workflows_summary[each_wf_id][each_vid] = {}
self.workflows_summary[each_wf_id][each_vid]={
'starttime' : jobs_start_time,
'endtime' : job_endtime,
'framerate' : jobs_list[0].get_framerate(),
'avg_dispatch_rate' : avg_dt,
'min_dispatch_rate' : min_dt,
'gop_len' : len(jobs_list[0].get_gopstructure()),
'numgops' : num_jobs,
'resolution' : resolution,
'frames' : temp_frames,
'gops' : temp_gops
}
# reset times and ids
gap = random.uniform(self.min_inter_video_gap, self.max_inter_video_gap)
jobs_start_time = job_endtime + gap
#job_start_id += 1
unique_job_start_id = jobs_list[len(jobs_list)-1].get_unique_gop_id() + 1
task_start_id += len(jobs_list)
self.workflows[each_wf_id].extend(jobs_list)
# save workflow summary
if(SimParams.TRACK_WFSUMMARY_PPRINT == True):
workflow_logfile=open('workflow_summary.js', 'w')
pprint.pprint(self.workflows_summary, workflow_logfile, width=128)
print '%f'%self.env.now + "," + "WorkflowGenerator::, finished generating wf_id = " + str(each_wf_id)
def getLastScheduledTask(self):
tmptasks = []
for each_wf_key, each_wf_val in self.workflows.iteritems():
tmptasks.append(each_wf_val[len(each_wf_val)-1])
sorted_tmptasks = sorted(tmptasks, key=lambda x: x.get_scheduledDispatchTime(), reverse=True)
return sorted_tmptasks[0]
def getLastScheduledVideoStream(self):
vs_admission_times = {}
for each_wf_key, each_wf_val in self.workflows_summary.iteritems():
for each_vid_k, each_vid_v in each_wf_val.iteritems():
vid_start_time = each_vid_v['starttime']
temp_k = str(each_wf_key) + "_" + str(each_vid_k)
vs_admission_times[temp_k] = vid_start_time
# find max starttime
max_st = max(vs_admission_times.values())
max_st_k = [vs_k for vs_k, vs_v in vs_admission_times.iteritems() if vs_v == max_st][0]
wf_id = int(max_st_k.split("_")[0])
vs_id = int(max_st_k.split("_")[1])
return (wf_id, vs_id)
def getFirstScheduledTask(self):
tmptasks = []
for each_wf_key, each_wf_val in self.workflows.iteritems():
tmptasks.append(each_wf_val[0])
sorted_tmptasks = sorted(tmptasks, key=lambda x: x.get_scheduledDispatchTime(), reverse=False)
return sorted_tmptasks[0]
def setTaskPriorities_AllUnique(self):
# how many tasks have been created in total ?
task_count = 0
for each_wf in self.workflows.itervalues():
task_count += len(each_wf)
# generate unique random numbers, enough for every task generated
random_unique_pri_list = random.sample(range(1,task_count+1), task_count)
# apply unique priorities for each task in the workflow
i=0
for each_wf in self.workflows.itervalues():
for each_task in each_wf:
each_task.set_priority(random_unique_pri_list[i])
i+=1
# whats the max priority set ?
self.max_task_priority = max(random_unique_pri_list)
def setTaskPriorities_GroupedByJobs(self):
i=1
def setTaskPriorities_GroupedByVids(self):
i=1
# generate all the gops for a video stream
def _generate_jobs(self, job_start_id, unique_job_start_id,
task_start_id, min_jobs, max_jobs, video_stream_id, wf_id,
gop_struct, jobs_dispatchtime_start, frame_h, frame_w, fps):
num_gops = random.randint(min_jobs, max_jobs)
# therefore the end-time ?
job_end_time = jobs_dispatchtime_start + ((float(num_gops) * float(len(gop_struct))) / (float(fps) * 60.0))
taskset = TaskSet(self.env)
# generate new priorities, excluding the ones already in the pool
pri_range = self._genRandomNumList(SimParams.GOP_LENGTH,self.used_pri_values)
# generate multiple gops
if SimParams.TASKSET_MODEL == TaskModel.TASK_MODEL_MHEG2_FRAME_ET_LEVEL_INTERRELATEDGOPS:
final_dispatch_time, avg_dt, min_dt = taskset.generateMPEG2FrameInterRelatedGOPTaskSet(num_gops, task_start_id , job_start_id, unique_job_start_id,
taskset_dispatch_start_time = jobs_dispatchtime_start,
video_stream_id = video_stream_id,
wf_id = wf_id,
frame_w=frame_w,
frame_h=frame_h,
frame_rate=fps,
priority_range = pri_range)
elif(SimParams.TASKSET_MODEL == TaskModel.TASK_MODEL_MHEG2_FRAME_ET_LEVEL):
final_dispatch_time, avg_dt, min_dt = taskset.generateMPEG2FrameTaskSet(num_gops, task_start_id , job_start_id, unique_job_start_id,
taskset_dispatch_start_time = jobs_dispatchtime_start,
video_stream_id = video_stream_id,
wf_id = wf_id,
frame_w=frame_w,
frame_h=frame_h,
frame_rate=fps,
priority_range = pri_range)
# adaptive gop, slices/tiles, task splitting, pulevel cc
elif(SimParams.TASKSET_MODEL in [TaskModel.TASK_MODEL_HEVC_FRAME_LEVEL, TaskModel.TASK_MODEL_HEVC_TILE_LEVEL] ):
pri_range = np.random.randint(10000,size=50)
final_dispatch_time, avg_dt, min_dt = taskset.generateHEVCFrameTaskSet(num_gops, task_start_id , job_start_id, unique_job_start_id,
taskset_dispatch_start_time = jobs_dispatchtime_start,
video_stream_id = video_stream_id,
wf_id = wf_id,
frame_w=frame_w,
frame_h=frame_h,
frame_rate = fps,
priority_range = pri_range,
)
# set the worst-case exuction time for all tasks in the task_pool
taskset.set_worstCaseComputationTime_alltasks()
# if(final_dispatch_time > job_end_time):
# job_end_time = final_dispatch_time
job_end_time = final_dispatch_time
return (taskset.taskList, job_end_time, num_gops, avg_dt, min_dt)
def _remove_dups(self,seq):
seen = set()
seen_add = seen.add
return [ x for x in seq if x not in seen and not seen_add(x)]
def dumpWorkflowsToFile(self, fname="workflows.xml"):
file = open(fname, "w")
file.write("<Workflows>")
for each_wf_key, each_wf_values in self.workflows.iteritems():
file.write("<workflow id='%d'>" % each_wf_key)
for each_task in each_wf_values :
#pprint.pprint(each_task)
file.write( each_task._debugLongXML() )
file.write("\n")
file.write("</workflow>")
file.write("</Workflows>")
file.close()
def showTaskTimeLine(self, num_wfs, simon_wf_results_summary = None, fname = 'showTaskTimeLine.png', show_vid_blocks = False):
print "showTaskTimeLine: Enter"
num_workflows = len(self.workflows.items())
print "num_workflows=" + str(num_workflows)
fig = plt.figure(dpi=100, figsize=(20.0, float(num_workflows)*1.5))
#fig = plt.figure()
annot_text = {
"wf_vid_id": [],
"x": [],
"y" : [],
"text" : [],
"colour" : []
}
for each_wf_key, each_wf_values in self.workflows.iteritems():
#ax = plt.subplot(1,num_workflows,each_wf_key)
dispatch_times = []
vid_count = 0
video_start_end_pos = {}
for each_task in each_wf_values :
sdt = each_task.get_scheduledDispatchTime()
dispatch_times.append(round(sdt,2))
if(show_vid_blocks == True):
if(each_task.get_isHeadVideoGop() == True):
if vid_count not in video_start_end_pos:
video_start_end_pos[vid_count] = {
'start_x' : round(sdt,2),
'end_x' : None
}
elif(each_task.get_isTailVideoGop() == True):
video_start_end_pos[vid_count]['end_x'] = round(sdt,2)
if(each_task.get_parentGopId() == 0 and each_task.get_frameIXinGOP() == 0):
annot_text["wf_vid_id"].append((each_wf_key, vid_count))
annot_text["x"].append(round(sdt,2))
annot_text["y"].append(each_wf_key+0.16)
text = str(each_task.get_frame_w()) + "x" + str(each_task.get_frame_h()) + "\n" + \
str(round(self.workflows_summary[each_wf_key][vid_count]['avg_dispatch_rate'],3)) + "\n" + \
str(each_task.get_scheduledDispatchTime()) #str(round(self.workflows_summary[each_wf_key][vid_count]['min_dispatch_rate'],3))
annot_text["text"].append(text)
if(simon_wf_results_summary != None):
try:
if(simon_wf_results_summary[each_wf_key][vid_count]['result'] == True):
annot_text["colour"].append('green')
else:
if(len(simon_wf_results_summary[each_wf_key][vid_count]['gops_in_outbuff']) > 0):
annot_text["colour"].append('#FF00AA')
else:
annot_text["colour"].append('#ff0000')
except:
annot_text["colour"].append("black")
vid_count = vid_count + 1
x = np.round(np.arange(0.0, max(dispatch_times), 0.01), 2)
## setting y-axis
i = 0
y = [-1] * len(x)
for each_x in x:
if(each_x in dispatch_times):
y[i] = each_wf_key
i = i+1
# plot
plt.scatter(x,y, s=2)
plt.hold(True)
# for key,val in video_start_end_pos.iteritems():
# plt.hlines(each_wf_key, val['start_x'], val['end_x'], linewidth=5, alpha=0.5, color='b')
# plt.hold(True)
#plt.hold(True)
plt.minorticks_on()
plt.grid(True, which='major', color='b', linestyle='-', alpha=0.2)
plt.grid(True, which='minor', color='b', linestyle='--', alpha=0.2)
#pprint.pprint(annot_text)
if(simon_wf_results_summary != None):
for i, x in enumerate(annot_text["x"]):
plt.annotate(annot_text["text"][i], (annot_text["x"][i],annot_text["y"][i]), color=annot_text["colour"][i], fontsize=6)
else:
for i, x in enumerate(annot_text["x"]):
plt.annotate(annot_text["text"][i], (annot_text["x"][i],annot_text["y"][i]), fontsize=6)
print "showTaskTimeLine: saving image : " + fname
plt.savefig(fname, bbox_inches='tight', dpi=100)
plt.close(fig)
#plt.show()
@staticmethod
def plot_show():
plt.show()
######################
## helper functions ##
######################
def _weightedChoice(self, weights, objects):
#http://stackoverflow.com/questions/10803135/weighted-choice-short-and-simple
"""Return a random item from objects, with the weighting defined by weights
(which must sum to 1)."""
cs = np.cumsum(weights) #An array of the weights, cumulatively summed.
idx = sum(cs < np.random.rand()) #Find the index of the first weight over a random value.
return objects[idx]
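# Worked example (illustrative): with weights = [0.2, 0.8], cs = [0.2, 1.0];
# a uniform draw u < 0.2 leaves sum(cs < u) == 0 (first object), while
# 0.2 <= u < 1.0 gives sum(cs < u) == 1 (second object), so objects are
# picked in proportion to their weights.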
def _genRandomNumList(self, list_len, exclusion_list):
count = 0
result = []
max_int = (SimParams.NUM_WORKFLOWS * SimParams.WFGEN_MAX_VIDS_PER_WF * SimParams.GOP_LENGTH) + \
(SimParams.NUM_WORKFLOWS * SimParams.WFGEN_MAX_VIDS_PER_WF)
while (count < list_len):
random_num = random.randint(1,max_int)
if(random_num not in exclusion_list):
result.append(random_num)
count += 1
if (len(result) < SimParams.GOP_LENGTH):
sys.exit('Error: _genRandomNumList:: error generating priorities')
else:
self.used_pri_values.extend(result)
return result
| gpl-3.0 |
arjoly/scikit-learn | sklearn/exceptions.py | 4 | 4328 | """
The :mod:`sklearn.exceptions` module includes all custom warnings and error
classes used across scikit-learn.
"""
__all__ = ['NotFittedError',
'ChangedBehaviorWarning',
'ConvergenceWarning',
'DataConversionWarning',
'DataDimensionalityWarning',
'EfficiencyWarning',
'FitFailedWarning',
'NonBLASDotWarning',
'UndefinedMetricWarning']
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples
--------
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
"""
class ChangedBehaviorWarning(UserWarning):
"""Warning class used to notify the user of any change in the behavior."""
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataConversionWarning(UserWarning):
"""Warning used to notify implicit data conversions happening in the code.
This warning occurs when some input data needs to be converted or
interpreted in a way that may not match the user's expectations.
For example, this warning may occur when the user
- passes an integer array to a function which expects float input and
will convert the input
- requests a non-copying operation, but a copy is required to meet the
implementation's data-type expectations;
- passes an input whose shape can be interpreted ambiguously.
"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality.
For example, in random projection, this warning is raised when the
number of components, which quantifies the dimensionality of the target
projection space, is higher than the number of features, which quantifies
the dimensionality of the original source space, to imply that the
dimensionality of the problem will not be reduced.
"""
class EfficiencyWarning(UserWarning):
"""Warning used to notify the user of inefficient computation.
This warning notifies the user that the efficiency may not be optimal due
to some reason which may be included as a part of the warning message.
This may be subclassed into a more specific Warning class.
"""
class FitFailedWarning(RuntimeWarning):
"""Warning class used if there is an error while fitting the estimator.
This Warning is used in meta estimators GridSearchCV and RandomizedSearchCV
and the cross-validation helper function cross_val_score to warn when there
is an error while fitting the estimator.
Examples
--------
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import FitFailedWarning
>>> import warnings
>>> warnings.simplefilter('always', FitFailedWarning)
>>> gs = GridSearchCV(LinearSVC(), {'C': [-1, -2]}, error_score=0)
>>> X, y = [[1, 2], [3, 4], [5, 6], [7, 8], [8, 9]], [0, 0, 0, 1, 1]
>>> with warnings.catch_warnings(record=True) as w:
... try:
... gs.fit(X, y) # This will raise a ValueError since C is < 0
... except ValueError:
... pass
... print(repr(w[-1].message))
... # doctest: +NORMALIZE_WHITESPACE
FitFailedWarning("Classifier fit failed. The score on this train-test
partition for these parameters will be set to 0.000000. Details:
\\nValueError('Penalty term must be positive; got (C=-2)',)",)
"""
class NonBLASDotWarning(EfficiencyWarning):
"""Warning used when the dot operation does not use BLAS.
This warning is used to notify the user that BLAS was not used for dot
operation and hence the efficiency may be affected.
"""
class UndefinedMetricWarning(UserWarning):
"""Warning used when the metric is invalid"""
| bsd-3-clause |
jkarnows/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 249 | 1982 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise (size= must be passed explicitly; the tuple as a positional
# argument would be interpreted as the mean, not the sample count)
y += 0.01 * np.random.normal(size=n_samples)
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
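# Note: per the documented ElasticNet objective the penalty is
#   alpha * l1_ratio * ||w||_1 + 0.5 * alpha * (1 - l1_ratio) * ||w||_2 ** 2,
# so l1_ratio=0.7 leans toward L1 (sparsity-inducing) with some L2 mixed in.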
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
rbiswas4/utils | binningutils.py | 1 | 2816 | #!/usr/bin/env python
import numpy as np
import math as pm
verbose = False
def nbinarray(numpyarray,
              binningcol,
              binsize,
              binmin,
              binmax):
"""
bins a numpy array in equal bins in the variable in the column
of the array indexed by the integer binningcol.
args:
binningcol: integer, mandatory
integer indexing the column of the array holding the
variable wrt which we are binning
binsize : float, mandatory
        binmin : float, mandatory
binmax : float, mandatory
    returns: a numpy array of elements x corresponding to the bins. Each
        element x is an array of the elements of the input numpyarray
        that are assigned to the bin
example usage:
notes:
"""
#First define the bins:
numrows , numcols = np.shape(numpyarray)
numbins = int(pm.floor((binmax - binmin )/binsize))
binningcolbins = np.linspace(binmin , binmax ,numbins+1)
digitizedindex = np.digitize(numpyarray[:,binningcol],
bins = binningcolbins)
binnedarray = []
for i in range(numbins):
binnedarray.append(numpyarray[digitizedindex==i+1])
    ret = np.array(binnedarray)
    if verbose:
        print "size of bins", map(len, ret)
return ret
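# Commented sketch of nbinarray on a small, made-up 2-column array, binning on
# column 0 with bins of width 0.5 over [0, 1):
#
#   a = np.array([[0.1, 1.0], [0.4, 2.0], [0.7, 3.0]])
#   bins = nbinarray(a, binningcol=0, binsize=0.5, binmin=0., binmax=1.)
#   # bins[0] holds the rows with 0.0 <= x < 0.5, bins[1] those with
#   # 0.5 <= x < 1.0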
def ngetbinnedvec(nbinnedarray, col):
    """Given an array of 2d numpy arrays (i.e. each having
    shape (numrows, numcols)), return an array of 1d
    numpy arrays composed of the col-th column of the
    2d arrays.
    example usage:
"""
numbins = len(nbinnedarray)
binnedvec = []
for i in range(numbins):
binnedvec.append(nbinnedarray[i][:,col])
return binnedvec
if __name__ == "__main__":
import sys
import numpy as np
import matplotlib.pyplot as plt
num = 10
#basic model: x is independent variable, y, z are dependent
    np.random.seed(4)  # seed must be called, not assigned, for the demo to be reproducible
x = np.random.random(size = num)
x.sort()
y = 2.0 * x
z = 0.5 * x * x + 1.5 * x + 3.0
#Set up a numpy array adding noise to y and z
a = np.zeros (shape = (num,3))
a [:,0 ] = x
a [:,1 ] = y + np.random.normal(size = num)
a [:,2 ] = z + np.random.normal(size = num)
#bin the array according to values of x which is in the col 0
#using uniform size bins from 0. to 1. of size 0.1
binnedarray = nbinarray ( a,
binningcol = 0,
binmin = 0.,
binmax = 1.0,
binsize = 0.1)
print binnedarray
print type(binnedarray)
    #sys.exit()
print "\n-------------------------\n"
xbinned= ngetbinnedvec (binnedarray, 0)
ybinned= ngetbinnedvec (binnedarray, 1)
#print xbinned
xavg = map (np.average , xbinned)
yavg = map (np.average , ybinned)
#xavg = map( lambda x : np.average(x ) , xbinned )
#yavg = map( lambda x : np.average(x) , ybinned)
#print map( lambda x , w : np.average(x, w), xbinned, ybinned)
plt.plot(x, y, 'k-')
plt.plot(a[:,0] , a[:,1], 'ks')
plt.plot(a[:,0], a[:,2], 'ro')
plt.plot(x,z , 'r--')
plt.plot( xavg, yavg, 'bd')
plt.show()
| mit |
BubuLK/sfepy | script/plot_times.py | 5 | 1722 | #!/usr/bin/env python
"""
Plot time steps, times of time steps and time deltas in a HDF5 results file.
"""
from __future__ import absolute_import
import sys
sys.path.append('.')
from argparse import ArgumentParser
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.postprocess.time_history import extract_times
helps = {
'logarithmic' :
'plot time steps in logarithmic scale',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-l', '--logarithmic',
action='store_true', dest='logarithmic',
default=False, help=helps['logarithmic'])
parser.add_argument('filename')
options = parser.parse_args()
filename = options.filename
plt.rcParams['lines.linewidth'] = 3
plt.rcParams['lines.markersize'] = 9
fontsize = 16
steps, times, nts, dts = extract_times(filename)
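    # mask the final delta (presumably undefined for the last step);
    # matplotlib simply skips NaN values when plotting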
dts[-1] = nm.nan
ax = plt.subplot(211)
if options.logarithmic:
l1, = ax.semilogy(steps, dts, 'b')
else:
l1, = ax.plot(steps, dts, 'b')
ax.set_xlabel('step', fontsize=fontsize)
ax.set_ylabel(r'$\Delta t$', fontsize=fontsize)
ax.grid(True)
ax = ax.twinx()
l2, = ax.plot(steps, times, 'g')
ax.set_ylabel(r'$t$', fontsize=fontsize)
ax.legend([l1, l2], [r'$\Delta t$', r'$t$'], loc=0)
ax = plt.subplot(212)
if options.logarithmic:
ax.semilogy(times, dts, 'b+')
else:
ax.plot(times, dts, 'b+')
ax.set_xlabel(r'$t$', fontsize=fontsize)
ax.set_ylabel(r'$\Delta t$', fontsize=fontsize)
ax.grid(True)
plt.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
saiwing-yeung/scikit-learn | sklearn/metrics/regression.py | 31 | 17366 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
from ..externals.six import string_types
import warnings
__all__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
    multioutput : array-like or string in ['raw_values', 'uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
    type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
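# Worked check of the weighted-multioutput example above: the per-output MAEs
# are [0.5, 1.0]; with weights [0.3, 0.7] the average is
# 0.3 * 0.5 + 0.7 * 1.0 = 0.85 (0.849... in floating point, hence the
# doctest ellipsis).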
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
        A non-negative floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
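    # per-output explained variance is 1 - Var[y_true - y_pred] / Var[y_true],
    # with all means and variances taken under the given sample weights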
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
            # passing None as weights to np.average results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
    would get an R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
Default value corresponds to 'variance_weighted', this behaviour is
deprecated since version 0.17 and will be changed to 'uniform_average'
starting from 0.19.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<https://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
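    # per-output R^2 = 1 - SS_res / SS_tot: SS_res is the (weighted) residual
    # sum of squares, SS_tot the (weighted) total sum of squares around the
    # weighted mean of y_true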
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
    # arbitrarily set to zero to avoid -inf scores; having a constant
    # y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value which is deprecated since "
"0.17, it will be changed to 'uniform_average' "
"starting from 0.19.",
DeprecationWarning)
multioutput = 'variance_weighted'
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
            # passing None as weights results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
| bsd-3-clause |
quaquel/EMAworkbench | ema_workbench/examples/flu_pairsplot.py | 1 | 1444 | '''
Created on 20 sep. 2011
.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
'''
import matplotlib.pyplot as plt
import numpy as np
from ema_workbench import load_results, ema_logging
from ema_workbench.analysis.pairs_plotting import (pairs_lines, pairs_scatter,
pairs_density)
ema_logging.log_to_stderr(level=ema_logging.DEFAULT_LEVEL)
# load the data
fh = './data/1000 flu cases no policy.tar.gz'
experiments, outcomes = load_results(fh)
# transform the results to the required format
# that is, we want to know the max peak and the casualties at the end of the
# run
tr = {}
# get time and remove it from the dict
time = outcomes.pop('TIME')
for key, value in outcomes.items():
if key == 'deceased population region 1':
tr[key] = value[:, -1] # we want the end value
else:
# we want the maximum value of the peak
max_peak = np.max(value, axis=1)
tr['max peak'] = max_peak
# we want the time at which the maximum occurred
# the code here is a bit obscure, I don't know why the transpose
# of value is needed. This however does produce the appropriate results
logical = value.T == np.max(value, axis=1)
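        # (presumably value has shape (n_runs, n_time): value.T, of shape
        #  (n_time, n_runs), broadcasts against the per-run maxima along the
        #  run axis, so the mask is True exactly at each run's peak)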
tr['time of max'] = time[logical.T]
pairs_scatter(experiments, tr, filter_scalar=False)
pairs_lines(experiments, outcomes)
pairs_density(experiments, tr, filter_scalar=False)
plt.show()
| bsd-3-clause |
nekrut/tools-iuc | tools/fsd/fsd_beforevsafter.py | 17 | 15642 | #!/usr/bin/env python
# Family size distribution of DCS from various steps of the Galaxy pipeline
#
# Author: Monika Heinzl & Gundula Povysil, Johannes-Kepler University Linz (Austria)
# Contact: [email protected]
#
# Takes a TXT file with tags of reads that were aligned to certain regions of the reference genome (optional),
# a TABULAR file with tags before the alignment to the SSCS, a FASTA file with reads that were part of the DCS and
# a FASTA file with tags after trimming as input (optional).
# The program produces a plot which shows the distribution of family sizes of the DCS from the input files and
# a CSV file with the data of the plot.
# USAGE: python FSD before vs after_no_refF1.3_FINAL.py --inputFile_SSCS filenameSSCS --inputName1 filenameSSCS --makeDCS filenameMakeDCS --afterTrimming filenameAfterTrimming --alignedTags DCSbamFile
# --output_tabular outputfile_name_tabular --output_pdf outputfile_name_pdf
import argparse
import sys
from collections import Counter
import matplotlib.pyplot as plt
import numpy
import pysam
from Bio import SeqIO
from matplotlib.backends.backend_pdf import PdfPages
plt.switch_backend('agg')
def readFileReferenceFree(file, delim):
with open(file, 'r') as dest_f:
data_array = numpy.genfromtxt(dest_f, skip_header=0, delimiter=delim, comments='#', dtype=str)
return data_array
def readFasta(file):
tag_consensus = []
fs_consensus = []
with open(file, "r") as consFile:
for record in SeqIO.parse(consFile, "fasta"):
tag_consensus.append(record.id)
line = record.description
a, b = line.split(" ")
fs1, fs2 = b.split("-")
fs_consensus.extend([fs1, fs2])
fs_consensus = numpy.array(fs_consensus).astype(int)
return (tag_consensus, fs_consensus)
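# Commented sketch of the FASTA header layout assumed by readFasta, using a
# hypothetical record:
#
#   >ACGTACGT 12-9
#
# record.id is the tag ("ACGTACGT"); record.description splits on the space
# into the tag and the two family sizes ("12-9"), presumably the ab and ba
# strand counts.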
def make_argparser():
parser = argparse.ArgumentParser(description='Analysis of read loss in duplex sequencing data')
parser.add_argument('--inputFile_SSCS',
help='Tabular File with three columns: ab or ba, tag and family size.')
parser.add_argument('--inputName1')
parser.add_argument('--makeDCS',
help='FASTA File with information about tag and family size in the header.')
parser.add_argument('--afterTrimming', default=None,
help='FASTA File with information about tag and family size in the header.')
parser.add_argument('--bamFile',
help='BAM file with aligned reads.')
parser.add_argument('--output_pdf', default="data.pdf", type=str,
help='Name of the pdf and tabular file.')
parser.add_argument('--output_tabular', default="data.tabular", type=str,
help='Name of the pdf and tabular file.')
return parser
def compare_read_families_read_loss(argv):
parser = make_argparser()
args = parser.parse_args(argv[1:])
SSCS_file = args.inputFile_SSCS
SSCS_file_name = args.inputName1
makeConsensus = args.makeDCS
afterTrimming = args.afterTrimming
ref_genome = args.bamFile
title_file = args.output_tabular
title_file2 = args.output_pdf
sep = "\t"
with open(title_file, "w") as output_file, PdfPages(title_file2) as pdf:
# PLOT
plt.rc('figure', figsize=(11.69, 8.27)) # A4 format
plt.rcParams['axes.facecolor'] = "E0E0E0" # grey background color
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 14
plt.rcParams['patch.edgecolor'] = "black"
fig = plt.figure()
plt.subplots_adjust(bottom=0.3)
list1 = []
colors = []
labels = []
# data with tags of SSCS
data_array = readFileReferenceFree(SSCS_file, "\t")
seq = numpy.array(data_array[:, 1])
tags = numpy.array(data_array[:, 2])
quant = numpy.array(data_array[:, 0]).astype(int)
# split data with all tags of SSCS after ab and ba strands
all_ab = seq[numpy.where(tags == "ab")[0]]
all_ba = seq[numpy.where(tags == "ba")[0]]
quant_ab_sscs = quant[numpy.where(tags == "ab")[0]]
quant_ba_sscs = quant[numpy.where(tags == "ba")[0]]
seqDic_ab = dict(zip(all_ab, quant_ab_sscs))
seqDic_ba = dict(zip(all_ba, quant_ba_sscs))
# get tags of the SSCS which form a DCS
# group large family sizes
bigFamilies = numpy.where(quant > 20)[0]
quant[bigFamilies] = 22
maximumX = numpy.amax(quant)
# find all unique tags and get the indices for ALL tags (ab AND ba)
u, index_unique, c = numpy.unique(numpy.array(seq), return_counts=True, return_index=True)
d = u[c > 1]
# get family sizes, tag for the duplicates
duplTags_double = quant[numpy.in1d(seq, d)]
list1.append(duplTags_double)
colors.append("#0000FF")
labels.append("before SSCS building")
duplTags = duplTags_double[0::2] # ab of DCS
duplTagsBA = duplTags_double[1::2] # ba of DCS
d2 = d[(duplTags >= 3) & (duplTagsBA >= 3)] # ab and ba FS>=3
# all SSCSs FS>=3
seq_unique, seqUnique_index = numpy.unique(seq, return_index=True)
seq_unique_FS = quant[seqUnique_index]
seq_unique_FS3 = seq_unique_FS[seq_unique_FS >= 3]
legend1 = "\ntotal nr. of tags (unique, FS>=1):\nDCS (before SSCS building, FS>=1):\ntotal nr. of tags (unique, FS>=3):\nDCS (before SSCS building, FS>=3):"
legend2 = "total numbers * \n{:,}\n{:,}\n{:,}\n{:,}".format(len(seq_unique_FS), len(duplTags),
len(seq_unique_FS3), len(d2))
plt.text(0.55, 0.14, legend1, size=11, transform=plt.gcf().transFigure)
plt.text(0.88, 0.14, legend2, size=11, transform=plt.gcf().transFigure)
# data make DCS
tag_consensus, fs_consensus = readFasta(makeConsensus)
# group large family sizes in the plot of fasta files
bigFamilies = numpy.where(fs_consensus > 20)[0]
fs_consensus[bigFamilies] = 22
list1.append(fs_consensus)
colors.append("#298A08")
labels.append("after DCS building")
legend3 = "after DCS building:"
legend4 = "{:,}".format(len(tag_consensus))
plt.text(0.55, 0.11, legend3, size=11, transform=plt.gcf().transFigure)
plt.text(0.88, 0.11, legend4, size=11, transform=plt.gcf().transFigure)
# data after trimming
if afterTrimming is not None:
tag_trimming, fs_trimming = readFasta(afterTrimming)
bigFamilies = numpy.where(fs_trimming > 20)[0]
fs_trimming[bigFamilies] = 22
list1.append(fs_trimming)
colors.append("#DF0101")
labels.append("after trimming")
legend5 = "after trimming:"
legend6 = "{:,}".format(len(tag_trimming))
plt.text(0.55, 0.09, legend5, size=11, transform=plt.gcf().transFigure)
plt.text(0.88, 0.09, legend6, size=11, transform=plt.gcf().transFigure)
# data of tags aligned to reference genome
if ref_genome is not None:
pysam.index(ref_genome)
bam = pysam.AlignmentFile(ref_genome, "rb")
seq_mut = []
for read in bam.fetch():
if not read.is_unmapped:
if '_' in read.query_name:
tags = read.query_name.split('_')[0]
else:
tags = read.query_name
seq_mut.append(tags)
# use only unique tags that were alignment to the reference genome
seq_mut = numpy.array(seq_mut)
seq_mut, seqMut_index = numpy.unique(seq_mut, return_index=True)
# get family sizes for each tag in the BAM file
quant_ab = []
quant_ba = []
for i in seq_mut:
quant_ab.append(seqDic_ab.get(i))
quant_ba.append(seqDic_ba.get(i))
quant_ab_ref = numpy.array(quant_ab)
quant_ba_ref = numpy.array(quant_ba)
quant_all_ref = numpy.concatenate((quant_ab_ref, quant_ba_ref))
bigFamilies = numpy.where(quant_all_ref > 20)[0] # group large family sizes
quant_all_ref[bigFamilies] = 22
list1.append(quant_all_ref)
colors.append("#04cec7")
labels.append("after alignment\nto reference")
legend7 = "after alignment to reference:"
length_DCS_ref = len(quant_ba_ref) # count of duplex tags that were aligned to reference genome
legend8 = "{:,}".format(length_DCS_ref)
plt.text(0.55, 0.07, legend7, size=11, transform=plt.gcf().transFigure)
plt.text(0.88, 0.07, legend8, size=11, transform=plt.gcf().transFigure)
counts = plt.hist(list1, bins=range(-1, maximumX + 1), stacked=False, label=labels, color=colors, align="left", alpha=1, edgecolor="black", linewidth=1)
ticks = numpy.arange(0, maximumX, 1)
ticks1 = [str(_) for _ in ticks]
ticks1[len(ticks1) - 1] = ">20"
plt.xticks(numpy.array(ticks), ticks1)
if ref_genome is not None:
count = numpy.array([v for k, v in sorted(Counter(quant_ab_ref).items())]) # count all family sizes from all ab strands
legend = "max. family size:\nabsolute frequency:\nrelative frequency:\n\ntotal nr. of reads:\n(before SSCS building)"
plt.text(0.1, 0.085, legend, size=11, transform=plt.gcf().transFigure)
legend = "AB\n{}\n{}\n{:.5f}\n\n{:,}" \
.format(max(quant_ab_ref), count[len(count) - 1], float(count[len(count) - 1]) / sum(count),
sum(numpy.array(data_array[:, 0]).astype(int)))
plt.text(0.35, 0.105, legend, size=11, transform=plt.gcf().transFigure)
count2 = numpy.array(
[v for k, v in sorted(Counter(quant_ba_ref).items())]) # count all family sizes from all ba strands
legend = "BA\n{}\n{}\n{:.5f}" \
.format(max(quant_ba_ref), count2[len(count2) - 1], float(count2[len(count2) - 1]) / sum(count2))
plt.text(0.45, 0.1475, legend, size=11, transform=plt.gcf().transFigure)
legend4 = "* In the plot, the family sizes of ab and ba strands and of both duplex tags were used.\nWhereas the total numbers indicate only the single count of the formed duplex tags."
plt.text(0.1, 0.02, legend4, size=11, transform=plt.gcf().transFigure)
plt.legend(loc='upper right', fontsize=14, bbox_to_anchor=(0.9, 1), frameon=True)
plt.title("Family size distribution of tags from various steps of the Du Novo pipeline", fontsize=14)
plt.xlabel("Family size", fontsize=14)
plt.ylabel("Absolute Frequency", fontsize=14)
plt.grid(b=True, which="major", color="#424242", linestyle=":")
plt.margins(0.01, None)
        pdf.savefig(fig, bbox_inches="tight")
plt.close()
# write information about plot into a csv file
output_file.write("Dataset:{}{}\n".format(sep, SSCS_file_name))
if ref_genome is not None:
output_file.write("{}AB{}BA\n".format(sep, sep))
output_file.write("max. family size:{}{}{}{}\n".format(sep, max(quant_ab_ref), sep, max(quant_ba_ref)))
output_file.write(
"absolute frequency:{}{}{}{}\n".format(sep, count[len(count) - 1], sep, count2[len(count2) - 1]))
output_file.write(
"relative frequency:{}{:.3f}{}{:.3f}\n\n".format(sep, float(count[len(count) - 1]) / sum(count), sep,
float(count2[len(count2) - 1]) / sum(count2)))
output_file.write("\ntotal nr. of reads before SSCS building{}{}\n".format(sep, sum(numpy.array(data_array[:, 0]).astype(int))))
output_file.write("\n\nValues from family size distribution\n")
        if afterTrimming is None and ref_genome is None:
            output_file.write("{}before SSCS building{}after DCS building\n".format(sep, sep))
for fs, sscs, dcs in zip(counts[1][2:len(counts[1])], counts[0][0][2:len(counts[0][0])], counts[0][1][2:len(counts[0][1])]):
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}{}{}{}\n".format(fs, sep, int(sscs), sep, int(dcs)))
output_file.write("sum{}{}{}{}\n".format(sep, int(sum(counts[0][0])), sep, int(sum(counts[0][1]))))
elif afterTrimming is None or ref_genome is None:
if afterTrimming is None:
output_file.write("{}before SSCS building{}after DCS building{}after alignment to reference\n".format(sep, sep, sep))
elif ref_genome is None:
output_file.write("{}before SSCS building{}atfer DCS building{}after trimming\n".format(sep, sep, sep))
for fs, sscs, dcs, reference in zip(counts[1][2:len(counts[1])], counts[0][0][2:len(counts[0][0])], counts[0][1][2:len(counts[0][1])], counts[0][2][2:len(counts[0][2])]):
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}{}{}{}{}{}\n".format(fs, sep, int(sscs), sep, int(dcs), sep, int(reference)))
output_file.write("sum{}{}{}{}{}{}\n".format(sep, int(sum(counts[0][0])), sep, int(sum(counts[0][1])), sep, int(sum(counts[0][2]))))
else:
output_file.write("{}before SSCS building{}after DCS building{}after trimming{}after alignment to reference\n".format(sep, sep, sep, sep))
for fs, sscs, dcs, trim, reference in zip(counts[1][2:len(counts[1])], counts[0][0][2:len(counts[0][0])], counts[0][1][2:len(counts[0][1])], counts[0][2][2:len(counts[0][2])], counts[0][3][2:len(counts[0][3])]):
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}{}{}{}{}{}{}{}\n".format(fs, sep, int(sscs), sep, int(dcs), sep, int(trim), sep, int(reference)))
output_file.write("sum{}{}{}{}{}{}{}{}\n".format(sep, int(sum(counts[0][0])), sep, int(sum(counts[0][1])), sep, int(sum(counts[0][2])), sep, int(sum(counts[0][3]))))
output_file.write("\n\nIn the plot, the family sizes of ab and ba strands and of both duplex tags were used.\nWhereas the total numbers indicate only the single count of the formed duplex tags.\n")
output_file.write("total nr. of tags (unique, FS>=1){}{}\n".format(sep, len(seq_unique_FS)))
output_file.write("DCS (before SSCS building, FS>=1){}{}\n".format(sep, len(duplTags)))
output_file.write("total nr. of tags (unique, FS>=3){}{}\n".format(sep, len(seq_unique_FS3)))
output_file.write("DCS (before SSCS building, FS>=3){}{}\n".format(sep, len(d2)))
output_file.write("after DCS building{}{}\n".format(sep, len(tag_consensus)))
if afterTrimming is not None:
output_file.write("after trimming{}{}\n".format(sep, len(tag_trimming)))
if ref_genome is not None:
output_file.write("after alignment to reference{}{}\n".format(sep, length_DCS_ref))
print("Files successfully created!")
if __name__ == '__main__':
sys.exit(compare_read_families_read_loss(sys.argv))
| mit |
eteq/astropy-helpers | astropy_helpers/sphinx/ext/tests/test_docscrape.py | 2 | 18105 | # -*- encoding:utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys, textwrap
from ..docscrape import NumpyDocString, FunctionDoc, ClassDoc
from ..docscrape_sphinx import SphinxDocString, SphinxClassDoc
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
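# sixu mimics six.u(): on Python 3 string literals are already unicode, while
# on Python 2 the escape sequences in the byte literal are decoded into a
# unicode object.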
doc_txt = '''\
numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
    shape given is (m,n,...), then the shape of `out` is
    (m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
See Also
--------
some, other, funcs
otherfunc : relationship
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss
'''
doc = NumpyDocString(doc_txt)
def test_signature():
assert doc['Signature'].startswith('numpy.multivariate_normal(')
assert doc['Signature'].endswith('spam=None)')
def test_summary():
assert doc['Summary'][0].startswith('Draw values')
assert doc['Summary'][-1].endswith('covariance.')
def test_extended_summary():
assert doc['Extended Summary'][0].startswith('The multivariate normal')
def test_parameters():
assert len(doc['Parameters']) == 3
assert [n for n,_,_ in doc['Parameters']] == ['mean','cov','shape']
arg, arg_type, desc = doc['Parameters'][1]
assert arg_type == '(N, N) ndarray'
assert desc[0].startswith('Covariance matrix')
assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3'
def test_other_parameters():
assert len(doc['Other Parameters']) == 1
assert [n for n,_,_ in doc['Other Parameters']] == ['spam']
arg, arg_type, desc = doc['Other Parameters'][0]
assert arg_type == 'parrot'
assert desc[0].startswith('A parrot off its mortal coil')
def test_returns():
assert len(doc['Returns']) == 2
arg, arg_type, desc = doc['Returns'][0]
assert arg == 'out'
assert arg_type == 'ndarray'
assert desc[0].startswith('The drawn samples')
assert desc[-1].endswith('distribution.')
arg, arg_type, desc = doc['Returns'][1]
assert arg == 'list of str'
assert arg_type == ''
assert desc[0].startswith('This is not a real')
assert desc[-1].endswith('anonymous return values.')
def test_notes():
assert doc['Notes'][0].startswith('Instead')
assert doc['Notes'][-1].endswith('definite.')
assert len(doc['Notes']) == 17
def test_references():
assert doc['References'][0].startswith('..')
assert doc['References'][-1].endswith('2001.')
def test_examples():
assert doc['Examples'][0].startswith('>>>')
assert doc['Examples'][-1].endswith('True]')
def test_index():
assert doc['index']['default'] == 'random'
assert len(doc['index']) == 2
assert len(doc['index']['refguide']) == 2
def non_blank_line_by_line_compare(a,b):
a = textwrap.dedent(a)
b = textwrap.dedent(b)
a = [l.rstrip() for l in a.split('\n') if l.strip()]
b = [l.rstrip() for l in b.split('\n') if l.strip()]
for n,line in enumerate(a):
if not line == b[n]:
raise AssertionError("Lines %s of a and b differ: "
"\n>>> %s\n<<< %s\n" %
(n,line,b[n]))
def test_str():
non_blank_line_by_line_compare(str(doc),
"""numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
    shape given is (m,n,...), then the shape of `out` is
    (m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
See Also
--------
`some`_, `other`_, `funcs`_
`otherfunc`_
relationship
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss""")
def test_sphinx_str():
sphinx_doc = SphinxDocString(doc_txt)
non_blank_line_by_line_compare(str(sphinx_doc),
"""
.. index:: random
single: random;distributions, random;gauss
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
:Parameters:
**mean** : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
**cov** : (N, N) ndarray
Covariance matrix of the distribution.
**shape** : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
:Returns:
**out** : ndarray
The drawn samples, arranged according to `shape`. If the
        shape given is (m,n,...), then the shape of `out` is
        (m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
:Other Parameters:
**spam** : parrot
A parrot off its mortal coil.
:Raises:
**RuntimeError**
Some error
:Warns:
**RuntimeWarning**
Some warning
.. warning::
Certain warnings apply.
.. seealso::
:obj:`some`, :obj:`other`, :obj:`funcs`
:obj:`otherfunc`
relationship
.. rubric:: Notes
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
.. rubric:: References
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
.. only:: latex
[1]_, [2]_
.. rubric:: Examples
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
""")
doc2 = NumpyDocString("""
    Returns array of indices of the maximum values along the given axis.
Parameters
----------
a : {array_like}
Array to look in.
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis""")
def test_parameters_without_extended_description():
assert len(doc2['Parameters']) == 2
doc3 = NumpyDocString("""
my_signature(*params, **kwds)
Return this and that.
""")
def test_escape_stars():
    signature = str(doc3).split('\n')[0]
    assert signature == 'my_signature(\*params, \*\*kwds)'
doc4 = NumpyDocString(
"""a.conj()
Return an array with all complex-valued elements conjugated.""")
def test_empty_extended_summary():
assert doc4['Extended Summary'] == []
doc5 = NumpyDocString(
"""
a.something()
Raises
------
LinAlgException
If array is singular.
Warns
-----
SomeWarning
If needed
""")
def test_raises():
assert len(doc5['Raises']) == 1
name,_,desc = doc5['Raises'][0]
assert name == 'LinAlgException'
assert desc == ['If array is singular.']
def test_warns():
assert len(doc5['Warns']) == 1
name,_,desc = doc5['Warns'][0]
assert name == 'SomeWarning'
assert desc == ['If needed']
def test_see_also():
doc6 = NumpyDocString(
"""
z(x,theta)
See Also
--------
func_a, func_b, func_c
func_d : some equivalent func
foo.func_e : some other func over
multiple lines
func_f, func_g, :meth:`func_h`, func_j,
func_k
:obj:`baz.obj_q`
:class:`class_j`: fubar
foobar
""")
assert len(doc6['See Also']) == 12
for func, desc, role in doc6['See Also']:
if func in ('func_a', 'func_b', 'func_c', 'func_f',
'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
assert(not desc)
else:
assert(desc)
if func == 'func_h':
assert role == 'meth'
elif func == 'baz.obj_q':
assert role == 'obj'
elif func == 'class_j':
assert role == 'class'
else:
assert role is None
if func == 'func_d':
assert desc == ['some equivalent func']
elif func == 'foo.func_e':
assert desc == ['some other func over', 'multiple lines']
elif func == 'class_j':
assert desc == ['fubar', 'foobar']
def test_see_also_print():
class Dummy(object):
"""
See Also
--------
func_a, func_b
func_c : some relationship
goes here
func_d
"""
pass
obj = Dummy()
s = str(FunctionDoc(obj, role='func'))
assert(':func:`func_a`, :func:`func_b`' in s)
assert(' some relationship' in s)
assert(':func:`func_d`' in s)
doc7 = NumpyDocString("""
Doc starts on second line.
""")
def test_empty_first_line():
assert doc7['Summary'][0].startswith('Doc starts')
def test_no_summary():
str(SphinxDocString("""
Parameters
----------"""))
def test_unicode():
doc = SphinxDocString("""
öäöäöäöäöåååå
öäöäöäööäååå
Parameters
----------
ååå : äää
ööö
Returns
-------
ååå : ööö
äää
""")
assert isinstance(doc['Summary'][0], str)
assert doc['Summary'][0] == 'öäöäöäöäöåååå'
def test_plot_examples():
cfg = dict(use_plots=True)
doc = SphinxDocString("""
Examples
--------
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3],[4,5,6])
>>> plt.show()
""", config=cfg)
assert 'plot::' in str(doc), str(doc)
doc = SphinxDocString("""
Examples
--------
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3],[4,5,6])
plt.show()
""", config=cfg)
assert str(doc).count('plot::') == 1, str(doc)
def test_class_members():
class Dummy(object):
"""
Dummy class.
"""
def spam(self, a, b):
"""Spam\n\nSpam spam."""
pass
def ham(self, c, d):
"""Cheese\n\nNo cheese."""
pass
@property
def spammity(self):
"""Spammity index"""
return 0.95
class Ignorable(object):
"""local class, to be ignored"""
pass
for cls in (ClassDoc, SphinxClassDoc):
doc = cls(Dummy, config=dict(show_class_members=False))
assert 'Methods' not in str(doc), (cls, str(doc))
assert 'spam' not in str(doc), (cls, str(doc))
assert 'ham' not in str(doc), (cls, str(doc))
assert 'spammity' not in str(doc), (cls, str(doc))
assert 'Spammity index' not in str(doc), (cls, str(doc))
doc = cls(Dummy, config=dict(show_class_members=True))
assert 'Methods' in str(doc), (cls, str(doc))
assert 'spam' in str(doc), (cls, str(doc))
assert 'ham' in str(doc), (cls, str(doc))
assert 'spammity' in str(doc), (cls, str(doc))
if cls is SphinxClassDoc:
assert '.. autosummary::' in str(doc), str(doc)
else:
assert 'Spammity index' in str(doc), str(doc)
def test_duplicate_signature():
# Duplicate function signatures occur e.g. in ufuncs, when the
# automatic mechanism adds one, and a more detailed comes from the
# docstring itself.
doc = NumpyDocString(
"""
z(x1, x2)
z(a, theta)
""")
assert doc['Signature'].strip() == 'z(a, theta)'
class_doc_txt = """
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Methods
-------
a
b
c
Examples
--------
For usage examples, see `ode`.
"""
def test_class_members_doc():
doc = ClassDoc(None, class_doc_txt)
non_blank_line_by_line_compare(str(doc),
"""
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Examples
--------
For usage examples, see `ode`.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Methods
-------
a
b
c
.. index::
""")
def test_class_members_doc_sphinx():
doc = SphinxClassDoc(None, class_doc_txt)
non_blank_line_by_line_compare(str(doc),
"""
Foo
:Parameters:
**f** : callable ``f(t, y, *f_args)``
Aaa.
**jac** : callable ``jac(t, y, *jac_args)``
Bbb.
.. rubric:: Examples
For usage examples, see `ode`.
.. rubric:: Attributes
=== ==========
t (float) Current time.
y (ndarray) Current variable values.
=== ==========
.. rubric:: Methods
=== ==========
a
b
c
=== ==========
""")
| bsd-3-clause |
duaneloh/ExpandMaximizeCompress | utils/autoplot_unstable.py | 2 | 15792 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import sys
import Tkinter as Tk
import os
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.gridspec as gridspec
import matplotlib.patches as patches
import time
from glob import glob
import re
class Plotter:
def __init__(self, master, size=200):
self.master = master
self.size = size
self.center = self.size/2
self.max_iter = 0
self.fname = Tk.StringVar()
self.logfname = Tk.StringVar()
self.rangestr = Tk.StringVar()
self.imagename = Tk.StringVar()
self.log_imagename = Tk.StringVar()
self.layernum = Tk.IntVar()
self.ifcheck = Tk.IntVar()
self.iter = Tk.IntVar()
self.orientnum = set()
self.orient = []
self.log_txt = ""
self.fname.set('data/output/intens_001.bin')
self.logfname.set('EMC.log')
self.imagename.set('images/' + os.path.splitext(os.path.basename(self.fname.get()))[0] + '.png')
self.log_imagename.set('images/log_fig.png')
self.image_exists = False
self.rangestr.set(str(1.))
self.layernum.set(self.center)
self.ifcheck.set(0)
self.iter.set(0)
self.fig = plt.figure(figsize=(14,5))
self.fig.subplots_adjust(left=0.0, bottom=0.00, right=0.99, wspace=0.0)
self.canvas = FigureCanvasTkAgg(self.fig, self.master)
self.canvas.get_tk_widget().grid(row=0,column=0)
self.log_fig = plt.figure(figsize=(14,5), facecolor='white')
#self.log_fig.subplots_adjust(left=0.0, bottom=0.00, right=0.99, wspace=0.0)
self.plotcanvas = FigureCanvasTkAgg(self.log_fig, self.master)
self.plotcanvas.get_tk_widget().grid(row=1,column=0)
self.options = Tk.Frame(self.master,relief=Tk.GROOVE,borderwidth=5,width=400, height=200)
#self.options.grid(row=0,column=1,rowspan=2,sticky=Tk.N+Tk.S)
self.options.grid(row=0,column=1,sticky=Tk.N)
self.log_display = Tk.Frame(self.master,relief=Tk.GROOVE,borderwidth=5,width=400, height=200)
self.log_display.grid(row=1,column=1,sticky=Tk.N)
self.old_fname = self.fname.get()
self.old_rangestr = self.rangestr.get()
self.master.bind('<Return>', self.parse_and_plot)
self.master.bind('<KP_Enter>', self.parse_and_plot)
self.master.bind('<Control-s>', self.save_plot)
self.master.bind('<Control-q>', self.quit_)
self.master.bind('<Up>', self.increment_layer)
self.master.bind('<Down>', self.decrement_layer)
self.init_UI()
def init_UI(self):
line = Tk.Frame(self.options)
line.pack(fill=Tk.X)
Tk.Label(line,text="Log Filename: ").pack(side=Tk.LEFT)
Tk.Entry(line,textvariable=self.logfname,width=20).pack(side=Tk.LEFT, fill=Tk.X, expand=1)
Tk.Label(line,text="PlotMax: ").pack(side=Tk.LEFT, fill=Tk.X)
Tk.Entry(line,textvariable=self.rangestr,width=10).pack(side=Tk.LEFT)
line = Tk.Frame(self.options)
line.pack(fill=Tk.X)
Tk.Label(line,text="Filename: ").pack(side=Tk.LEFT)
Tk.Entry(line,textvariable=self.fname,width=45).pack(side=Tk.LEFT, fill=Tk.X, expand=1)
line = Tk.Frame(self.options)
line.pack(fill=Tk.X)
Tk.Label(line,text="Image name: ").pack(side=Tk.LEFT)
Tk.Entry(line,textvariable=self.imagename,width=30).pack(side=Tk.LEFT, fill=Tk.X, expand=1)
Tk.Button(line,text="Save",command=self.save_plot).pack(side=Tk.LEFT)
line = Tk.Frame(self.options)
line.pack(fill=Tk.X)
Tk.Label(line,text="Log image name: ").pack(side=Tk.LEFT)
Tk.Entry(line,textvariable=self.log_imagename,width=30).pack(side=Tk.LEFT, fill=Tk.X, expand=1)
Tk.Button(line,text="Save",command=self.save_log_plot).pack(side=Tk.LEFT)
line = Tk.Frame(self.options)
line.pack(fill=Tk.BOTH, expand=1)
Tk.Label(line,text='Layer no. ').pack(side=Tk.LEFT)
Tk.Button(line,text="-",command=self.decrement_layer).pack(side=Tk.LEFT,fill=Tk.Y)
self.layerSlider = Tk.Scale(line,from_=0,to=int(self.size),orient=Tk.HORIZONTAL,length=250,width=20,
variable=self.layernum,command=self.change_iter)
self.layerSlider.pack(side=Tk.LEFT, expand=1, fill=Tk.BOTH)
Tk.Button(line,text="+",command=self.increment_layer).pack(side=Tk.LEFT,fill=Tk.Y)
line = Tk.Frame(self.options)
line.pack(fill=Tk.BOTH, expand=1)
Tk.Label(line,text='Iteration: ').pack(side=Tk.LEFT)
Tk.Button(line,text="-",command=self.decrement_iter).pack(side=Tk.LEFT,fill=Tk.Y)
self.slider = Tk.Scale(line,from_=0,to=self.max_iter,orient=Tk.HORIZONTAL,length=250,width=20,
variable=self.iter,command=None)
self.slider.pack(side=Tk.LEFT, expand=1, fill=Tk.BOTH)
Tk.Button(line,text="+",command=self.increment_iter).pack(side=Tk.LEFT,fill=Tk.Y)
line = Tk.Frame(self.options)
line.pack(fill=Tk.X)
Tk.Button(line,text="Check",command=self.check_for_new).pack(side=Tk.LEFT)
Tk.Checkbutton(line,text="Keep checking",variable=self.ifcheck,command=self.keep_checking).pack(side=Tk.LEFT)
line = Tk.Frame(self.options)
line.pack(fill=Tk.X)
Tk.Button(line,text="Quit",command=self.master.quit).pack(side=Tk.RIGHT)
Tk.Button(line,text="Reparse",command=self.force_plot).pack(side=Tk.RIGHT)
Tk.Button(line,text="Plot",command=self.parse_and_plot).pack(side=Tk.RIGHT)
if os.path.exists('recon.log'):
with open("recon.log", 'r') as f:
all_lines = ''.join(f.readlines())
else:
all_lines = ''
scroll2 = Tk.Scrollbar(self.options)
self.txt2 = Tk.Text(self.options, height=10, width=70, font=("Arial",8))
scroll2.pack(side=Tk.RIGHT, fill=Tk.Y, expand=1)
self.txt2.pack(side=Tk.LEFT, fill=Tk.Y, expand=1)
scroll2.config(command=self.txt2.yview)
self.txt2.config(yscrollcommand=scroll2.set)
self.txt2.insert(Tk.END, all_lines)
scroll = Tk.Scrollbar(self.log_display)
self.txt = Tk.Text(self.log_display, height=28, width=70, font=("Arial",8))
scroll.pack(side=Tk.RIGHT, fill=Tk.Y, expand=1)
self.txt.pack(side=Tk.LEFT, fill=Tk.Y, expand=1)
scroll.config(command=self.txt.yview)
self.txt.config(yscrollcommand=scroll.set)
self.txt.insert(Tk.END, self.log_txt)
def plot_vol(self, num):
self.imagename.set('images/' + os.path.splitext(os.path.basename(self.fname.get()))[0] + '.png')
rangemax = float(self.rangestr.get())
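        # Raise intensities to the 0.2 power (a gamma-style stretch) so the
        # volume's large dynamic range stays visible on screen; the three
        # slices below cut the volume along each principal axis.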
a = self.vol[num,:,:]**0.2
b = self.vol[:,num,:]**0.2
c = self.vol[:,:,num]**0.2
self.fig.clf()
grid = gridspec.GridSpec(1,3, wspace=0., hspace=0.)
s1 = plt.Subplot(self.fig, grid[:,0])
s1.imshow(a, vmin=0, vmax=rangemax, cmap='jet', interpolation='none')
s1.set_title("YZ plane", y=1.01)
s1.axis('off')
self.fig.add_subplot(s1)
s2 = plt.Subplot(self.fig, grid[:,1])
s2.matshow(b, vmin=0, vmax=rangemax, cmap='jet', interpolation='none')
s2.set_title("XZ plane", y=1.01)
s2.axis('off')
self.fig.add_subplot(s2)
s3 = plt.Subplot(self.fig, grid[:,2])
s3.matshow(c, vmin=0, vmax=rangemax, cmap='jet', interpolation='none')
s3.set_title("XY plane", y=1.01)
s3.axis('off')
self.fig.add_subplot(s3)
self.canvas.show()
self.image_exists = True
self.old_rangestr = self.rangestr.get()
def parse(self):
fname = self.fname.get()
if os.path.isfile(fname):
            f = open(fname, "rb")
else:
print "Unable to open", fname
return
self.vol = np.fromfile(f, dtype='f8')
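        # The file stores a cubic grid of float64 values; recover the edge
        # length from the cube root of the total element count.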
self.size = int(np.ceil(np.power(len(self.vol), 1./3.)))
self.vol = self.vol.reshape(self.size, self.size, self.size)
        self.center = self.size // 2
if not self.image_exists:
self.layernum.set(self.center)
self.layerSlider.configure(to=int(self.size))
self.old_fname = fname
def plot_log(self):
with open(self.logfname.get(), 'r') as f:
all_lines = f.readlines()
self.log_txt = ''.join(all_lines)
self.txt.delete('1.0', Tk.END)
self.txt.insert(Tk.END, self.log_txt)
lines = [l.rstrip().split() for l in all_lines]
flag = False
loglines = []
for l in lines:
if len(l) < 1:
continue
if flag is True:
loglines.append(l)
elif l[0] == 'Iter':
flag = True
# Read orientation files only if they haven't already been read
o_files = sorted(glob("data/orientations/*.bin"))
if len(o_files) > 0:
for p in o_files:
fn = os.path.split(p)[-1]
label = int(re.search("orientations_(\d+).bin", fn).groups(1)[0])
if label not in self.orientnum:
self.orientnum.add(label)
                    with open(p, 'rb') as f:
#self.orient.append(np.asarray([int(l.rstrip()) for l in f.readlines()]))
self.orient.append(np.fromfile(f, sep="", dtype='int32'))
else:
#print "skipping", label
pass
else:
o_files = sorted(glob("data/orientations/*.dat"))
for p in o_files:
fn = os.path.split(p)[-1]
label = int(re.search("orientations_(\d+).dat", fn).groups(1)[0])
if label not in self.orientnum:
print "reading ASCII file", fn
self.orientnum.add(label)
with open(p, 'r') as f:
self.orient.append(np.asarray([int(l.rstrip()) for l in f.readlines()]))
else:
#print "skipping", label
pass
loglines = np.array(loglines)
if len(loglines) == 0:
return
iter = loglines[:,0].astype(np.int32)
change = loglines[:,2].astype(np.float64)
info = loglines[:,3].astype(np.float64)
like = loglines[:,4].astype(np.float64)
num_rot = loglines[:,5].astype(np.int32)
beta = loglines[:,6].astype(np.float64)
num_rot_change = np.append(np.where(np.diff(num_rot)>0)[0], num_rot.shape[0])
beta_change = np.where(np.diff(beta)>0.)[0]
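        # num_rot changes whenever the quaternion sampling is refined, which
        # re-indexes the orientations. Sort each constant-num_rot block of
        # iterations by that block's final orientation labels so rows stay
        # comparable across refinement boundaries in the plot below.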
o_array = np.asarray(self.orient)
istart = 0
for i in range(len(num_rot_change)):
istop = num_rot_change[i]
            sort_order = o_array[istop-1].argsort()
            for index in np.arange(istart, istop):
                o_array[index] = o_array[index][sort_order]
istart = istop
o_array = o_array.T
self.log_fig.clf()
grid = gridspec.GridSpec(2,3, wspace=0.3, hspace=0.2)
grid.update(left=0.05, right=0.99, hspace=0.0, wspace=0.2)
s1 = plt.Subplot(self.log_fig, grid[:,0])
s1.plot(iter, change, 'o-')
s1.set_yscale('log')
s1.set_xlabel('Iteration')
s1.set_ylabel('RMS change')
s1_lim = s1.get_ylim()
s1.set_ylim(s1_lim)
for i in beta_change:
s1.plot([i+1,i+1], s1_lim,'k--',lw=1)
for i in num_rot_change[:-1]:
s1.plot([i+1,i+1], s1_lim,'r--',lw=1)
self.log_fig.add_subplot(s1)
s2 = plt.Subplot(self.log_fig, grid[0,1])
s2.plot(iter, info, 'o-')
s2.set_xlabel('Iteration')
s2.set_ylabel(r'Mutual info. $I(K,\Omega)$')
s2_lim = s2.get_ylim()
s2.set_ylim(s2_lim)
for i in beta_change:
s2.plot([i+1,i+1], s2_lim,'k--',lw=1)
for i in num_rot_change[:-1]:
s2.plot([i+1,i+1], s2_lim,'r--',lw=1)
self.log_fig.add_subplot(s2)
s3 = plt.Subplot(self.log_fig, grid[1,1])
s3.plot(iter[1:], like[1:], 'o-')
s3.set_xlabel('Iteration')
s3.set_ylabel('Avg log-likelihood')
s3_lim = s3.get_ylim()
s3.set_ylim(s3_lim)
for i in beta_change:
s3.plot([i+1,i+1], s3_lim,'k--',lw=1)
for i in num_rot_change[:-1]:
s3.plot([i+1,i+1], s3_lim,'r--',lw=1)
self.log_fig.add_subplot(s3)
s4 = plt.Subplot(self.log_fig, grid[:,2])
sh = o_array.shape
s4.imshow(o_array**0.5, aspect=(1.*sh[1]/sh[0]), extent=[1,sh[1],sh[0],0])
s4.get_yaxis().set_ticks([])
s4.set_xlabel('Iteration')
s4.set_ylabel('Most likely orientations of data\n(sorted/colored by last iteration\'s quat)')
self.log_fig.add_subplot(s4)
grid.tight_layout(self.log_fig)
self.plotcanvas.show()
def parse_and_plot(self, event=None):
if not self.image_exists:
self.parse()
self.plot_vol(self.layernum.get())
elif self.old_fname == self.fname.get() and self.old_rangestr != self.rangestr.get():
self.plot_vol(self.layernum.get())
else:
self.parse()
self.plot_vol(self.layernum.get())
def check_for_new(self, event=None):
with open(self.logfname.get(), 'r') as f:
last_line = f.readlines()[-1].rstrip().split()
try:
iter = int(last_line[0])
except ValueError:
iter = 0
if iter > 0 and self.max_iter != iter:
self.fname.set('data/output/intens_%.3d.bin' % iter)
self.max_iter = iter
self.slider.configure(to=self.max_iter)
self.iter.set(iter)
self.plot_log()
self.parse_and_plot()
def keep_checking(self, event=None):
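        # While the checkbox stays ticked, poll the log file and re-plot
        # every 5 seconds via Tk's after() timer.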
        if self.ifcheck.get() == 1:
self.check_for_new()
self.master.after(5000, self.keep_checking)
def force_plot(self, event=None):
self.parse()
self.plot_vol(self.layernum.get())
def increment_layer(self, event=None):
self.layernum.set(min(self.layernum.get()+1, self.size-1))
self.plot_vol(self.layernum.get())
def decrement_layer(self, event=None):
self.layernum.set(max(self.layernum.get()-1, 0))
self.plot_vol(self.layernum.get())
def increment_iter(self, event=None):
self.iter.set(min(self.iter.get()+1, self.max_iter))
if self.iter.get() >= 0:
self.fname.set('data/output/intens_%.3d.bin' % self.iter.get())
self.parse_and_plot()
def decrement_iter(self, event=None):
self.iter.set(max(self.iter.get()-1, 0))
if self.iter.get() >= 0:
self.fname.set('data/output/intens_%.3d.bin' % self.iter.get())
self.parse_and_plot()
def change_iter(self, event=None):
if self.iter.get() >= 0:
self.fname.set('data/output/intens_%.3d.bin' % self.iter.get())
def save_plot(self, event=None):
self.fig.savefig(self.imagename.get(), bbox_inches='tight')
print "Saved to", self.imagename.get()
def save_log_plot(self, event=None):
self.log_fig.savefig(self.log_imagename.get(), bbox_inches='tight')
print "Saved to", self.log_imagename.get()
def quit_(self, event=None):
self.master.quit()
root = Tk.Tk()
plotter = Plotter(root)
root.mainloop()
| gpl-3.0 |
anjsimmo/simple-ml-pipeline | learners/traveltime_linearvol.py | 1 | 2019 | from sklearn import linear_model
import json
import numpy as np
import pandas as pd
import datatables.traveltime
def write_model(regr, model_file):
"""
Write linear model to file
regr -- trained sklearn.linear_model
    model_file -- path of the output file
"""
model_params = {
'coef': list(regr.coef_),
'intercept': regr.intercept_
}
model_str = json.dumps(model_params)
with open(model_file, 'w') as out_f:
out_f.write(model_str)
def load_model(model_file):
"""
Load linear model from file
    model_file -- path of the saved model file
returns -- trained sklearn.linear_model
"""
with open(model_file, 'r') as model_f:
model_str = model_f.read()
model_params = json.loads(model_str)
regr = linear_model.LinearRegression()
regr.coef_ = np.array(model_params['coef'])
regr.intercept_ = model_params['intercept']
return regr
def train(train_data_file, model_file):
data = datatables.traveltime.read_xs(train_data_file)
# Extract Features
# We create the feature $volume^2$, in order to allow the regression algorithm to find quadratic fits.
# Turn list into a n*1 design matrix. At this stage, we only have a single feature in each row.
vol = data['volume'].values[:, np.newaxis]
# Add x^2 as feature to allow quadratic regression
xs = np.hstack([vol, vol**2])
y = data['y'].values # travel times
regr = linear_model.LinearRegression()
regr.fit(xs, y)
write_model(regr, model_file)
def predict(model_file, test_xs_file, output_file):
regr = load_model(model_file)
data = datatables.traveltime.read_xs(test_xs_file)
# Turn list into a n*1 design matrix. At this stage, we only have a single feature in each row.
vol = data['volume'].values[:, np.newaxis]
# Add x^2 as feature to allow quadratic regression
xs = np.hstack([vol, vol**2])
y_pred = regr.predict(xs)
data['pred'] = y_pred
datatables.traveltime.write_pred(data, output_file)
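# --- Usage sketch (illustrative; the real pipeline supplies its own paths) ---
# The file paths below are hypothetical placeholders for the data tables that
# the surrounding pipeline normally provides to train() and predict().
if __name__ == '__main__':
    train('data/traveltime_train.csv', 'model/traveltime_linearvol.json')
    predict('model/traveltime_linearvol.json',
            'data/traveltime_test_xs.csv',
            'output/traveltime_pred.csv')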
| mit |
nuclear-wizard/moose | test/tests/variables/fe_hermite_convergence/plot.py | 12 | 1471 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import matplotlib.pyplot as plt
import numpy as np
"""
This script makes log-log plots of the error vs. h for the tests in this directory.
"""
filenames = ['hermite_converge_dirichlet_out.csv',
'hermite_converge_periodic_out.csv']
for filename in filenames:
fig = plt.figure()
ax1 = fig.add_subplot(111)
    # names=True treats the first CSV row as column headers, so each field
    # is accessed by name in the resulting structured array.
data = np.genfromtxt(filename, delimiter=',', names=True)
log_h1_error = np.log10(data['H1error'])
log_l2_error = np.log10(data['L2error'])
logh = np.log10(data['h'])
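    # In log-log space, error ~ C*h^p appears as a straight line whose slope
    # is the convergence rate p; the linear fits below estimate that slope.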
h1_fit = np.polyfit(logh, log_h1_error, 1)
l2_fit = np.polyfit(logh, log_l2_error, 1)
ax1.plot(logh, log_h1_error, linewidth=2, marker='o', label=r'$H^1$ error')
ax1.text(-0.4, -2., '{:.2f}'.format(h1_fit[0]))
ax1.plot(logh, log_l2_error, linewidth=2, marker='o', label=r'$L^2$ error')
ax1.text(-0.4, -3.5, '{:.2f}'.format(l2_fit[0]))
ax1.set_xlabel('log(h)')
ax1.legend(loc='upper left')
    plt.savefig(filename.rsplit(".", 1)[0] + '.pdf')
| lgpl-2.1 |
TomAugspurger/pandas | pandas/tests/indexes/multi/test_reindex.py | 4 | 3745 | import numpy as np
import pytest
import pandas as pd
from pandas import Index, MultiIndex
import pandas._testing as tm
def test_reindex(idx):
result, indexer = idx.reindex(list(idx[:4]))
assert isinstance(result, MultiIndex)
assert result.names == ["first", "second"]
assert [level.name for level in result.levels] == ["first", "second"]
result, indexer = idx.reindex(list(idx))
assert isinstance(result, MultiIndex)
assert indexer is None
assert result.names == ["first", "second"]
assert [level.name for level in result.levels] == ["first", "second"]
def test_reindex_level(idx):
index = Index(["one"])
target, indexer = idx.reindex(index, level="second")
target2, indexer2 = index.reindex(idx, level="second")
exp_index = idx.join(index, level="second", how="right")
exp_index2 = idx.join(index, level="second", how="left")
assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
assert target2.equals(exp_index2)
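    # -1 entries mark positions in the target with no match in the index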
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
with pytest.raises(TypeError, match="Fill method not supported"):
idx.reindex(idx, method="pad", level="second")
with pytest.raises(TypeError, match="Fill method not supported"):
index.reindex(index, method="bfill", level="first")
def test_reindex_preserves_names_when_target_is_list_or_ndarray(idx):
# GH6552
idx = idx.copy()
target = idx.copy()
idx.names = target.names = [None, None]
other_dtype = pd.MultiIndex.from_product([[1, 2], [3, 4]])
# list & ndarray cases
assert idx.reindex([])[0].names == [None, None]
assert idx.reindex(np.array([]))[0].names == [None, None]
assert idx.reindex(target.tolist())[0].names == [None, None]
assert idx.reindex(target.values)[0].names == [None, None]
assert idx.reindex(other_dtype.tolist())[0].names == [None, None]
assert idx.reindex(other_dtype.values)[0].names == [None, None]
idx.names = ["foo", "bar"]
assert idx.reindex([])[0].names == ["foo", "bar"]
assert idx.reindex(np.array([]))[0].names == ["foo", "bar"]
assert idx.reindex(target.tolist())[0].names == ["foo", "bar"]
assert idx.reindex(target.values)[0].names == ["foo", "bar"]
assert idx.reindex(other_dtype.tolist())[0].names == ["foo", "bar"]
assert idx.reindex(other_dtype.values)[0].names == ["foo", "bar"]
def test_reindex_lvl_preserves_names_when_target_is_list_or_array():
# GH7774
idx = pd.MultiIndex.from_product([[0, 1], ["a", "b"]], names=["foo", "bar"])
assert idx.reindex([], level=0)[0].names == ["foo", "bar"]
assert idx.reindex([], level=1)[0].names == ["foo", "bar"]
def test_reindex_lvl_preserves_type_if_target_is_empty_list_or_array():
# GH7774
idx = pd.MultiIndex.from_product([[0, 1], ["a", "b"]])
assert idx.reindex([], level=0)[0].levels[0].dtype.type == np.int64
assert idx.reindex([], level=1)[0].levels[1].dtype.type == np.object_
def test_reindex_base(idx):
expected = np.arange(idx.size, dtype=np.intp)
actual = idx.get_indexer(idx)
tm.assert_numpy_array_equal(expected, actual)
with pytest.raises(ValueError, match="Invalid fill method"):
idx.get_indexer(idx, method="invalid")
def test_reindex_non_unique():
idx = pd.MultiIndex.from_tuples([(0, 0), (1, 1), (1, 1), (2, 2)])
a = pd.Series(np.arange(4), index=idx)
new_idx = pd.MultiIndex.from_tuples([(0, 0), (1, 1), (2, 2)])
msg = "cannot handle a non-unique multi-index!"
with pytest.raises(ValueError, match=msg):
a.reindex(new_idx)
| bsd-3-clause |
vonholst/deeplearning_example_kog | lib/helpers.py | 1 | 5392 | from keras.preprocessing.image import ImageDataGenerator, DirectoryIterator
import json
import numpy as np
from keras.models import model_from_json
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import MaxPooling2D, Conv2D
from keras.constraints import maxnorm
import matplotlib.pyplot as plt
# from keras.models import load_model
def get_training_parameters(rows=128, cols=128):
img_rows, img_cols = rows, cols
input_shape = (img_rows, img_cols, 3)
image_gen_batch_size = 256
image_scale = 1. / 255.0
epochs = 500
samples_per_epoch = 1000
options = dict(img_rows=img_rows,
img_cols=img_cols,
input_shape=input_shape,
image_gen_batch_size=image_gen_batch_size,
image_scale=image_scale,
epochs=epochs,
samples_per_epoch=samples_per_epoch,
)
return options
def calculate_training_weights(image_generator):
assert isinstance(image_generator, DirectoryIterator), 'Wrong class'
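    # Count examples per class, then weight each class by max_count / count so
    # under-represented classes contribute comparably during training.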
training_examples = dict()
max_training_examples = 0
for class_name in image_generator.class_indices:
class_identifier = image_generator.class_indices[class_name]
number_of_class = np.sum(image_generator.classes == class_identifier)
if number_of_class > max_training_examples:
max_training_examples = number_of_class
training_examples[class_identifier] = number_of_class
training_weights = dict()
for class_identifier in training_examples:
training_weights[class_identifier] = float(max_training_examples) / training_examples[class_identifier]
return training_weights
def generate_images(image_path, target_path):
pass
def create_model(input_shape, number_of_classes):
# Define model architecture
    # Architecture sketch (feature-map sizes for a 100x100 input):
    # 2x conv3x3(32) at 100 -> maxpool4 -> 25, 2x conv3x3(64) -> maxpool2 -> 12,
    # 2x conv3x3(128) -> maxpool2 -> 6, then the fully-connected head.
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape, activation='relu', padding='same'))
model.add(Dropout(0.2))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(4, 4)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(Dropout(0.2))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dropout(0.2))
model.add(Dense(128, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.5))
model.add(Dense(number_of_classes, activation='sigmoid'))
return model
def create_coreml_model(model, options, class_indices):
import coremltools
sorted_classes = sorted(class_indices.items(), key=lambda item: item[1])
class_labels_sorted = [str(label) for label, index in sorted_classes]
coreml_model = coremltools.converters.keras.convert(model, input_names='image',
image_input_names='image',
class_labels=class_labels_sorted,
image_scale=options["image_scale"])
return coreml_model
def save_model(model, class_indices, training_history=None):
model_json = model.to_json()
with open("./model/keras_model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
# model.save_weights("./model/keras_model.h5")
model.save("./model/keras_model.h5")
print("Saved model to disk")
with open("./model/keras_model_classes.json", 'w') as outfile:
json.dump(class_indices, outfile)
if training_history is not None:
with open("./model/keras_model_training_history.json", 'w') as outfile:
json.dump(training_history.history, outfile)
def load_model(model_path="./model/keras_model.h5"):
# load json and create model
with open('./model/keras_model.json', 'r') as json_file:
loaded_model_json = json_file.read()
# model = load_model(model_path)
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights("./model/keras_model.h5")
with open('./model/keras_model_classes.json') as data_file:
class_indices = json.load(data_file)
return model, class_indices
def plot_training_history(history_dict):
# summarize history for accuracy
plt.subplot(211)
plt.plot(history_dict['acc'])
plt.plot(history_dict['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
# summarize history for loss
plt.subplot(212)
plt.plot(history_dict['loss'])
plt.plot(history_dict['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
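# --- Illustrative end-to-end sketch (not part of the original scripts) ---
# Shows how the helpers above are meant to fit together. The directory layout
# ('data/train', 'data/validation'), the 'adam' optimizer and the loss choice
# are assumptions; save_model() expects a './model' directory to exist.
def example_training_run(train_dir='data/train', val_dir='data/validation'):
    options = get_training_parameters()
    gen = ImageDataGenerator(rescale=options['image_scale'])
    # flow_from_directory infers class labels from the subfolder names
    train_gen = gen.flow_from_directory(
        train_dir, target_size=(options['img_rows'], options['img_cols']),
        batch_size=options['image_gen_batch_size'])
    val_gen = gen.flow_from_directory(
        val_dir, target_size=(options['img_rows'], options['img_cols']),
        batch_size=options['image_gen_batch_size'])
    model = create_model(options['input_shape'], len(train_gen.class_indices))
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    history = model.fit_generator(
        train_gen, steps_per_epoch=10, epochs=2,
        validation_data=val_gen, validation_steps=5,
        # weight classes inversely to their frequency (helper defined above)
        class_weight=calculate_training_weights(train_gen))
    save_model(model, train_gen.class_indices, training_history=history)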
| mit |
treycausey/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 2 | 42996 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
import warnings
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (atleast2d_or_csr, check_arrays, check_random_state,
column_or_1d)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..externals import six
from .sgd_fast import plain_sgd
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
SPARSE_INTERCEPT_DECAY = 0.01
"""For sparse data intercept updates are scaled by this decay factor to avoid
intercept oscillation."""
DEFAULT_EPSILON = 0.1
"""Default value of ``epsilon`` parameter. """
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=False,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self._validate_params()
self.coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _init_t(self, loss_function):
"""Initialize iteration counter attr ``t_``.
If ``self.learning_rate=='optimal'`` initialize ``t_`` such that
``eta`` at first sample equals ``self.eta0``.
"""
self.t_ = 1.0
if self.learning_rate == "optimal":
typw = np.sqrt(1.0 / np.sqrt(self.alpha))
# computing eta0, the initial learning rate
eta0 = typw / max(1.0, loss_function.dloss(-typw, 1.0))
# initialize t such that eta at first sample equals eta0
self.t_ = 1.0 / (eta0 * self.alpha)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided coef_ does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features, dtype=np.float64, order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
def _check_fit_data(X, y):
"""Check if shape of input data matches. """
n_samples, _ = X.shape
if n_samples != y.shape[0]:
raise ValueError("Shapes of X and y do not match.")
def _make_dataset(X, y_i, sample_weight):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
"""
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
dataset = ArrayDataset(X, y_i, sample_weight)
intercept_decay = 1.0
return dataset, intercept_decay
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
if len(est.classes_) == 2:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.coef_[i]
intercept = est.intercept_[i]
return y_i, coef, intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
y_i, coef, intercept = _prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=False, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X = atleast2d_or_csr(X, dtype=np.float64, order="C")
y = column_or_1d(y, warn=True)
n_samples, n_features = X.shape
_check_fit_data(X, y)
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
y_ind = np.searchsorted(self.classes_, y) # XXX use a LabelBinarizer?
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_,
y_ind)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self._init_t(self.loss_function)
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
self.t_ += n_iter * n_samples
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X = atleast2d_or_csr(X, dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
# need to be 2d
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
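        # With the threading backend, fit_binary mutates views of self.coef_
        # in place, so only the scalar intercepts need copying back here.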
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
class_weight=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'l2' or 'l1' or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept: bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter: int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle: bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to False.
random_state: int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose: integer, optional
The verbosity level
epsilon: float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs: integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label : weight} or "auto" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "auto" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
    `coef_` : array, shape = [1, n_features] if n_classes == 2 else [n_classes, n_features]
Weights assigned to the features.
`intercept_` : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, class_weight=None, epsilon=0.1, eta0=0.0,
fit_intercept=True, l1_ratio=0.15, learning_rate='optimal',
loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5,
random_state=None, shuffle=False,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=False, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start)
def _check_proba(self):
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
array, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
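            # Implements (clip(decision, -1, 1) + 1) / 2: clip writes into
            # ``prob`` in place, then the shift and scale map onto [0, 1].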
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=False,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_arrays(X, y, sparse_format="csr", copy=False,
check_ccontiguous=True, dtype=np.float64)
y = column_or_1d(y, warn=True)
n_samples, n_features = X.shape
_check_fit_data(X, y)
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
self.t_ += n_iter * n_samples
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
array, shape = [n_samples]
Predicted target values per element in X.
"""
X = atleast2d_or_csr(X)
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
array, shape = [n_samples]
Predicted target values per element in X.
"""
return self.decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = _make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self._init_t(loss_function)
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
self.coef_, intercept = plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.intercept_ = np.atleast_1d(intercept)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'l2' or 'l1' or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept: bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter: int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle: bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to False.
random_state: int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose: integer, optional
The verbosity level.
epsilon: float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(t+t0)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
`coef_` : array, shape = [n_features]
        Weights assigned to the features.
`intercept_` : array, shape = [1]
The intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
SGDRegressor(alpha=0.0001, epsilon=0.1, eta0=0.01, fit_intercept=True,
l1_ratio=0.15, learning_rate='invscaling', loss='squared_loss',
n_iter=5, penalty='l2', power_t=0.25, random_state=None,
shuffle=False, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=False,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start)
| bsd-3-clause |
appapantula/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and the real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
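# staged_predict yields the ensemble's prediction after each boosting
# iteration, so the loops below trace error as a function of n_estimators.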
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
DTMilodowski/EOlab | src/potentialAGB_Brazil_app_v4.py | 1 | 7366 | """
potentialAGB_Brazil_app_v4.py
================================================================================
Produce layers for restoration opportunity cross-comparison against other data
layers (e.g. WRI world of opportunity maps)
"""
# Import general libraries
import os
import sys
import numpy as np
import xarray as xr
import pandas as pd
import rasterio
import rasterio.mask
import fiona
from copy import deepcopy
# import plotting libraries
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sns
# import custom libraries
import prepare_EOlab_layers as EO
sys.path.append('/home/dmilodow/DataStore_DTM/FOREST2020/PotentialBiomassRFR/src')
import useful as useful
# set default cmap
plt.set_cmap('viridis')
plt.register_cmap(name='divergent', cmap=sns.diverging_palette(275,150,l=66,s=90,as_cmap=True))
#sns.light_palette('seagreen',as_cmap=True)
"""
#===============================================================================
PART A: DEFINE PATHS AND LOAD IN DATA
- Potential biomass maps (from netcdf file)
- Biome boundaries (Mapbiomas)
- WRI opportunity map
#-------------------------------------------------------------------------------
"""
country_code = 'BRA'
country = 'Brazil'
version = '013'
path2data = '/disk/scratch/local.2/PotentialBiomass/processed/%s/' % country_code
path2model = '/home/dmilodow/DataStore_DTM/FOREST2020/PotentialBiomassRFR/output/'
path2output = '/home/dmilodow/DataStore_DTM/EOlaboratory/EOlab/BrazilPotentialAGB/'
boundaries_shp = '/home/dmilodow/DataStore_DTM/EOlaboratory/Areas/ne_50m_admin_0_tropical_countries_small_islands_removed.shp'
source = ['globbiomass', 'avitabile']
source = ['avitabile']
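# NOTE: the single-element assignment above overrides the two-element list,
# so only the Avitabile AGB map is processed below.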
# create and apply national boundary mask
# - load template raster
template = rasterio.open('%s/agb/Avitabile_AGB_%s_1km.tif' % (path2data,country_code))
# - load shapefile
boundaries = fiona.open(boundaries_shp)
# - for country of interest, make mask
mask = np.zeros(template.shape)
for feat in boundaries:
name = feat['properties']['admin']
if name==country:
image,transform = rasterio.mask.mask(template,[feat['geometry']],crop=False)
mask[image[0]>=0]=1
# load opportunity map
opportunity = xr.open_rasterio('%sWRI_restoration/WRI_restoration_opportunities_%s.tif' % (path2data, country_code))[0]
# Load MapBiomas data for 2005
mb2005 = deepcopy(opportunity)
mb2005.values=useful.load_mapbiomas('BRA',timestep=20,aggregate=1)-1
for ss in source:
# load potential biomass models from netdf file
dataset = xr.open_dataset('%s%s_%s_AGB_potential_RFR_%s_worldclim_soilgrids_final.nc' %
(path2model, country_code,version, ss))
    # Convert biomass to carbon stock, assuming a carbon fraction of 0.48
    # (Mg C per Mg dry biomass)
dataset['AGBpot'].values*=0.48
dataset['AGBobs'].values*=0.48
dataset['AGBpot_min'].values*=0.48
dataset['AGBobs_min'].values*=0.48
dataset['AGBpot_max'].values*=0.48
dataset['AGBobs_max'].values*=0.48
# calculate deficit aka sequestration potential
dataset['AGBseq'] = (dataset['AGBpot']-dataset['AGBobs'])
dataset['AGBseq_min'] = (dataset['AGBpot_min']-dataset['AGBobs_min'])
dataset['AGBseq_max'] = (dataset['AGBpot_max']-dataset['AGBobs_max'])
# Create potential and sequestration layers with settlements
# maintained at original AGB (i.e. feasible restoration)
people_mask = (mb2005.values==5)
dataset['AGBpot_natural']=deepcopy(dataset['AGBpot'])
dataset['AGBpot_natural'].values[people_mask]=dataset['AGBobs'].values[people_mask]
dataset['AGBseq_natural']=deepcopy(dataset['AGBseq'])
dataset['AGBseq_natural'].values[people_mask]=0
"""
PART B: Create data and display layers
- AGBobs
- AGBpot
- AGBseq
- WRI restoration opportunity
- landcover
"""
file_prefix = path2output + country.lower() + '_'
vars = ['AGBobs','AGBpot','AGBseq','AGBpot_natural','AGBseq_natural']
cmaps = ['viridis','viridis','divergent','viridis','divergent']
axis_labels = ['AGB$_{obs}$ / Mg C ha$^{-1}$', 'AGB$_{potential}$ / Mg C ha$^{-1}$', 'Sequestration potential / Mg C ha$^{-1}$', 'AGB$_{potential}$ / Mg C ha$^{-1}$', 'Sequestration potential / Mg C ha$^{-1}$']
ulims = [200,200,100,200,200]
llims = [0,0,-100,0,-100]
for vv,var in enumerate(vars):
print(var)
if var in dataset.keys():
file_prefix = '%s%s_%s_%s' % (path2output, country.lower(), var, ss)
# delete existing dataset if present
if '%s_%s_%s_data.tif' % (country.lower(),var, ss) in os.listdir(path2output):
os.system("rm %s" % ('%s_data.tif' % (file_prefix)))
if '%s_%s_%s_display.tif' % (country.lower(),var, ss) in os.listdir(path2output):
os.system("rm %s" % ('%s_display.tif' % (file_prefix)))
# apply country mask
if ss != 'oda':
dataset[var].values[mask==0] = np.nan
# write display layers
EO.plot_legend(cmaps[vv],ulims[vv],llims[vv],axis_labels[vv], file_prefix)
EO.write_xarray_to_display_layer_GeoTiff(dataset[vars[vv]], file_prefix, cmaps[vv], ulims[vv], llims[vv])
# WRI opportunity map
opportunity.values=opportunity.values.astype('float')
opportunity.values[mask==0]=np.nan
id = np.arange(0,5)
labels = np.asarray( ['existing natural cover','wide-scale','mosaic','remote','urban-agriculture'])
colours = np.asarray(['#67afde', '#00883b', '#00c656', '#004c21', "#6a3b00"])
id_temp,idx_landcover,idx_id = np.intersect1d(opportunity,id,return_indices=True)
id = id[idx_id]
labels=labels[idx_id]
colours=colours[idx_id]
wri_cmap = ListedColormap(sns.color_palette(colours).as_hex())
file_prefix = '%s%s_wri' % (path2output, country.lower())
EO.plot_legend_listed(wri_cmap,labels,'',file_prefix,figsize=[2,1])
if '%s_wri_data.tif' % (country.lower()) in os.listdir(path2output):
os.system("rm %s" % ('%s_data.tif' % (file_prefix)))
if '%s_wri_display.tif' % (country.lower()) in os.listdir(path2output):
os.system("rm %s" % ('%s_display.tif' % (file_prefix)))
EO.write_xarray_to_display_layer_GeoTiff(opportunity, file_prefix, wri_cmap, 4, 0)
# Mapbiomas land cover data
lc_class = np.array(['Natural Forest','Natural Non-Forest','Plantation','Pasture','Agriculture','Urban','Other'])
colours = np.asarray(['#1f4423', '#bbfcac', '#935132', '#ffd966', '#e974ed','#af2a2a','#d5d5e5'])
lc_id = np.arange(0,7)
id_temp,idx_landcover,idx_id = np.intersect1d(mb2005,lc_id,return_indices=True)
lc_id = lc_id[idx_id]
lc_class=lc_class[idx_id]
colours=colours[idx_id]
mb_cmap = ListedColormap(sns.color_palette(colours).as_hex())
mb_cmap_rev = ListedColormap(sns.color_palette(colours[::-1]).as_hex())
file_prefix = '%s%s_mapbiomas' % (path2output, country.lower())
EO.plot_legend_listed(mb_cmap_rev,lc_class[::-1],'',file_prefix,figsize=[2,2])
file_prefix = '%s%s_mapbiomas_lc_2005' % (path2output, country.lower())
if '%s_mapbiomas_lc_2005_data.tif' % (country.lower()) in os.listdir(path2output):
os.system("rm %s" % ('%s%s_data.tif' % (file_prefix,country.lower())))
if '%s_mapbiomas_lc_2005_display.tif' % (country.lower()) in os.listdir(path2output):
os.system("rm %s" % ('%s%s_display.tif' % (file_prefix,country.lower())))
EO.write_xarray_to_display_layer_GeoTiff(mb2005, file_prefix, mb_cmap, 6, 0)
| gpl-3.0 |
mrawls/APO-1m-phot | MWstarplotter.py | 1 | 7490 | from __future__ import print_function
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#import matplotlib.image as mpimg
import mpl_toolkits.mplot3d.art3d as art3d
import astropy.units as u
import astropy.coordinates as coord
from astropy.coordinates import SkyCoord
from astropy.coordinates import Distance
'''
Reads in RA, Dec, and distance (with error bars) for a set of stars.
Makes plots of where those stars are in the galaxy.
'''
infile = 'RGEB_distinfo.txt'
target_col = 0
RA_col = 3
Dec_col = 4
dist_col = 7
derr_col = 8
FeH_col = 6
usecols = (target_col, RA_col, Dec_col, dist_col, derr_col, FeH_col)
# Read in target information from a text file
targets, RAs, Decs, dists, derrs, FeHs = np.loadtxt(infile, comments='#', usecols=usecols,
dtype={'names': ('targets', 'RAs', 'Decs', 'dists', 'derrs', 'FeHs'),
'formats': (np.int, '|S11','|S11', np.float64, np.float64, np.float64)}, unpack=True)
# Put the RAs, Decs, and distances in a more useful format
RAs = coord.Angle(RAs, unit=u.hour)
RAs_plot = RAs.wrap_at(180*u.degree) # for reasons
Decs = coord.Angle(Decs, unit=u.degree)
dists = dists*u.pc
derrs = derrs*u.pc
# Define a SkyCoord object for each target
starlocs = []
for target, RA, Dec, dist, derr in zip(targets, RAs, Decs, dists, derrs):
starlocs.append( SkyCoord(ra=RA, dec=Dec, distance=dist) )
#print(starlocs) #IT WORKS! (but no way to include distance uncertainty, I don't think)
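# Sketch (ours): SkyCoord has no slot for a distance uncertainty, so one
# workaround is to carry the fractional error in a parallel list for later
# propagation alongside each coordinate.
frac_derr = [de.value / d.value if d.value > 0 else np.nan
             for d, de in zip(dists, derrs)]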
# Plot the target locations on a sky plane projection? Maybe useful?
#fig = plt.figure(figsize=(8,6))
#ax = fig.add_subplot(111, projection='mollweide')
#ax.scatter(RAs_plot.radian, Decs.radian)
##ax.set_xticklabels(['14h','16h','18h','20h','22h','0h','2h','4h','6h','8h','10h'])
#ax.grid(True)
#plt.show()
# Make a figure
fig = plt.figure()
# First subplot: galactic (l,b) coordinates in an Aitoff projection
ax = fig.add_subplot(2,1,1, projection='aitoff')
for star in starlocs:
if star.distance > 0: # only consider the targets with distance info
#print(star.galactic)
lrad = star.galactic.l.radian
#if lrad > np.pi:
# lrad = lrad - 2.*np.pi
brad = star.galactic.b.radian
ax.scatter(lrad, brad)
ax.grid(True)
ax.set_title('Galactic (l,b)')
#ax.set_xlim(360., 0.)
#ax.set_ylim(-90., 90.)
#ax.set_xlabel('Galactic Longitude')
#ax.set_ylabel('Galactic Latitude')
# Second subplot: cartesian heliocentric coordinates in an X, Y slice
ax2 = fig.add_subplot(2,2,3, aspect='equal')
for star in starlocs:
if star.distance > 0:
xcart = star.cartesian.x
ycart = star.cartesian.y
zcart = star.cartesian.z
ax2.scatter(xcart, ycart)
#star.representation = 'cylindrical'
#print(xcart, ycart, zcart)
ax2.set_xlim(-2500., 2500.)
ax2.set_ylim(-3000., 3000.)
ax2.set_xlabel('X (pc)')
ax2.set_ylabel('Y (pc)')
plt.plot(0, 0, marker='*', color='y', ms=20)
# Third subplot: cartesian heliocentric coordinates in an X, Z slice
ax3 = fig.add_subplot(2,2,4, aspect='equal')
ax3.set_xlim(-2500., 2500.) # really x
ax3.set_ylim(-3000., 3000.) # actually z
for star in starlocs:
if star.distance > 0:
xcart = star.cartesian.x
ycart = star.cartesian.y
zcart = star.cartesian.z
#print(xcart, ycart, zcart)
ax3.scatter(xcart, zcart)
ax3.set_xlabel('X (pc)')
ax3.set_ylabel('Z (pc)')
plt.plot(0, 0, marker='*', color='y', ms=20)
#plt.show()
# Make a second figure
fig2 = plt.figure()
# Transform stars to galactocentric coordinates (cartesian)
# Set a color scheme as a function of metallicity (different color for every 0.2 dex)
star_galcens = []
colorlist = []
for star, FeH in zip(starlocs, FeHs):
if star.distance > 0:
star_galcens.append(star.transform_to(coord.Galactocentric))
if FeH < -0.8: color='#ffffb2' #yellowest
elif FeH >= -0.8 and FeH < -0.6: color='#fed976'
elif FeH >= -0.6 and FeH < -0.4: color='#feb24c'
elif FeH >= -0.4 and FeH < -0.2: color='#fd8d3c'
elif FeH >= -0.2 and FeH < 0.0: color='#fc4e2a'
elif FeH >= 0.0 and FeH < 0.2: color='#e31a1c'
elif FeH >= 0.2: color='#b10026' #reddest
colorlist.append(color)
#print(star_galcens)
axnew1 = fig2.add_subplot(1,1,1, projection='3d', aspect='equal')
axnew1.set_axis_off()
axnew1.grid(False)
axnew1.xaxis.set_ticklabels([])
axnew1.yaxis.set_ticklabels([])
axnew1.zaxis.set_ticklabels([])
axnew1.xaxis.set_ticks([])
axnew1.yaxis.set_ticks([])
axnew1.zaxis.set_ticks([])
for i, star in enumerate(star_galcens):
#print(star.x, star.y, star.z)
axnew1.scatter(star.x, star.y, star.z, c=colorlist[i], edgecolors='k', s=150)
axnew1.scatter(0, 0, 0, marker='o', c='k', edgecolors='k', s=50) # galactic center
axnew1.scatter(-8300, 0, 27, marker='*', c='k', edgecolors='k', s=150) # Sun
# Contour-type circles that radiate out from the galactic center for reference
circle1 = plt.Circle((0,0), 2000, color='0.75', fill=False)
circle2 = plt.Circle((0,0), 4000, color='0.75', fill=False)
circle3 = plt.Circle((0,0), 6000, color='0.75', fill=False)
circle4 = plt.Circle((0,0), 8000, color='0.75', fill=False)
circle5 = plt.Circle((0,0), 10000, color='0.75', fill=False)
axnew1.add_patch(circle1)
axnew1.add_patch(circle2)
axnew1.add_patch(circle3)
axnew1.add_patch(circle4)
axnew1.add_patch(circle5)
art3d.pathpatch_2d_to_3d(circle1, z=0, zdir='z')
art3d.pathpatch_2d_to_3d(circle2, z=0, zdir='z')
art3d.pathpatch_2d_to_3d(circle3, z=0, zdir='z')
art3d.pathpatch_2d_to_3d(circle4, z=0, zdir='z')
art3d.pathpatch_2d_to_3d(circle5, z=0, zdir='z')
# Colorbar key
axnew2 = fig2.add_subplot(12,1,10)
cmap = mpl.colors.ListedColormap(['#fed976', '#feb24c', '#fd8d3c', '#fc4e2a', '#e31a1c'])
cmap.set_over('#b10026') #reddest, high Fe/H
cmap.set_under('#ffffb2') #yellowest, low Fe/H
bounds = [-0.8, -0.6, -0.4, -0.2, 0.0, 0.2]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
cb = mpl.colorbar.ColorbarBase(axnew2, cmap=cmap, norm=norm, ticks=bounds, extend='both',
boundaries=[-1.0]+bounds+[0.4], spacing='proportional', orientation='horizontal')
cb.set_label('[Fe/H]', size=26)
# manually make a key?
#fig2.text(0.7, 0.7, 'Testing words here', ha='center', va='center', size=26)
# Attempt to plot an image of the Milky Way in the X-Y plane?
#img = mpimg.imread('../../MWimage.png')
#stretch = 1.
#ximg, yimg = np.ogrid[-img.shape[0]/2.*stretch:img.shape[0]/2.*stretch, -img.shape[1]/2.*stretch:img.shape[1]/2.*stretch]
#axnew1.plot_surface(ximg, yimg, 0, rstride=100000, cstride=100000, facecolors=img)
##axnew1.imshow(img)
fig3 = plt.figure()
ax3main = fig3.add_subplot(3,1,2, aspect='equal')
for i, star in enumerate(star_galcens):
rkpc = np.sqrt(star.x*star.x + star.y*star.y)/1000.
zkpc = star.z/1000.
ax3main.scatter(rkpc, zkpc, c=colorlist[i], edgecolor='k', s=150)
#ax3main.scatter(0, 0, marker='o', c='k', edgecolors='k', s=50) # galactic center
ax3main.scatter(8.3, 0.027, marker='*', c='k', edgecolors='k', s=150) # Sun
ax3main.set_xlabel('Galactic radius $R$ (kpc)', size=26)
ax3main.set_ylabel('Height $z$ (kpc)', size=26)
ax3main.set_xlim(6, 9)
#ax3main.set_ylim(-0.1, 0.9)
#plt.xticks( (-8, -6, -4, -2, 0), ('8', '6', '4', '2', '0') )
ax3cb = fig3.add_subplot(15,1,13)
cb3 = mpl.colorbar.ColorbarBase(ax3cb, cmap=cmap, norm=norm, ticks=bounds, extend='both',
boundaries=[-1.0]+bounds+[0.4], spacing='proportional', orientation='horizontal')
cb3.set_label('[Fe/H]', size=26)
plt.show() | mit |
vkscool/nupic | nupic/research/monitor_mixin/monitor_mixin_base.py | 7 | 5503 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
MonitorMixinBase class used in monitor mixin framework.
"""
import abc
import numpy
from prettytable import PrettyTable
from nupic.research.monitor_mixin.plot import Plot
class MonitorMixinBase(object):
"""
Base class for MonitorMixin. Each subclass will be a mixin for a particular
algorithm.
All arguments, variables, and methods in monitor mixin classes should be
prefixed with "mm" (to avoid collision with the classes they mix in to).
"""
__metaclass__ = abc.ABCMeta
def __init__(self, *args, **kwargs):
"""
Note: If you set the kwarg "mmName", then pretty-printing of traces and
metrics will include the name you specify as a tag before every title.
"""
self.mmName = kwargs.get("mmName")
if "mmName" in kwargs:
del kwargs["mmName"]
super(MonitorMixinBase, self).__init__(*args, **kwargs)
# Mapping from key (string) => trace (Trace)
self._mmTraces = None
self._mmData = None
self.mmClearHistory()
def mmClearHistory(self):
"""
Clears the stored history.
"""
self._mmTraces = {}
self._mmData = {}
@staticmethod
def mmPrettyPrintTraces(traces, breakOnResets=None):
"""
Returns pretty-printed table of traces.
@param traces (list) Traces to print in table
@param breakOnResets (BoolsTrace) Trace of resets to break table on
@return (string) Pretty-printed table of traces.
"""
assert len(traces) > 0, "No traces found"
table = PrettyTable(["#"] + [trace.prettyPrintTitle() for trace in traces])
for i in xrange(len(traces[0].data)):
if breakOnResets and breakOnResets.data[i]:
table.add_row(["<reset>"] * (len(traces) + 1))
table.add_row([i] +
[trace.prettyPrintDatum(trace.data[i]) for trace in traces])
return table.get_string().encode("utf-8")
@staticmethod
def mmPrettyPrintMetrics(metrics, sigFigs=5):
"""
Returns pretty-printed table of metrics.
    @param metrics (list) Metrics to print in table
@param sigFigs (int) Number of significant figures to print
@return (string) Pretty-printed table of metrics.
"""
assert len(metrics) > 0, "No metrics found"
table = PrettyTable(["Metric", "mean", "standard deviation",
"min", "max", "sum", ])
for metric in metrics:
table.add_row([metric.prettyPrintTitle()] + metric.getStats())
return table.get_string().encode("utf-8")
def mmGetDefaultTraces(self, verbosity=1):
"""
Returns list of default traces. (To be overridden.)
@param verbosity (int) Verbosity level
@return (list) Default traces
"""
return []
def mmGetDefaultMetrics(self, verbosity=1):
"""
Returns list of default metrics. (To be overridden.)
@param verbosity (int) Verbosity level
@return (list) Default metrics
"""
return []
def mmGetCellTracePlot(self, cellTrace, cellCount, activityType, title="",
showReset=False, resetShading=0.25):
"""
Returns plot of the cell activity. Note that if many timesteps of
activities are input, matplotlib's image interpolation may omit activities
(columns in the image).
@param cellTrace (list) a temporally ordered list of sets of cell
activities
@param cellCount (int) number of cells in the space being rendered
@param activityType (string) type of cell activity being displayed
@param title (string) an optional title for the figure
@param showReset (bool) if true, the first set of cell activities
after a reset will have a grayscale background
@param resetShading (float) applicable if showReset is true, specifies the
intensity of the reset background with 0.0
being white and 1.0 being black
@return (Plot) plot
"""
plot = Plot(self, title)
resetTrace = self.mmGetTraceResets().data
data = numpy.zeros((cellCount, 1))
for i in xrange(len(cellTrace)):
# Set up a "background" vector that is shaded or blank
if showReset and resetTrace[i]:
activity = numpy.ones((cellCount, 1)) * resetShading
else:
activity = numpy.zeros((cellCount, 1))
activeIndices = cellTrace[i]
activity[list(activeIndices)] = 1
data = numpy.concatenate((data, activity), 1)
plot.add2DArray(data, xlabel="Time", ylabel=activityType)
return plot
| gpl-3.0 |
anntzer/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 2 | 12181 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used to create prediction
intervals.
"""
# %%
# Generate some data for a synthetic regression problem by applying the
# function f to uniformly sampled random inputs.
import numpy as np
from sklearn.model_selection import train_test_split
def f(x):
"""The function to predict."""
return x * np.sin(x)
rng = np.random.RandomState(42)
X = np.atleast_2d(rng.uniform(0, 10.0, size=1000)).T
expected_y = f(X).ravel()
# %%
# To make the problem interesting, we generate observations of the target y as
# the sum of a deterministic term computed by the function f and a random noise
# term that follows a centered `log-normal distribution
# <https://en.wikipedia.org/wiki/Log-normal_distribution>`_. To make this even
# more interesting we consider the case where the amplitude of the noise
# depends on the input variable x (heteroscedastic noise).
#
# The lognormal distribution is non-symmetric and long tailed: observing large
# outliers is likely but it is impossible to observe small outliers.
sigma = 0.5 + X.ravel() / 10
noise = rng.lognormal(sigma=sigma) - np.exp(sigma ** 2 / 2)
y = expected_y + noise
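# %%
# Sanity check (ours, not part of the original example): exp(sigma ** 2 / 2) is
# the mean of a lognormal with scale ``sigma``, so after subtracting it the
# noise should be roughly centered:
print("Empirical mean of the noise: %.3f (expected close to 0)" % noise.mean())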
# %%
# Split into train, test datasets:
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# %%
# Fitting non-linear quantile and least squares regressors
# --------------------------------------------------------
#
# Fit gradient boosting models trained with the quantile loss and
# alpha=0.05, 0.5, 0.95.
#
# The models obtained for alpha=0.05 and alpha=0.95 produce a 90% confidence
# interval (95% - 5% = 90%).
#
# The model trained with alpha=0.5 produces a regression of the median: on
# average, there should be the same number of target observations above and
# below the predicted values.
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_pinball_loss, mean_squared_error
all_models = {}
common_params = dict(
learning_rate=0.05,
n_estimators=250,
max_depth=2,
min_samples_leaf=9,
min_samples_split=9,
)
for alpha in [0.05, 0.5, 0.95]:
gbr = GradientBoostingRegressor(loss='quantile', alpha=alpha,
**common_params)
all_models["q %1.2f" % alpha] = gbr.fit(X_train, y_train)
# %%
# For the sake of comparison, also fit a baseline model trained with the usual
# least squares loss (ls), also known as the mean squared error (MSE).
gbr_ls = GradientBoostingRegressor(loss='ls', **common_params)
all_models["ls"] = gbr_ls.fit(X_train, y_train)
# %%
# Create an evenly spaced evaluation set of input values spanning the [0, 10]
# range.
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
# %%
# Plot the true conditional mean function f, the prediction of the conditional
# mean (least squares loss), the conditional median and the conditional 90%
# interval (from 5th to 95th conditional percentiles).
import matplotlib.pyplot as plt
y_pred = all_models['ls'].predict(xx)
y_lower = all_models['q 0.05'].predict(xx)
y_upper = all_models['q 0.95'].predict(xx)
y_med = all_models['q 0.50'].predict(xx)
fig = plt.figure(figsize=(10, 10))
plt.plot(xx, f(xx), 'g:', linewidth=3, label=r'$f(x) = x\,\sin(x)$')
plt.plot(X_test, y_test, 'b.', markersize=10, label='Test observations')
plt.plot(xx, y_med, 'r-', label='Predicted median', color="orange")
plt.plot(xx, y_pred, 'r-', label='Predicted mean')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill_between(xx.ravel(), y_lower, y_upper, alpha=0.4,
label='Predicted 90% interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 25)
plt.legend(loc='upper left')
plt.show()
# %%
# Comparing the predicted median with the predicted mean, we note that the
# median is on average below the mean as the noise is skewed towards high
# values (large outliers). The median estimate also seems to be smoother
# because of its natural robustness to outliers.
#
# Also observe that the inductive bias of gradient boosting trees is
# unfortunately preventing our 0.05 quantile from fully capturing the sinusoidal
# shape of the signal, in particular around x=8. Tuning hyper-parameters can
# reduce this effect as shown in the last part of this notebook.
#
# Analysis of the error metrics
# -----------------------------
#
# Measure the models with :func:`mean_squared_error` and
# :func:`mean_pinball_loss` metrics on the training dataset.
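# %%
# For reference, the pinball (quantile) loss penalizes under- and
# over-predictions asymmetrically. Below is a minimal NumPy sketch (the helper
# name is ours; :func:`~sklearn.metrics.mean_pinball_loss` is the canonical
# implementation):
def pinball_loss_sketch(y_true, y_pred, alpha):
    # alpha-weighted positive residuals, (1 - alpha)-weighted negative ones
    diff = y_true - y_pred
    return np.mean(np.maximum(alpha * diff, (alpha - 1) * diff))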
import pandas as pd
def highlight_min(x):
x_min = x.min()
return ['font-weight: bold' if v == x_min else ''
for v in x]
results = []
for name, gbr in sorted(all_models.items()):
metrics = {'model': name}
y_pred = gbr.predict(X_train)
for alpha in [0.05, 0.5, 0.95]:
metrics["pbl=%1.2f" % alpha] = mean_pinball_loss(
y_train, y_pred, alpha=alpha)
metrics['MSE'] = mean_squared_error(y_train, y_pred)
results.append(metrics)
pd.DataFrame(results).set_index('model').style.apply(highlight_min)
# %%
# Each column shows all models evaluated with the same metric. The minimum
# value in a column should be obtained when the model is trained and measured
# with that same metric. This should always be the case on the training set if
# the training converged.
#
# Note that because the target distribution is asymmetric, the expected
# conditional mean and conditional median are significantly different, and
# therefore one cannot use the least squares model to get a good estimate of
# the conditional median, nor the converse.
#
# If the target distribution were symmetric and had no outliers (e.g. with
# Gaussian noise), then the median estimator and the least squares estimator
# would yield similar predictions.
#
# We then do the same on the test set.
results = []
for name, gbr in sorted(all_models.items()):
metrics = {'model': name}
y_pred = gbr.predict(X_test)
for alpha in [0.05, 0.5, 0.95]:
metrics["pbl=%1.2f" % alpha] = mean_pinball_loss(
y_test, y_pred, alpha=alpha)
metrics['MSE'] = mean_squared_error(y_test, y_pred)
results.append(metrics)
pd.DataFrame(results).set_index('model').style.apply(highlight_min)
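# %%
# Illustrative aside (ours): for a lognormal variable with log-scale ``s`` the
# mean exceeds the median by a factor exp(s ** 2 / 2). Even at the low end of
# the noise scale used here (s = 0.5), that gap is already about 13%:
print("lognormal mean/median ratio for s=0.5: %.3f" % np.exp(0.5 ** 2 / 2))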
# %%
# Errors are higher on the test set, meaning the models slightly overfit the
# data. It still
# shows that the best test metric is obtained when the model is trained by
# minimizing this same metric.
#
# Note that the conditional median estimator is competitive with the least
# squares estimator in terms of MSE on the test set: this can be explained by
# the fact the least squares estimator is very sensitive to large outliers
# which can cause significant overfitting. This can be seen on the right hand
# side of the previous plot. The conditional median estimator is biased
# (underestimation for this asymmetric noise) but is also naturally robust to
# outliers and overfits less.
#
# Calibration of the confidence interval
# --------------------------------------
#
# We can also evaluate the ability of the two extreme quantile estimators to
# produce a well-calibrated conditional 90% confidence interval.
#
# To do this we can compute the fraction of observations that fall between the
# predictions:
def coverage_fraction(y, y_low, y_high):
return np.mean(np.logical_and(y >= y_low, y <= y_high))
coverage_fraction(y_train,
all_models['q 0.05'].predict(X_train),
all_models['q 0.95'].predict(X_train))
# %%
# On the training set the calibration is very close to the expected coverage
# value for a 90% confidence interval.
coverage_fraction(y_test,
all_models['q 0.05'].predict(X_test),
all_models['q 0.95'].predict(X_test))
# %%
# On the test set, the estimated confidence interval is slightly too narrow.
# Note, however, that we would need to wrap those metrics in a cross-validation
# loop to assess their variability under data resampling.
#
# Tuning the hyper-parameters of the quantile regressors
# ------------------------------------------------------
#
# In the plot above, we observed that the 5th percentile regressor seems to
# underfit and could not adapt to the sinusoidal shape of the signal.
#
# The hyper-parameters of the model were approximately hand-tuned for the
# median regressor, and there is no reason that the same hyper-parameters
# should be suitable for the 5th percentile regressor.
#
# To confirm this hypothesis, we tune the hyper-parameters of a new regressor
# of the 5th percentile by selecting the best model parameters by
# cross-validation on the pinball loss with alpha=0.05:
# %%
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import make_scorer
from pprint import pprint
param_grid = dict(
learning_rate=[0.01, 0.05, 0.1],
n_estimators=[100, 150, 200, 250, 300],
max_depth=[2, 5, 10, 15, 20],
min_samples_leaf=[1, 5, 10, 20, 30, 50],
min_samples_split=[2, 5, 10, 20, 30, 50],
)
alpha = 0.05
neg_mean_pinball_loss_05p_scorer = make_scorer(
mean_pinball_loss,
alpha=alpha,
greater_is_better=False, # maximize the negative loss
)
gbr = GradientBoostingRegressor(loss="quantile", alpha=alpha, random_state=0)
search_05p = RandomizedSearchCV(
gbr,
param_grid,
n_iter=10, # increase this if computational budget allows
scoring=neg_mean_pinball_loss_05p_scorer,
n_jobs=2,
random_state=0,
).fit(X_train, y_train)
pprint(search_05p.best_params_)
# %%
# We observe that the search procedure identifies that deeper trees are needed
# to get a good fit for the 5th percentile regressor. Deeper trees are more
# expressive and less likely to underfit.
#
# Let's now tune the hyper-parameters for the 95th percentile regressor. We
# need to redefine the `scoring` metric used to select the best model, along
# with adjusting the alpha parameter of the inner gradient boosting estimator
# itself:
from sklearn.base import clone
alpha = 0.95
neg_mean_pinball_loss_95p_scorer = make_scorer(
mean_pinball_loss,
alpha=alpha,
greater_is_better=False, # maximize the negative loss
)
search_95p = clone(search_05p).set_params(
estimator__alpha=alpha,
scoring=neg_mean_pinball_loss_95p_scorer,
)
search_95p.fit(X_train, y_train)
pprint(search_95p.best_params_)
# %%
# This time, shallower trees are selected, leading to a more piecewise-constant
# and therefore more robust estimation of the 95th percentile. This is
# beneficial as it avoids overfitting the large outliers of the log-normal
# additive noise.
#
# We can confirm this intuition by displaying the predicted 90% confidence
# interval comprised by the predictions of those two tuned quantile regressors:
# the prediction of the upper 95th percentile has a much coarser shape than the
# prediction of the lower 5th percentile:
y_lower = search_05p.predict(xx)
y_upper = search_95p.predict(xx)
fig = plt.figure(figsize=(10, 10))
plt.plot(xx, f(xx), 'g:', linewidth=3, label=r'$f(x) = x\,\sin(x)$')
plt.plot(X_test, y_test, 'b.', markersize=10, label='Test observations')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill_between(xx.ravel(), y_lower, y_upper, alpha=0.4,
label='Predicted 90% interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 25)
plt.legend(loc='upper left')
plt.title("Prediction with tuned hyper-parameters")
plt.show()
# %%
# The plot looks qualitatively better than for the untuned models, especially
# for the shape of the lower quantile.
#
# We now quantitatively evaluate the joint-calibration of the pair of
# estimators:
coverage_fraction(y_train,
search_05p.predict(X_train),
search_95p.predict(X_train))
# %%
coverage_fraction(y_test,
search_05p.predict(X_test),
search_95p.predict(X_test))
# %%
# The calibration of the tuned pair is sadly not better on the test set: the
# width of the estimated confidence interval is still too narrow.
#
# Again, we would need to wrap this study in a cross-validation loop to
# better assess the variability of those estimates.
| bsd-3-clause |
anntzer/scikit-learn | asv_benchmarks/benchmarks/datasets.py | 11 | 5351 | import numpy as np
import scipy.sparse as sp
from joblib import Memory
from pathlib import Path
from sklearn.decomposition import TruncatedSVD
from sklearn.datasets import (make_blobs, fetch_20newsgroups,
fetch_openml, load_digits, make_regression,
make_classification, fetch_olivetti_faces)
from sklearn.preprocessing import MaxAbsScaler, StandardScaler
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
# memory location for caching datasets
M = Memory(location=str(Path(__file__).resolve().parent / 'cache'))
@M.cache
def _blobs_dataset(n_samples=500000, n_features=3, n_clusters=100,
dtype=np.float32):
X, _ = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_clusters, random_state=0)
X = X.astype(dtype, copy=False)
X, X_val = train_test_split(X, test_size=0.1, random_state=0)
return X, X_val, None, None
@M.cache
def _20newsgroups_highdim_dataset(n_samples=None, ngrams=(1, 1),
dtype=np.float32):
newsgroups = fetch_20newsgroups(random_state=0)
vectorizer = TfidfVectorizer(ngram_range=ngrams, dtype=dtype)
X = vectorizer.fit_transform(newsgroups.data[:n_samples])
y = newsgroups.target[:n_samples]
X, X_val, y, y_val = train_test_split(X, y, test_size=0.1, random_state=0)
return X, X_val, y, y_val
@M.cache
def _20newsgroups_lowdim_dataset(n_components=100, ngrams=(1, 1),
dtype=np.float32):
newsgroups = fetch_20newsgroups()
vectorizer = TfidfVectorizer(ngram_range=ngrams)
X = vectorizer.fit_transform(newsgroups.data)
X = X.astype(dtype, copy=False)
svd = TruncatedSVD(n_components=n_components)
X = svd.fit_transform(X)
y = newsgroups.target
X, X_val, y, y_val = train_test_split(X, y, test_size=0.1, random_state=0)
return X, X_val, y, y_val
@M.cache
def _mnist_dataset(dtype=np.float32):
X, y = fetch_openml('mnist_784', version=1, return_X_y=True,
as_frame=False)
X = X.astype(dtype, copy=False)
X = MaxAbsScaler().fit_transform(X)
X, X_val, y, y_val = train_test_split(X, y, test_size=0.1, random_state=0)
return X, X_val, y, y_val
@M.cache
def _digits_dataset(n_samples=None, dtype=np.float32):
X, y = load_digits(return_X_y=True)
X = X.astype(dtype, copy=False)
X = MaxAbsScaler().fit_transform(X)
X = X[:n_samples]
y = y[:n_samples]
X, X_val, y, y_val = train_test_split(X, y, test_size=0.1, random_state=0)
return X, X_val, y, y_val
@M.cache
def _synth_regression_dataset(n_samples=100000, n_features=100,
dtype=np.float32):
X, y = make_regression(n_samples=n_samples, n_features=n_features,
n_informative=n_features // 10, noise=50,
random_state=0)
X = X.astype(dtype, copy=False)
X = StandardScaler().fit_transform(X)
X, X_val, y, y_val = train_test_split(X, y, test_size=0.1, random_state=0)
return X, X_val, y, y_val
@M.cache
def _synth_regression_sparse_dataset(n_samples=10000, n_features=10000,
density=0.01, dtype=np.float32):
X = sp.random(m=n_samples, n=n_features, density=density, format='csr',
random_state=0)
X.data = np.random.RandomState(0).randn(X.getnnz())
X = X.astype(dtype, copy=False)
coefs = sp.random(m=n_features, n=1, density=0.5, random_state=0)
coefs.data = np.random.RandomState(0).randn(coefs.getnnz())
y = X.dot(coefs.toarray()).reshape(-1)
y += 0.2 * y.std() * np.random.randn(n_samples)
X, X_val, y, y_val = train_test_split(X, y, test_size=0.1, random_state=0)
return X, X_val, y, y_val
@M.cache
def _synth_classification_dataset(n_samples=1000, n_features=10000,
n_classes=2, dtype=np.float32):
X, y = make_classification(n_samples=n_samples, n_features=n_features,
n_classes=n_classes, random_state=0,
n_informative=n_features, n_redundant=0)
X = X.astype(dtype, copy=False)
X = StandardScaler().fit_transform(X)
X, X_val, y, y_val = train_test_split(X, y, test_size=0.1, random_state=0)
return X, X_val, y, y_val
@M.cache
def _olivetti_faces_dataset():
dataset = fetch_olivetti_faces(shuffle=True, random_state=42)
faces = dataset.data
n_samples, n_features = faces.shape
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
X = faces_centered
X, X_val = train_test_split(X, test_size=0.1, random_state=0)
return X, X_val, None, None
@M.cache
def _random_dataset(n_samples=1000, n_features=1000,
representation='dense', dtype=np.float32):
if representation == 'dense':
X = np.random.RandomState(0).random_sample((n_samples, n_features))
X = X.astype(dtype, copy=False)
else:
X = sp.random(n_samples, n_features, density=0.05, format='csr',
dtype=dtype, random_state=0)
X, X_val = train_test_split(X, test_size=0.1, random_state=0)
return X, X_val, None, None
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/userdemo/anchored_box04.py | 1 | 1929 | """
==============
Anchored Box04
==============
"""
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
from matplotlib.offsetbox import (AnchoredOffsetbox, DrawingArea, HPacker,
TextArea)
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
fig, ax = plt.subplots(figsize=(3, 3))
box1 = TextArea(" Test : ", textprops=dict(color="k"))
box2 = DrawingArea(60, 20, 0, 0)
el1 = Ellipse((10, 10), width=16, height=5, angle=30, fc="r")
el2 = Ellipse((30, 10), width=16, height=5, angle=170, fc="g")
el3 = Ellipse((50, 10), width=16, height=5, angle=230, fc="b")
box2.add_artist(el1)
box2.add_artist(el2)
box2.add_artist(el3)
box = HPacker(children=[box1, box2],
align="center",
pad=0, sep=5)
anchored_box = AnchoredOffsetbox(loc=3,
child=box, pad=0.,
frameon=True,
bbox_to_anchor=(0., 1.02),
bbox_transform=ax.transAxes,
borderpad=0.,
)
ax.add_artist(anchored_box)
fig.subplots_adjust(top=0.8)
pltshow(plt)
| mit |
marcocaccin/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
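        # The isomap kernel is K = -0.5 * D ** 2, with D the geodesic distance
        # matrix; KernelPCA centers this Gram matrix internally before the
        # eigendecomposition (cf. the formula in `reconstruction_error`).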
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
        # Create the graph of shortest distances from X to self.training_data_
        # via the nearest neighbors of X.
        # This can be done as a single array operation, but it potentially
        # takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
| bsd-3-clause |
bhargav/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 84 | 1221 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([[x1, x2]])
Z[i, j] = p[0]
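# Note (ours): the loop above could be replaced by a single vectorized call:
#   Z = clf.decision_function(np.c_[X1.ravel(), X2.ravel()]).reshape(X1.shape)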
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
sumeetsk/NEXT-1 | next/apps/AppDashboard.py | 1 | 10726 | import json
import numpy
import numpy.random
from datetime import datetime
from datetime import timedelta
import next.utils as utils
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import mpld3
MAX_SAMPLES_PER_PLOT = 100
class AppDashboard(object):
def __init__(self, db, ell):
self.db = db
self.ell = ell
def basic_info(self,app,butler):
"""
returns basic statistics like number of queries, participants, etc.
"""
experiment_dict = butler.experiment.get()
#git_hash = rm.get_git_hash_for_exp_uid(exp_uid)
git_hash = experiment_dict.get('git_hash','None')
# start_date = utils.str2datetime(butler.admin.get(uid=app.exp_uid)['start_date'])
start_date = experiment_dict.get('start_date','Unknown')+' UTC'
# participant_uids = rm.get_participant_uids(exp_uid)
participants = butler.participants.get(pattern={'exp_uid':app.exp_uid})
num_participants = len(participants)
queries = butler.queries.get(pattern={'exp_uid':app.exp_uid})
num_queries = len(queries)
return_dict = {'git_hash':git_hash,
'exp_start_data':start_date,
'num_participants':num_participants,
'num_queries':num_queries,
'meta':{'last_dashboard_update':'<1 minute ago'}}
return return_dict
def api_activity_histogram(self, app, butler):
"""
Description: returns the data to plot all API activity (for all algorithms) in a histogram with respect to time for any task in {getQuery,processAnswer,predict}
Expected output (in dict):
(dict) MPLD3 plot dictionary
"""
queries = butler.queries.get(pattern={'exp_uid':app.exp_uid})
#self.db.get_docs_with_filter(app_id+':queries',{'exp_uid':exp_uid})
start_date = utils.str2datetime(butler.admin.get(uid=app.exp_uid)['start_date'])
numerical_timestamps = [(utils.str2datetime(item['timestamp_query_generated'])-start_date).total_seconds()
for item in queries]
fig, ax = plt.subplots(subplot_kw=dict(axisbg='#FFFFFF'),figsize=(12,1.5))
ax.hist(numerical_timestamps,min(int(1+4*numpy.sqrt(len(numerical_timestamps))),300),alpha=0.5,color='black')
ax.set_frame_on(False)
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.get_yaxis().set_visible(False)
ax.set_xlim(0, max(numerical_timestamps))
plot_dict = mpld3.fig_to_dict(fig)
plt.close()
return plot_dict
def compute_duration_multiline_plot(self, app, butler, task):
"""
        Description: Returns a multiline plot with a one-to-one mapping from
        lines to algorithms; each line shows the duration to complete the task
        (per API call)
Expected input:
(string) task : must be in {'getQuery','processAnswer','predict'}
Expected output (in dict):
(dict) MPLD3 plot dictionary
"""
alg_list = butler.experiment.get(key='args')['alg_list']
x_min = numpy.float('inf')
x_max = -numpy.float('inf')
y_min = numpy.float('inf')
y_max = -numpy.float('inf')
list_of_alg_dicts = []
for algorithm in alg_list:
alg_label = algorithm['alg_label']
list_of_log_dict,didSucceed,message = butler.ell.get_logs_with_filter(app.app_id+':ALG-DURATION',
{'exp_uid':app.exp_uid,'alg_label':alg_label,'task':task})
list_of_log_dict = sorted(list_of_log_dict, key=lambda item: utils.str2datetime(item['timestamp']) )
x = []
y = []
t = []
k=0
for item in list_of_log_dict:
k+=1
x.append(k)
y.append( item.get('app_duration',0.) + item.get('duration_enqueued',0.) )
t.append(str(item['timestamp'])[:-3])
x = numpy.array(x)
y = numpy.array(y)
t = numpy.array(t)
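            # Downsample for plotting: keep at most MAX_SAMPLES_PER_PLOT evenly
            # spaced calls plus the largest durations, so outliers stay visible.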
num_items = len(list_of_log_dict)
multiplier = min(num_items,MAX_SAMPLES_PER_PLOT)
incr_inds = [ r*num_items/multiplier for r in range(multiplier)]
max_inds = list(numpy.argsort(-y)[0:multiplier])
final_inds = sorted(set(incr_inds + max_inds))
x = list(x[final_inds])
y = list(y[final_inds])
t = list(t[final_inds])
alg_dict = {}
alg_dict['legend_label'] = alg_label
alg_dict['x'] = x
alg_dict['y'] = y
alg_dict['t'] = t
try:
x_min = min(x_min,min(x))
x_max = max(x_max,max(x))
y_min = min(y_min,min(y))
y_max = max(y_max,max(y))
except:
pass
list_of_alg_dicts.append(alg_dict)
return_dict = {}
return_dict['data'] = list_of_alg_dicts
return_dict['plot_type'] = 'multi_line_plot'
return_dict['x_label'] = 'API Call'
return_dict['x_min'] = x_min
return_dict['x_max'] = x_max
return_dict['y_label'] = 'Duration (s)'
return_dict['y_min'] = y_min
return_dict['y_max'] = y_max
fig, ax = plt.subplots(subplot_kw=dict(axisbg='#EEEEEE'))
for alg_dict in list_of_alg_dicts:
ax.plot(alg_dict['x'],alg_dict['y'],label=alg_dict['legend_label'])
ax.set_xlabel('API Call')
ax.set_ylabel('Duration (s)')
ax.set_xlim([x_min,x_max])
ax.set_ylim([y_min,y_max])
ax.grid(color='white', linestyle='solid')
ax.set_title(task, size=14)
legend = ax.legend(loc=2,ncol=3,mode="expand")
for label in legend.get_texts():
label.set_fontsize('small')
plot_dict = mpld3.fig_to_dict(fig)
plt.close()
return plot_dict
def compute_duration_detailed_stacked_area_plot(self,app,butler,task,alg_label,detailedDB=False):
"""
Description: Returns stacked area plot for a particular algorithm and task where the durations
are broken down into compute,db_set,db_get (for cpu, database_set, database_get)
Expected input:
(string) task : must be in {'getQuery','processAnswer','predict'}
(string) alg_label : must be a valid alg_label contained in alg_list list of dicts
Expected output (in dict):
(dict) MPLD3 plot dictionary
"""
list_of_log_dict,didSucceed,message = butler.ell.get_logs_with_filter(app.app_id+':ALG-DURATION',
{'exp_uid':app.exp_uid,'alg_label':alg_label,'task':task})
list_of_log_dict = sorted(list_of_log_dict, key=lambda item: utils.str2datetime(item['timestamp']) )
y = []
for item in list_of_log_dict:
y.append( item.get('app_duration',0.) + item.get('duration_enqueued',0.) )
y = numpy.array(y)
num_items = len(list_of_log_dict)
multiplier = min(num_items,MAX_SAMPLES_PER_PLOT)
incr_inds = [ k*num_items/multiplier for k in range(multiplier)]
max_inds = list(numpy.argsort(-y)[0:multiplier])
final_inds = sorted(set(incr_inds + max_inds))
x = []
t = []
enqueued = []
admin = []
dbGet = []
dbSet = []
compute = []
max_y_value = 0.
min_y_value = float('inf')
for idx in final_inds:
item = list_of_log_dict[idx]
x.append(idx+1)
t.append(str(item.get('timestamp','')))
_alg_duration = item.get('duration',0.)
_alg_duration_dbGet = item.get('duration_dbGet',0.)
_alg_duration_dbSet = item.get('duration_dbSet',0.)
_duration_enqueued = item.get('duration_enqueued',0.)
_app_duration = item.get('app_duration',0.)
if (_app_duration+_duration_enqueued) > max_y_value:
max_y_value = _app_duration + _duration_enqueued
if (_app_duration+_duration_enqueued) < min_y_value:
min_y_value = _app_duration + _duration_enqueued
enqueued.append(_duration_enqueued)
admin.append(_app_duration-_alg_duration)
dbSet.append(_alg_duration_dbSet)
dbGet.append(_alg_duration_dbGet)
compute.append( _alg_duration - _alg_duration_dbSet - _alg_duration_dbGet )
try:
min_x = min(x)
max_x = max(x)
except:
min_x = 0.
max_x = 0.
fig, ax = plt.subplots(subplot_kw=dict(axisbg='#EEEEEE'))
stack_coll = ax.stackplot(x,compute,dbGet,dbSet,admin,enqueued, alpha=.5)
ax.set_xlabel('API Call')
ax.set_ylabel('Duration (s)')
ax.set_xlim([min_x,max_x])
ax.set_ylim([0.,max_y_value])
ax.grid(color='white', linestyle='solid')
ax.set_title(alg_label+' - '+task, size=14)
proxy_rects = [plt.Rectangle((0, 0), 1, 1, alpha=.5,fc=pc.get_facecolor()[0]) for pc in stack_coll]
legend = ax.legend(proxy_rects, ['compute','dbGet','dbSet','admin','enqueued'],loc=2,ncol=3,mode="expand")
for label in legend.get_texts():
label.set_fontsize('small')
plot_dict = mpld3.fig_to_dict(fig)
plt.close()
return plot_dict
def response_time_histogram(self,app,butler,alg_label):
"""
Description: returns the data to plot response time histogram of processAnswer for each algorithm
Expected input:
(string) alg_label : must be a valid alg_label contained in alg_list list of dicts
Expected output (in dict):
(dict) MPLD3 plot dictionary
"""
list_of_query_dict,didSucceed,message = self.db.get_docs_with_filter(app.app_id+':queries',{'exp_uid':app.exp_uid,'alg_label':alg_label})
t = []
for item in list_of_query_dict:
try:
t.append(item['response_time'])
except:
pass
fig, ax = plt.subplots(subplot_kw=dict(axisbg='#FFFFFF'))
ax.hist(t, bins=min(len(t), MAX_SAMPLES_PER_PLOT), range=(0,30),alpha=0.5,color='black')
ax.set_xlim(0, 30)
ax.set_axis_off()
ax.set_xlabel('Durations (s)')
ax.set_ylabel('Count')
ax.set_title(alg_label + " - response time", size=14)
plot_dict = mpld3.fig_to_dict(fig)
plt.close()
return plot_dict
def network_delay_histogram(self, app, butler, alg_label):
"""
        Description: returns the data to plot a network delay histogram of the time it takes to getQuery+processAnswer for each algorithm
Expected input:
(string) alg_label : must be a valid alg_label contained in alg_list list of dicts
Expected output (in dict):
(dict) MPLD3 plot dictionary
"""
list_of_query_dict,didSucceed,message = self.db.get_docs_with_filter(app.app_id+':queries',{'exp_uid':app.exp_uid,'alg_label':alg_label})
t = []
for item in list_of_query_dict:
try:
t.append(item['network_delay'])
except:
pass
fig, ax = plt.subplots(subplot_kw=dict(axisbg='#FFFFFF'))
ax.hist(t,MAX_SAMPLES_PER_PLOT,range=(0,5),alpha=0.5,color='black')
ax.set_xlim(0, 5)
ax.set_axis_off()
ax.set_xlabel('Durations (s)')
ax.set_ylabel('Count')
ax.set_title(alg_label + " - network delay", size=14)
plot_dict = mpld3.fig_to_dict(fig)
plt.close()
return plot_dict
| apache-2.0 |
sannecottaar/burnman | contrib/CHRU2014/paper_fit_data.py | 5 | 4849 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
paper_fit_data
--------------
This script reproduces :cite:`Cottaar2014` Figure 4.
This example demonstrates BurnMan's functionality to fit thermoelastic data to
both 2nd and 3rd orders using the EoS of the user's choice at 300 K. User's
must create a file with :math:`P, T` and :math:`V_s`. See input_minphys/ for example input
files.
requires:
- compute seismic velocities
teaches:
- averaging
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
if not os.path.exists('burnman') and os.path.exists('../../burnman'):
sys.path.insert(1, os.path.abspath('../..'))
import scipy.optimize as opt
import burnman
import misc.colors as colors
# hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1, os.path.abspath('..'))
figsize = (6, 5)
prop = {'size': 12}
plt.rc('text', usetex=True)
plt.rcParams['text.latex.preamble'] = r'\usepackage{relsize}'
plt.rc('font', family='sans-serif')
figure = plt.figure(dpi=100, figsize=figsize)
def calc_shear_velocities(G_0, Gprime_0, mineral, pressures):
mineral.params['G_0'] = G_0
mineral.params['Gprime_0'] = Gprime_0
shear_velocities = np.empty_like(pressures)
for i in range(len(pressures)):
mineral.set_state(pressures[i], 0.0) # set state with dummy temperature
shear_velocities[i] = mineral.v_s
return shear_velocities
def error(guess, test_mineral, pressures, obs_vs):
vs = calc_shear_velocities(guess[0], guess[1], test_mineral, pressures)
vs_l2 = [(vs[i] - obs_vs[i]) * (vs[i] - obs_vs[i])
for i in range(len(obs_vs))]
l2_error = sum(vs_l2)
return l2_error
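# scipy.optimize.fmin below minimizes this scalar misfit over (G_0, Gprime_0).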
if __name__ == "__main__":
mg_perovskite_data = np.loadtxt("Murakami_perovskite.txt")
obs_pressures = mg_perovskite_data[:, 0] * 1.e9
obs_vs = mg_perovskite_data[:, 2] * 1000.
pressures = np.linspace(25.e9, 135.e9, 100)
# make the mineral to fit
guess = [200.e9, 2.0]
mg_perovskite_test = burnman.Mineral()
mg_perovskite_test.params['V_0'] = 24.45e-6
mg_perovskite_test.params['K_0'] = 281.e9
mg_perovskite_test.params['Kprime_0'] = 4.1
mg_perovskite_test.params['molar_mass'] = .10227
# first, do the second-order fit
mg_perovskite_test.set_method("bm2")
func = lambda x: error(x, mg_perovskite_test, obs_pressures, obs_vs)
sol = opt.fmin(func, guess)
print("2nd order fit: G = ", sol[0] / 1.e9, "GPa\tG' = ", sol[1])
model_vs_2nd_order_correct = calc_shear_velocities(
sol[0], sol[1], mg_perovskite_test, pressures)
mg_perovskite_test.set_method("bm3")
model_vs_2nd_order_incorrect = calc_shear_velocities(
sol[0], sol[1], mg_perovskite_test, pressures)
# now do third-order fit
mg_perovskite_test.set_method("bm3")
func = lambda x: error(x, mg_perovskite_test, obs_pressures, obs_vs)
sol = opt.fmin(func, guess)
print("3rd order fit: G = ", sol[0] / 1.e9, "GPa\tG' = ", sol[1])
model_vs_3rd_order_correct = calc_shear_velocities(
sol[0], sol[1], mg_perovskite_test, pressures)
mg_perovskite_test.set_method("bm2")
model_vs_3rd_order_incorrect = calc_shear_velocities(
sol[0], sol[1], mg_perovskite_test, pressures)
plt.plot(
pressures / 1.e9, model_vs_2nd_order_correct / 1000., color=colors.color(3), linestyle='-',
marker='x', markevery=7, linewidth=1.5, label="Correct 2nd order extrapolation")
plt.plot(
pressures / 1.e9, model_vs_2nd_order_incorrect / 1000., color=colors.color(3), linestyle='--',
marker='x', markevery=7, linewidth=1.5, label="2nd order fit, 3rd order extrapolation")
plt.plot(
pressures / 1.e9, model_vs_3rd_order_correct / 1000., color=colors.color(1),
linestyle='-', linewidth=1.5, label="Correct 3rd order extrapolation")
plt.plot(
pressures / 1.e9, model_vs_3rd_order_incorrect / 1000., color=colors.color(1),
linestyle='--', linewidth=1.5, label="3rd order fit, 2nd order extrapolation")
plt.scatter(obs_pressures / 1.e9, obs_vs /
1000., zorder=1000, marker='o', c='w')
plt.ylim([6.7, 8])
plt.xlim([25., 135.])
if "RUNNING_TESTS" not in globals():
plt.ylabel(
r'Shear velocity ${V}_{\mathlarger{\mathlarger{\mathlarger{s}}}}$ (km/s)')
plt.xlabel("Pressure (GPa)")
plt.legend(loc="lower right", prop=prop)
if "RUNNING_TESTS" not in globals():
plt.savefig("example_fit_data.pdf", bbox_inches='tight')
plt.show()
| gpl-2.0 |
tleonhardt/CodingPlayground | dataquest/SQL_and_Databases/next_steps.py | 1 | 2113 | #!/usr/bin/env python
"""
Example looking at answering a few interesting questions using the SQLite database from the CIA
World Factbook:
* Which countries will lose population over the next 35 years?
* Which countries have the lowest/highest population density?
* Which countries receive the most immigrants? Which countries lose the most emigrants?
"""
import pandas as pd
import sqlite3
# Create a connection to the SQLite database
conn = sqlite3.connect('../data/factbook.db')
# Read the facts table into a Pandas DataFrame
query = 'select * from facts;'
facts = pd.read_sql_query(query, conn)
# Which countries will lose population over the next 35 years?
# Sort by population growth and print
lose_pop = facts[facts['population_growth'] < 0]
print("There are {} countries that will lose population!".format(len(lose_pop)))
# If this were true it would be a staggering fact: NO countries with negative population growth.
# Actually there is no way this should be true, which leads me to believe that whoever extracted
# the data from the original HTML version of the CIA World Factbook didn't do it correctly, and
# perhaps didn't deal with negative numbers.
# Countries like Syria which have been torn by war most definitely have had a negative population
# growth rate in recent years, due in part to an increased death rate and in part to an increased
# emigration rate.
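# A quick sanity check on that suspicion (illustrative; assumes the column
# parsed as numeric):
# print(facts['population_growth'].describe())  # min should be negative if
#                                               # negatives survived extraction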
# Which countries have the lowest/highest population density?
# Assumption: population density = population / land_area
# First we need to drop any countries with a NaN or zero area_land
facts = facts[facts['area_land'].notnull() & (facts['area_land'] != 0)]  # parentheses needed: & binds tighter than !=
facts['pop_density'] = facts['population'] / facts['area_land']
lowest_density = facts.sort_values(by='pop_density', ascending=True)
highest_density = facts.sort_values(by='pop_density', ascending=False)
cols = ['name', 'pop_density']
N = 5
print("\nThe countries with the lowest population density are:\n{}".format(lowest_density[cols].head(N)))
print("\nThe countries with the highest population density are:\n{}".format(highest_density[cols].head(N)))
| mit |
Bobeye/LinkMechanismStewartGouph | OFWTP/configure.py | 1 | 30594 | import math
import time
import random
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import inv
# Mechanical Parameters
BOTTOM_RADIUS = 119.3649864910141897009273117677145601037135856257366363864 # distance from the center of the bottom plate to the servo center
TOP_RADIUS = 74.33034373659252761306004106965698325724492756860430780281 # distance from the center of the top plate to the top joint
BOTTOM_ANGLE = 0.546166563433787740559629712911971244663191407124391241530
TOP_ANGLE = 0.343023940420703397073528599413809616687563147674740286598
LINKA = 75.22 # length of the body link connected to the servo, the first part of the link-mechanism leg
LINKB = 120.00# length of the body link connected to the top plate, the second part of the link-mechanism leg
ZEROHEIGHT = 200.0
SERVOHEIGHT = 41.5
# COORDINATES & NUMBER TAG:
# The space origin always follows the right-handed coordinate system. The origin is located at the center of the bottom plate.
# The Num 0 tag always refers to the servo located within the third angle projection. The tagging sequence follows the anti-clockwise direction,
# which means tag 1 refers to the servo located on the right side of servo 0.
class CONFIGURE:
# data check
def PositiveDataCheck(self):
if BOTTOM_RADIUS <= 0 or TOP_RADIUS <= 0 or BOTTOM_ANGLE <= 0 or TOP_ANGLE <= 0 or LINKA <= 0 or LINKB <= 0:
print("Warning! Strcture dimensions must be positive!")
def OriginPosition(self):
BottomCoordinates = [[BOTTOM_RADIUS * math.cos(BOTTOM_ANGLE), -BOTTOM_RADIUS * math.sin(BOTTOM_ANGLE), 0],
[BOTTOM_RADIUS * math.cos(BOTTOM_ANGLE), BOTTOM_RADIUS * math.sin(BOTTOM_ANGLE), 0],
[-BOTTOM_RADIUS * math.sin(math.radians(30)-BOTTOM_ANGLE), BOTTOM_RADIUS * math.cos(math.radians(30)-BOTTOM_ANGLE), 0],
[-BOTTOM_RADIUS * math.sin(math.radians(30)+BOTTOM_ANGLE), BOTTOM_RADIUS * math.cos(math.radians(30)+BOTTOM_ANGLE), 0],
[-BOTTOM_RADIUS * math.sin(math.radians(30)+BOTTOM_ANGLE), -BOTTOM_RADIUS * math.cos(math.radians(30)+BOTTOM_ANGLE), 0],
[-BOTTOM_RADIUS * math.sin(math.radians(30)-BOTTOM_ANGLE), -BOTTOM_RADIUS * math.cos(math.radians(30)-BOTTOM_ANGLE), 0]]
# print('BottomCoordinates = ',BottomCoordinates)
TopCoordinates = [[TOP_RADIUS * math.cos(TOP_ANGLE), -TOP_RADIUS * math.sin(TOP_ANGLE), ZEROHEIGHT],
[TOP_RADIUS * math.cos(TOP_ANGLE), TOP_RADIUS * math.sin(TOP_ANGLE), ZEROHEIGHT],
[-TOP_RADIUS * math.sin(math.radians(30)-TOP_ANGLE), TOP_RADIUS * math.cos(math.radians(30)-TOP_ANGLE), ZEROHEIGHT],
[-TOP_RADIUS * math.sin(math.radians(30)+TOP_ANGLE), TOP_RADIUS * math.cos(math.radians(30)+TOP_ANGLE), ZEROHEIGHT],
[-TOP_RADIUS * math.sin(math.radians(30)+TOP_ANGLE), -TOP_RADIUS * math.cos(math.radians(30)+TOP_ANGLE), ZEROHEIGHT],
[-TOP_RADIUS * math.sin(math.radians(30)-TOP_ANGLE), -TOP_RADIUS * math.cos(math.radians(30)-TOP_ANGLE), ZEROHEIGHT]]
# print('TopCoordinates = ',TopCoordinates)
ServoCoordinates = BottomCoordinates
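# NOTE: ServoCoordinates is bound to the same list object as
# BottomCoordinates, so the z update below also modifies the bottom-plate
# rows; use a copy (e.g. [row[:] for row in BottomCoordinates]) if
# independent bottom coordinates are ever needed.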
for i in range(6):
ServoCoordinates[i][2] = SERVOHEIGHT
# print('ServoCoordinates',ServoCoordinates)
InitialCoordinates = [BottomCoordinates, TopCoordinates, ServoCoordinates]
return InitialCoordinates
def TopplateMotion(self, TopCoordinates, TopMotion):
TempTop = TopCoordinates
temptopz = TempTop[0][2]
for i in range(6):
TempTop[i][2] = 0.0
Top = TempTop
deltaX = TopMotion[0]
deltaY = TopMotion[1]
deltaZ = TopMotion[2]
alpha = TopMotion[3]
belta = TopMotion[4]
gamma = TopMotion[5]
def S(angle):
return math.sin(angle)
def C(angle):
return math.cos(angle)
RotationM = [[C(gamma) * C(belta) , -S(gamma) * C(alpha) + C(gamma) * S(belta) * S(alpha) , S(gamma) * S(alpha) + C(gamma) * S(belta) * C(alpha)],
[S(gamma) * C(belta) , C(gamma) * C(alpha) + S(gamma) * S(belta) * S(alpha) , -C(gamma) * S(alpha) + S(gamma) * S(belta) * C(alpha)],
[-S(belta) , C(belta) * S(alpha) , C(belta) * C(alpha)]]
TranslationM = [deltaX , deltaY, deltaZ]
for i in range(6):
for j in range(3):
Top[i][j] = RotationM[j][0] * TempTop[i][0] + RotationM[j][1] * TempTop[i][1] + RotationM[j][2] * TempTop[i][2] + TranslationM[j]
Top[i][2] = Top[i][2] + temptopz
# print('After-Motion Top plate Coordinates', Top)
return Top
def LegLength(self, AimTopplate, ServoCoordinates):
# Calculate leg length
LegLength = [0.0] * 6
for i in range(6):
TempDistance = 0.0
for j in range(3):
TempDistance = TempDistance + ((AimTopplate[i][j]-ServoCoordinates[i][j])**2)
LegLength[i] = math.sqrt(TempDistance)
# print('Leglength = ', LegLength)
return LegLength
def InverseKinematics(self, AimTopplate, ServoCoordinates, LinkA, LinkB):
# Calculate leg length
LegLength = [0.0] * 6
for i in range(6):
TempDistance = 0.0
for j in range(3):
TempDistance = TempDistance + ((AimTopplate[i][j]-ServoCoordinates[i][j])**2)
LegLength[i] = math.sqrt(TempDistance)
# print('Leglength = ', LegLength)
# Calculate leg direction
LegAngle = AimTopplate
TempLegAngle = AimTopplate
for i in range(6):
for j in range(3):
LegAngle[i][j] = AimTopplate[i][j] - ServoCoordinates[i][j]
TempLegAngle[i][j] = LegAngle[i][j]
# LegAngle[i][0], LegAngle[i][1] = LegAngle[i][1], -LegAngle[i][0] # Switch the coordinates system from the right-handed to a standard 2D coordinates
# print('LegAngle', LegAngle)
YT = range(6)
ZT = range(6)
for i in range(6):
ZT[i] = LegAngle[i][2]
if i <= 1:
YT[i] = LegAngle[i][1]
elif i == 2:
axisrot = math.pi*2/3
ca = math.cos(axisrot)
sa = math.sin(axisrot)
x0 = LegAngle[i][0]
y0 = LegAngle[i][1]
YT[i] = y0 * ca - x0 * sa
elif i == 3:
axisrot = math.pi*2/3
ca = math.cos(axisrot)
sa = math.sin(axisrot)
x0 = LegAngle[i][0]
y0 = LegAngle[i][1]
YT[i] = y0 * ca - x0 * sa
elif i == 4:
axisrot = -math.pi*2/3
ca = math.cos(axisrot)
sa = math.sin(axisrot)
x0 = LegAngle[i][0]
y0 = LegAngle[i][1]
YT[i] = y0 * ca - x0 * sa
elif i == 5:
axisrot = -math.pi*2/3
ca = math.cos(axisrot)
sa = math.sin(axisrot)
x0 = LegAngle[i][0]
y0 = LegAngle[i][1]
YT[i] = y0 * ca - x0 * sa
# print('YT', YT)
# print('ZT', ZT)
ALPHA = [0.0] * 6
AimServoAngle = [0.0] * 6
# Motion Planning
for i in range(6):
M = ((LegLength[i] ** 2) + (LinkA ** 2) - (LinkB ** 2)) / (2 * LinkA * ZT[i])
N = YT[i] / ZT[i]
# print('M', M)
# print('N', N)
# cos(alpha) has two results
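# Sketch of the derivation (assuming the arm angle alpha is measured from
# the base plane, so that M = sin(alpha) -/+ N*cos(alpha) for the two leg
# orientations): squaring away sin(alpha) gives the quadratic
#     (N**2 + 1)*c**2 +/- 2*M*N*c + (M**2 - 1) = 0,   c = cos(alpha),
# whose roots are c = (-/+ M*N +- sqrt(N**2 - M**2 + 1)) / (N**2 + 1);
# the branches below keep whichever root lies within the servo's +/-90 deg range.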
alpha = 0
if i % 2 == 1:
Alphaa = (M * N + (math.sqrt((N**2) - (M**2) + 1.0))) / (N**2 + 1.0)
Alphab = (M * N - (math.sqrt((N**2) - (M**2) + 1.0))) / (N**2 + 1.0)
alphaa = math.acos(Alphaa)
alphab = math.acos(Alphab)
# print('a', alphaa)
# print('b', alphab)
if abs(alphaa) <= 1.5708:
alpha = alphaa
elif abs(alphab) <= 1.5708:
alpha = alphab
ALPHA[i] = alpha
AimServoAngle[i] = 90 - math.degrees(ALPHA[i])
else:
Alphaa = (-(M * N) + (math.sqrt((N**2) - (M**2) + 1.0))) / (N**2 + 1.0)
Alphab = (-(M * N) - (math.sqrt((N**2) - (M**2) + 1.0))) / (N**2 + 1.0)
alphaa = math.acos(Alphaa)
alphab = math.acos(Alphab)
# print('a', alphaa)
# print('b', alphab)
if abs(alphaa) <= 1.5708:
alpha = alphaa
elif abs(alphab) <= 1.5708:
alpha = alphab
ALPHA[i] = alpha
AimServoAngle[i] = 90 - math.degrees(ALPHA[i])
# print('ALPHA', ALPHA)
# print('AimServoAngle = ', AimServoAngle)
return AimServoAngle
def MonteCarlo(self):
sampleResolution = 12.0
sampleStep = 4.0
sampleNum = int((sampleResolution*2+1)/sampleStep)**6
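# with resolution 12 and step 4 this is int((2*12+1)/4)**6 = 6**6 = 46656 samples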
# Error range set
# deltaTopplate = [0.1,0.1,0.1,0.1,0.1,0.1] # angles are in degrees!!!!!
# deltaTopplate = [1.0,0.0,0.0,0.0,0.0,0.0] # angles are in degrees!!!!!
# deltaTopplate = [0.0,1.0,0.0,0.0,0.0,0.0] # angles are in degrees!!!!!
# deltaTopplate = [0.0,0.0,1.0,0.0,0.0,0.0] # angles are in degrees!!!!!
deltaTopplate = [0.19,0.28,0.29,0.063,0.063,0.2] # angles are in degrees!!!!!
# Random
sampleList = [[0],[0],[0],[0],[0],[0]]
sampleTopplate = [0,0,0,0,0,0]
tempsampleList = [0]
for i in range(6):
tempsampleList = np.random.uniform(-deltaTopplate[i],deltaTopplate[i],sampleNum)
for j in range(sampleNum):
sampleList[i].append(tempsampleList[j])
sampleList[i].pop(0) # drop the leading placeholder 0
for i in [3,4,5]:
for j in range(len(sampleList[i])):
sampleList[i][j] = math.radians(sampleList[i][j])
# print('sampleList',sampleList)
print('MonteCarlo sampleNum:', sampleNum)
return sampleList
def ForwardKinematics(self, ServoAngle, ServoCoordinates, TopCoordinates, ZeroTopplate, LinkA, LinkB, DBP):
# Degree to radius
for i in range(6):
ServoAngle[i] = math.radians(ServoAngle[i])
# Define the position of the universal joint between LINKA and LINKB
UniversalJointAB = ServoCoordinates
UniversalJointAB = [ [ServoCoordinates[0][0] , ServoCoordinates[0][1]-(LINKA*math.sin(ServoAngle[0])) , ServoCoordinates[0][2]+(LINKA*math.cos(ServoAngle[0]))],
[ServoCoordinates[1][0] , ServoCoordinates[1][1]+(LINKA*math.sin(ServoAngle[1])) , ServoCoordinates[1][2]+(LINKA*math.cos(ServoAngle[1]))],
[ServoCoordinates[2][0]+(LINKA*math.sin(ServoAngle[2])*math.cos(BOTTOM_ANGLE)) , ServoCoordinates[2][1]+(LINKA*math.sin(ServoAngle[2])*math.sin(BOTTOM_ANGLE)) , ServoCoordinates[2][2]+(LINKA*math.cos(ServoAngle[2]))],
[ServoCoordinates[3][0]-(LINKA*math.sin(ServoAngle[3])*math.cos(BOTTOM_ANGLE)) , ServoCoordinates[3][1]-(LINKA*math.sin(ServoAngle[3])*math.sin(BOTTOM_ANGLE)) , ServoCoordinates[3][2]+(LINKA*math.cos(ServoAngle[3]))],
[ServoCoordinates[4][0]-(LINKA*math.sin(ServoAngle[4])*math.cos(BOTTOM_ANGLE)) , ServoCoordinates[4][1]+(LINKA*math.sin(ServoAngle[4])*math.sin(BOTTOM_ANGLE)) , ServoCoordinates[4][2]+(LINKA*math.cos(ServoAngle[4]))],
[ServoCoordinates[5][0]+(LINKA*math.sin(ServoAngle[5])*math.cos(BOTTOM_ANGLE)) , ServoCoordinates[5][1]-(LINKA*math.sin(ServoAngle[5])*math.sin(BOTTOM_ANGLE)) , ServoCoordinates[5][2]+(LINKA*math.cos(ServoAngle[5]))]]
# print('UniversalJointAB:', UniversalJointAB)
# Check LINKA's working range
def CrossProduct(V1,V2): # cross product: (V1y*V2z-V1z*V2y, V1z*V2x-V1x*V2z, V1x*V2y-V1y*V2x)
crossproduct = [V1[1]*V2[2]-V2[1]*V1[2], V1[2]*V2[0]-V1[0]*V2[2], V1[0]*V2[1]-V1[1]*V2[0]]
return crossproduct
def CCW(A,B,C): # See if three points are listed counter-clockwise
SegAB = [0,0,0]
SegAC = [0,0,0]
for i in range(3):
SegAB[i] = B[i] - A[i]
SegAC[i] = C[i] - A[i]
if CrossProduct(SegAB,SegAC)[2] > 0:
return True
else:
return False
def Intersect(PA1,PA2,PB1,PB2): # See if line segment PA1-PA2 and PB1-PB2 interacts, TRUE for intersect
return CCW(PA1,PB1,PB2) != CCW(PA2,PB1,PB2) and CCW(PA1,PA2,PB1) != CCW(PA1,PA2,PB2)
def Coplanar(A,B,C,D): # See if four points are coplanar
SegAB = [0,0,0]
SegAC = [0,0,0]
SegAD = [0,0,0]
for i in range(3):
SegAB[i] = B[i] - A[i]
SegAC[i] = C[i] - A[i]
SegAD[i] = D[i] - A[i]
coplanarVec = CrossProduct(CrossProduct(SegAB,SegAC),CrossProduct(SegAB,SegAD))
if coplanarVec[0] == 0 and coplanarVec[1] == 0 and coplanarVec[2] == 0:
return True
else:
return False
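# Minimal usage sketch for the helpers above (toy points, purely illustrative):
# A1, A2 = [0,0,0], [2,2,0]
# B1, B2 = [0,2,0], [2,0,0]
# Coplanar(A1,A2,B1,B2)   # -> True, all four points lie in z = 0
# Intersect(A1,A2,B1,B2)  # -> True, the two diagonals cross at (1,1,0)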
# first, see if the segment points of the two links are coplanar, second, see if the two links are interacting
for i in range(6):
if i < 5:
if Coplanar(ServoCoordinates[i],UniversalJointAB[i],ServoCoordinates[i+1],UniversalJointAB[i+1]) == True:
if Intersect(ServoCoordinates[i],UniversalJointAB[i],ServoCoordinates[i+1],UniversalJointAB[i+1]) == True:
print("Warning! Links have intersetions!!!")
else:
print("Links are safe to go!")
else:
if Coplanar(ServoCoordinates[5],UniversalJointAB[5],ServoCoordinates[0],UniversalJointAB[0]) == True:
if Intersect(ServoCoordinates[5],UniversalJointAB[5],ServoCoordinates[0],UniversalJointAB[0]) == True:
print("Warning! Links have intersetions!!!")
else:
print("Links are safe to go!")
# Newton-Raphson Method
print('Newton-Raphson is on!!!')
print('Initial Top Plate = ', TopCoordinates)
print('Initial Servo Plate = ', ServoCoordinates)
print('Given servo angle = ', ServoAngle)
print('UniversalJointAB pos = ', UniversalJointAB)
def F(TopCoordinates,TopMotion,UniversalJointAB,LinkB):
F = [0.0] * 6
TempTop = TopCoordinates
Top = TopCoordinates
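# NOTE: Top, TempTop and the caller's TopCoordinates are one and the same
# list object here, so each assignment below overwrites inputs still needed
# for the remaining components and mutates the caller's data -- which is why
# the callers rebuild the TopCoordinates literal before every call. Computing
# into a fresh copy (e.g. copy.deepcopy(TopCoordinates)) would avoid both issues.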
deltaX = TopMotion[0]
deltaY = TopMotion[1]
deltaZ = TopMotion[2]
alpha = TopMotion[3]
belta = TopMotion[4]
gamma = TopMotion[5]
def S(angle):
return math.sin(angle)
def C(angle):
return math.cos(angle)
RotationM = [[C(gamma) * C(belta) , -S(gamma) * C(alpha) + C(gamma) * S(belta) * S(alpha) , S(gamma) * S(alpha) + C(gamma) * S(belta) * C(alpha)],
[S(gamma) * C(belta) , C(gamma) * C(alpha) + S(gamma) * S(belta) * S(alpha) , -C(gamma) * S(alpha) + S(gamma) * S(belta) * C(alpha)],
[-S(belta) , C(belta) * S(alpha) , C(belta) * C(alpha)]]
TranslationM = [deltaX , deltaY, deltaZ]
for i in range(6):
for j in range(3):
Top[i][j] = RotationM[j][0] * TempTop[i][0] + RotationM[j][1] * TempTop[i][1] + RotationM[j][2] * TempTop[i][2] + TranslationM[j] - UniversalJointAB[i][j]
F[i] = math.sqrt(Top[i][0] ** 2 + Top[i][1] ** 2 + Top[i][2] ** 2) - LinkB
return F
# TopMotion = [0.0,0.0,0.0,0.0,0.0,0.0] # Angle in radians
# F = F(TopCoordinates,TopMotion,UniversalJointAB,LinkB)
# print('text F result', F)
def f(TopCoordinates,TopMotion,UniversalJointAB,LinkB):
TempTop = TopCoordinates
Top = TopCoordinates
deltaX = TopMotion[0]
deltaY = TopMotion[1]
deltaZ = TopMotion[2]
alpha = TopMotion[3]
belta = TopMotion[4]
gamma = TopMotion[5]
def S(angle):
return math.sin(angle)
def C(angle):
return math.cos(angle)
RotationM = [[C(gamma) * C(belta) , -S(gamma) * C(alpha) + C(gamma) * S(belta) * S(alpha) , S(gamma) * S(alpha) + C(gamma) * S(belta) * C(alpha)],
[S(gamma) * C(belta) , C(gamma) * C(alpha) + S(gamma) * S(belta) * S(alpha) , -C(gamma) * S(alpha) + S(gamma) * S(belta) * C(alpha)],
[-S(belta) , C(belta) * S(alpha) , C(belta) * C(alpha)]]
TranslationM = [deltaX , deltaY, deltaZ]
for i in range(6):
for j in range(3):
Top[i][j] = RotationM[j][0] * TempTop[i][0] + RotationM[j][1] * TempTop[i][1] + RotationM[j][2] * TempTop[i][2] + TranslationM[j] - UniversalJointAB[i][j]
f = Top
return f
def dF(TopCoordinates,TopMotion):
dF = [[[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]]],
[[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]]],
[[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]]],
[[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]]],
[[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]]],
[[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]]],]
Top = TopCoordinates
deltaX = TopMotion[0]
deltaY = TopMotion[1]
deltaZ = TopMotion[2]
alpha = TopMotion[3]
belta = TopMotion[4]
gamma = TopMotion[5]
def S(angle):
return math.sin(angle)
def C(angle):
return math.cos(angle)
for i in range(6):
# d(f)/d(deltaX) Y Z
dF[i][0] = [1.0,0.0,0.0]
dF[i][1] = [0.0,1.0,0.0]
dF[i][2] = [0.0,0.0,1.0]
# d(f)/d(alpha)
dF[i][3] = [S(gamma)*S(alpha)*Top[i][1] + C(gamma)*S(belta)*C(alpha)*Top[i][1] + S(gamma)*C(alpha)*Top[i][2] - C(gamma)*S(belta)*S(alpha)*Top[i][2],
-C(gamma)*S(alpha)*Top[i][1] + S(gamma)*S(belta)*C(alpha)*Top[i][1] - C(gamma)*C(alpha)*Top[i][2] - S(gamma)*S(belta)*S(alpha)*Top[i][2],
C(belta)*C(alpha)*Top[i][1] - C(belta)*S(alpha)*Top[i][2]]
# d(f)/d(belta)
dF[i][4] = [-C(gamma)*S(belta)*Top[i][0] + C(gamma)*C(belta)*S(alpha)*Top[i][1] + C(gamma)*C(belta)*C(alpha)*Top[i][2],
-S(gamma)*S(belta)*Top[i][0] + S(gamma)*C(belta)*S(alpha)*Top[i][1] + S(gamma)*C(belta)*C(alpha)*Top[i][2],
-C(belta)*Top[i][0] - S(belta)*S(alpha)*Top[i][1] - S(belta)*C(alpha)*Top[i][2]]
# d(f)/d(gamma)
dF[i][5] = [-S(gamma)*C(belta)*Top[i][0] - C(gamma)*C(alpha)*Top[i][1] - S(gamma)*S(belta)*S(alpha)*Top[i][1] + C(gamma)*S(alpha)*Top[i][2] - S(gamma)*S(belta)*C(alpha)*Top[i][2],
C(gamma)*C(belta)*Top[i][0] - S(gamma)*C(alpha)*Top[i][1] + C(gamma)*S(belta)*S(alpha)*Top[i][1] + S(gamma)*S(alpha)*Top[i][2] + C(gamma)*S(belta)*C(alpha)*Top[i][2],
0]
return dF
# TopMotion = [0.0,0.0,0.0,0.0,0.0,0.0] # Angle in radians
# dF = dF(TopCoordinates,TopMotion)
# print('text dF result', dF)
# NewtonRaphson: # Xn+1 = Xn - f(Xn)/df(Xn)
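# The same update rule in scalar form (illustrative only):
#   x = 1.0                               # initial guess for sqrt(2)
#   for _ in range(5):
#       x = x - (x*x - 2.0) / (2.0*x)     # x_{n+1} = x_n - f(x_n)/f'(x_n)
#   # x -> 1.41421356...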
resolution = 0.1
count = 1
start = time.time()
CurrentTopMotion = [0.0,0.0,0.0,0.0,0.0,0.0]
NextTopMotion = [0.0,0.0,0.0,0.0,0.0,0.0]
TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
F0 = F(TopCoordinates,CurrentTopMotion,UniversalJointAB,LinkB)
TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
dF0 = dF(TopCoordinates,CurrentTopMotion)
TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
f0 = f(TopCoordinates,CurrentTopMotion,UniversalJointAB,LinkB)
TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
for i in range(6): # [deltaX, deltaY, deltaZ, alpha, belta, gamma]
Sum = 0.0
for j in range(6): # leg 0 ,1 ,2 3 4 5
Sum = Sum + ( F0[j] / (2 * (dF0[j][i][0] * f0[j][0] + dF0[j][i][1] * f0[j][1] + dF0[j][i][2] * f0[j][2])) )
NextTopMotion[i] = CurrentTopMotion[i] - Sum
print ('NextTopMotion = ', NextTopMotion)
print ('TP', TopCoordinates)
F1 = F(TopCoordinates,NextTopMotion,UniversalJointAB,LinkB)
print('PreviousF: ', F0)
print('NextF: ', F1)
# Permit = 0
# for i in range(6):
# if abs(F1[i]) <= resolution:
# Permit = Permit + 1
# while Permit < 6:
Sum = 0.0
for i in range(6):
Sum = Sum + F1[i]
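# NOTE: the F1 components are signed, so they can cancel in this sum; a
# stricter convergence test would accumulate abs(F1[i]) (as the
# commented-out Permit logic above effectively did).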
while Sum >= resolution:
count = count + 1
CurrentTopMotion = NextTopMotion
TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
F0 = F(TopCoordinates,CurrentTopMotion,UniversalJointAB,LinkB)
TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
dF0 = dF(TopCoordinates,CurrentTopMotion)
TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
f0 = f(TopCoordinates,CurrentTopMotion,UniversalJointAB,LinkB)
TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
for i in range(6): # [deltaX, deltaY, deltaZ, alpha, belta, gamma]
Sum = 0.0
for j in range(6): # leg 0 ,1 ,2 3 4 5
Sum = Sum + ( F0[j] / (2 * (dF0[j][i][0] * f0[j][0] + dF0[j][i][1] * f0[j][1] + dF0[j][i][2] * f0[j][2])) )
NextTopMotion[i] = CurrentTopMotion[i] - Sum
print ('NextTopMotion = ', NextTopMotion)
print ('TP', TopCoordinates)
F1 = F(TopCoordinates,NextTopMotion,UniversalJointAB,LinkB)
print('PreviousF: ', F0)
print('NextF: ', F1)
Sum = 0.0
for i in range(6):
Sum = Sum + F1[i]
# Permit = 0
# for i in range(6):
# if F1[i] <= resolution:
# Permit = Permit + 1
end = time.time()
print ('Iterations: ', count, 'Total Time: ', end-start)
print ('Aim Topplate Motion: ', NextTopMotion)
def main():
# 1
# initial the configure class
configure = CONFIGURE()
# 2
# Initial coordinates setup
InitialCordinates=configure.OriginPosition()
BottomCoordinates=InitialCordinates[0]
TopCoordinates=InitialCordinates[1]
ServoCoordinates=InitialCordinates[2]
# # 3
# # # Move the TOP PLATE
# # TopMotion = [0.0,0.0,0.0,0.0,0.0,0.0] # Angle in radians
# # AimTopplate = configure.TopplateMotion(TopCoordinates, TopMotion)
# # 4
# # Inverse Kinematics
# InitialCordinates=configure.OriginPosition()
# BottomCoordinates=InitialCordinates[0]
# TopCoordinates=InitialCordinates[1]
# ServoCoordinates=InitialCordinates[2]
# TopMotion = [0.0,0.0,0.0,0.0,0.0,-0.36] # Angle in radians, given desired topplate motion
# AimTopplate = configure.TopplateMotion(TopCoordinates, TopMotion)
# AimServoPos = configure.InverseKinematics(AimTopplate, ServoCoordinates, LINKA, LINKB)
# print(AimServoPos) # in degrees
# # 5
# # MonteCarlo Accuracy Analysis
# # Move top to zero
# # ZeroTopMotion = [0.1,0.1,0.1,0.0,0.0,0.0] # Angle in radians
# # ZeroAimTopplate = configure.TopplateMotion(TopCoordinates, ZeroTopMotion)
# # ZeroAimServoPos = configure.InverseKinematics(ZeroAimTopplate, ServoCoordinates, LINKA, LINKB)
# InitialCordinates=configure.OriginPosition()
# BottomCoordinates=InitialCordinates[0]
# TopCoordinates=InitialCordinates[1]
# ServoCoordinates=InitialCordinates[2]
# print('top',TopCoordinates)
# ZeroTopMotion = [0.1,0.0,0.0,0.0,0.0,0.0] # Angle in radians
# ZeroAimTopplate = configure.TopplateMotion(TopCoordinates, ZeroTopMotion)
# ZeroLegLength = configure.LegLength(ZeroAimTopplate, ServoCoordinates)
# ZeroAimTopplate = configure.TopplateMotion(TopCoordinates, ZeroTopMotion)
# ZeroAimServoPos = configure.InverseKinematics(ZeroAimTopplate, ServoCoordinates, LINKA, LINKB)
# print('ZeroPos', ZeroAimServoPos)
# # ZeroTopMotion = [0.1,0.1,0.1,0.0,0.0,0.0] # Angle in radians
# # ZeroAimTopplate = configure.TopplateMotion(TopCoordinates, ZeroTopMotion)
# # ZeroAimServoPos = configure.InverseKinematics(ZeroAimTopplate, ServoCoordinates, LINKA, LINKB)
# # print(ZeroAimServoPos)
# # Monte Carlo
# sampleTopplate = configure.MonteCarlo()
# for i in range(len(sampleTopplate)):
# for j in range(6):
# sampleTopplate[i][j] = sampleTopplate[i][j] + ZeroTopMotion[j]
# sampleLegLength = [ZeroLegLength]
# TopMotionList = [ZeroTopMotion]
# AimTopplateList = [ZeroAimTopplate]
# AimServoPosList = [ZeroAimServoPos]
# for i in range(len(sampleTopplate[0])):
# TopMotion = [sampleTopplate[0][i],sampleTopplate[1][i],sampleTopplate[2][i],sampleTopplate[3][i],sampleTopplate[4][i],sampleTopplate[5][i]]
# TopMotionList.append(TopMotion)
# TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
# for i in range(len(TopMotionList)):
# TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
# AimTopplate = configure.TopplateMotion(TopCoordinates, TopMotionList[i])
# InitialCordinates=configure.OriginPosition()
# TopCoordinates=InitialCordinates[1]
# AimTopplateList.append(AimTopplate)
# # Leg Length Analysis
# for i in range(len(AimTopplateList)):
# LegLength = configure.LegLength(AimTopplateList[i], ServoCoordinates)
# sampleLegLength.append(LegLength)
# # Servo Angle Analysis
# for i in range(1,len(AimTopplateList)):
# AimServoPos = configure.InverseKinematics(AimTopplateList[i], ServoCoordinates, LINKA, LINKB)
# TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
# InitialCordinates=configure.OriginPosition()
# ServoCoordinates=InitialCordinates[2]
# AimServoPosList.append(AimServoPos)
# # print('Aim Servo Position', AimServoPosList)
# sampleServoAngle = [[0],[0],[0],[0],[0],[0]]
# for i in range(len(AimServoPosList)):
# for j in range(6):
# sampleServoAngle[j].append(AimServoPosList[i][j])
# # print('Aim Servo Angle Position for each leg', sampleServoAngle)
# TempsampleServoAngle = sampleServoAngle
# for i in range(6):
# sampleServoAngle[i] = sorted(sampleServoAngle[i])
# # MC accuracy data analysis
# goodCount = [0.0,0.0,0.0,0.0,0.0,0.0]
# goodRatio = [0.0,0.0,0.0,0.0,0.0,0.0]
# for i in range(6):
# for angle in sampleServoAngle[i]:
# if angle <= ZeroAimServoPos[i] + 0.5 and angle >= ZeroAimServoPos[i] - 0.5:
# goodCount[i] = goodCount[i] + 1.0
# goodRatio[i] = goodCount[i] / len(sampleServoAngle[i])
# print('Accuracy rate is:' ,goodRatio)
# for i in range(6):
# sampleServoAngle[i] = sampleServoAngle[i][1:len(sampleServoAngle[i])-1]
# # leg 0 handle
# minl0 = sampleServoAngle[0][0]
# maxl0 = sampleServoAngle[0][len(sampleServoAngle[0])-1]
# resolution = (maxl0-minl0) / 1000
# leglist = [0]
# legcount = [0]
# l0 = minl0
# i = 0
# while l0 < maxl0 and i < len(sampleServoAngle[0])-10:
# countl0 = 0
# # print(sampleServoAngle[0][i])
# while sampleServoAngle[0][i] < (l0 + resolution):
# countl0 = countl0+1
# i = i + 1
# legcount.append(countl0)
# leglist.append(l0)
# l0 = l0 + resolution
# print(len(legcount))
# print(len(leglist))
# # # Normal distribution
# # Scount = [0]
# # Mlength = np.median(sampleServoAngle[0])
# # resolution = 0.01
# # limit = 0.6
# # Slength = [0]
# # print(sampleServoAngle[0][0])
# # for i in range(len(sampleServoAngle[0])):
# # if sampleServoAngle[0][i] <=
# plt.figure(1) # MC accuracy analysis figure
# plt.title('MonteCarlo Accuracy Analysis -- Leg Length Accuracy')
# plt.subplot(211)
# plt.grid(True)
# plt.ylabel('Topplate Position')
# plt.xlabel('Sample Number')
# samplePoints = plt.plot(TopMotionList,'.')
# plt.setp(samplePoints, color='y')
# # plt.axis([170,185,0, len(sampleLegLength)])
# plt.subplot(212)
# plt.grid(True)
# plt.ylabel('Sample Number')
# plt.xlabel('Leg Length/mm')
# samplePoints = plt.plot(sampleLegLength,range(len(sampleLegLength)),'.')
# plt.setp(samplePoints, color='g')
# plt.axis([np.median(sampleLegLength)*0.98,np.median(sampleLegLength)*1.02,0, len(sampleLegLength)])
# plt.figure(2) # MC accuracy analysis figure
# plt.title('MonteCarlo Accuracy Analysis -- Servo Angle Accuracy')
# for i in range(6):
# plt.subplot(611 + i)
# plt.grid(True)
# plt.xlabel('Angle-Leg/degree')
# samplePoints = plt.plot(sampleServoAngle[i],range(len(sampleServoAngle[i])),'.')
# plt.setp(samplePoints, color='r')
# plt.axis([sampleServoAngle[i][0], sampleServoAngle[i][len(sampleServoAngle[0])-1], 0, len(sampleServoAngle[i])])
# plt.figure(3)
# plt.title('Monte-Carlo Accuracy Analysis -- #0 Servo Angle Accuracy')
# plt.grid(True)
# plt.ylabel('SampleNumber')
# plt.xlabel('Servo Angle')
# samplePoints = plt.plot(leglist,legcount,'*')
# plt.setp(samplePoints, color='r')
# plt.axis([minl0, maxl0, 0, max(legcount)*1.01])
# plt.show()
# # 6
# # # Forward Kinematics Calculation
# # InitialCordinates=configure.OriginPosition()
# # BottomCoordinates=InitialCordinates[0]
# # TopCoordinates=InitialCordinates[1]
# # ServoCoordinates=InitialCordinates[2]
# # ZeroTopplate = AimTopplate
# # ServoAngle = [25.4388,25.4388,25.4388,25.4388,25.4388,25.4388] # degree
# # # ServoAngle = [0.0,0.0,0.0,0.0,0.0,0.0] # degree
# # configure.ForwardKinematics(ServoAngle, ServoCoordinates, TopCoordinates, ZeroTopplate, LINKA, LINKB, BOTTOM_ANGLE)
if __name__=='__main__':
main() | gpl-3.0 |
MohammedWasim/scikit-learn | sklearn/decomposition/tests/test_pca.py | 199 | 10949 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
# mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is invertible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
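# h equals the expected average log-likelihood under the generating
# isotropic Gaussian (the negative of its differential entropy,
# (p/2)*log(2*pi*e*sigma**2) with sigma = 0.1), so ll1/h should be ~1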
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
nmayorov/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 16 | 7819 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assess the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
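# For example (illustrative): raw redirects {A: B, B: C} become {A: C, B: C}
# after the closure pass above, so one dict lookup resolves any redirect chain.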
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the strongest wikipedia-related components of the
# principal singular vector, which should be similar to the principal eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()  # row sums, i.e. out-degree, used to make rows stochastic
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
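# Toy usage sketch (3-node chain, illustrative only -- not the wikipedia graph):
# tiny = sparse.csr_matrix(np.array([[0, 1, 0],
#                                    [0, 0, 1],
#                                    [0, 0, 0]], dtype=np.float32))
# centrality_scores(tiny, max_iter=50)  # the sink node 2 scores highest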
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
diegocavalca/Studies | phd-thesis/benchmarkings/cs446 project-electric-load-identification-using-machine-learning/src/TestClassifiers.py | 1 | 7262 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 03 19:28:12 2015
Non Intrusive Load Monitoring for Energy Disaggregation for the REDD data
Class project for CS446: Machine Learning @ University of Illinois at Urbana-Champaign
REDD Reference: "J. Zico Kolter and Matthew J. Johnson. REDD: A public data set for
energy disaggregation research. In proceedings of the SustKDD
workshop on Data Mining Applications in Sustainability, 2011."
@authors: Anand Deshmukh, Danny Lohan
University of Illinois at Urbana-Champaign
"""
import numpy as np
import matplotlib.pyplot as plt
import csv
import time
from scipy import interpolate
from MLData import createInstances, deviceErrors
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.svm import SVR
from sklearn.lda import LDA
from sklearn.ensemble import RandomForestClassifier
from energyCalcs import actDevEnergy,appDevEnergy,energyComp
from sklearn.cluster import KMeans
for i in range(1, 6): # classifiers 1-5; the K-means branch (classify == 6) below is never selected
classify = i
if classify == 1:
cLabel = 'Naive Bayes'
clf = MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
train_instances = np.load('data11.npy')
train_labels = np.load('data12.npy')
train_labels_binary = np.load('data13.npy')
test_instances = np.load('data14.npy')
test_labels = np.load('data15.npy')
test_labels_binary = np.load('data16.npy')
use_idx = np.load('data17.npy')
device_power = np.load('data18.npy')
device_timer = np.load('data19.npy')
device_power_test = np.load('data110.npy')
device_timer_test = np.load('data111.npy')
else:
train_instances = np.load('data21.npy')
train_labels = np.load('data22.npy')
train_labels_binary = np.load('data23.npy')
test_instances = np.load('data24.npy')
test_labels = np.load('data25.npy')
test_labels_binary = np.load('data26.npy')
use_idx = np.load('data27.npy')
device_power = np.load('data28.npy')
device_timer = np.load('data29.npy')
device_power_test = np.load('data210.npy')
device_timer_test = np.load('data211.npy')
if classify == 2:
cLabel = 'Logistic Regression'
clf = LogisticRegression(C=10, penalty='l2', tol=1e-6)
elif classify == 3:
cLabel = 'SVM'
clf = SVC()
elif classify == 4:
cLabel = 'Linear Discriminant Analysis'
clf = LDA()
elif classify == 5:
cLabel = 'Random Forest Classifier'
clf = RandomForestClassifier(n_estimators=5)
#SVR(C = 1.0, epsilon=0.2)
elif classify ==6:
cLabel = 'K-means clustering'
clf = KMeans(n_clusters=512, init='random')
t0 = time.time()
clf.fit(train_instances, train_labels)
t1 = time.time()
nd = len(use_idx)
# prediction on training and test data
accuracyTr, dev_acc_train, predicted_labels_binary_train = deviceErrors(clf,nd,train_instances,train_labels,train_labels_binary)
accuracyTs, dev_acc_test, predicted_labels_binary_test = deviceErrors(clf,nd,test_instances,test_labels,test_labels_binary)
# prediction of device energy consumption
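# (behaviour inferred from the arguments: actDevEnergy integrates each
# device's measured power, appDevEnergy apportions the aggregate energy
# using the true on/off labels, and the "pre" variants do the same with
# the predicted labels -- naming as in energyCalcs)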
agg_energy_train = train_instances[:,5]
actEnergy_train = actDevEnergy(device_power,device_timer,nd)
appEnergy_train = appDevEnergy(train_labels_binary,agg_energy_train,nd)
preEnergy_train = appDevEnergy(predicted_labels_binary_train,agg_energy_train,nd)
acTap_train, acTpre_train, apTde_train = energyComp(actEnergy_train, appEnergy_train, preEnergy_train)
t2 = time.time()
agg_energy_test = test_instances[:,5]
actEnergy_test = actDevEnergy(device_power_test,device_timer_test,nd)
appEnergy_test = appDevEnergy(test_labels_binary,agg_energy_test,nd)
preEnergy_test = appDevEnergy(predicted_labels_binary_test,agg_energy_test,nd)
acTap_test, acTpre_test, apTde_test = energyComp(actEnergy_test, appEnergy_test, preEnergy_test)
t3 = time.time()
trainTime = t1-t0
test1Time = t2-t1
test2Time = t3-t2
print '================================================================================'
print 'Classifier = ' + cLabel
print 'Computational Expense for Training Classifier = ' + str(trainTime) + 's'
print '------------------------- Results for Training Data -----------------------------'
print 'Percent Accuracy on Training Data = ' + str(accuracyTr) + '%'
print 'Percent Accuracy per device on Training Data = ' + str(dev_acc_train) + '%'
print 'Actual Device Energy on Training Data = ' + str(actEnergy_train)
print 'Approx Device Energy on Training Data = ' + str(appEnergy_train)
print 'Predicted Device Energy on Training Data = ' + str(preEnergy_train)
print 'Computational Expense Classifying Training Data = ' + str(test1Time) + 's'
print 'Device Accuracy Approx. vs Actual = ' + str(acTap_train)
print 'Device Accuracy Pre. vs. Actual = ' + str(acTpre_train)
print 'Device Accuracy Pre. vs. approx. = ' + str(apTde_train)
print '------------------------- Results for Test Data -----------------------------'
print 'Percent Accuracy on Test Data = ' + str(accuracyTs) + '%'
print 'Percent Accuracy per device on Test Data = ' + str(dev_acc_test) + '%'
print 'Actual Device Energy on Test Data = ' + str(actEnergy_test)
print 'Approx Device Energy on Test Data = ' + str(appEnergy_test)
print 'Predicted Device Energy on Test Data = ' + str(preEnergy_test)
print 'Computational Expense Classifying Test Data = ' + str(test2Time) + 's'
print 'Device Accuracy Approx. vs Actual = ' + str(acTap_test)
print 'Device Accuracy Pre. vs. Actual = ' + str(acTpre_test)
print 'Device Accuracy Pre. vs. approx. = ' + str(apTde_test)
# compute the energy consumption of each device.
################################################################
# plot 4 of the devices for illustration
#fig = plt.figure(0)
#lendev = len(device_timer[:,0])
#ax1 = plt.subplot(221)
#plt.plot((device_timer[:,0]-device_timer[0,0])/(device_timer[lendev-1,0]-device_timer[0,0]),device_power[:,0])
#ax1.set_title('Electronics')
#plt.ylabel('Device Power (W)')
#
#ax2 = plt.subplot(222)
#plt.plot((device_timer[:,0]-device_timer[0,0])/(device_timer[lendev-1,0]-device_timer[0,0]),device_power[:,1])
#ax2.set_title('Refrigerator')
##plt.ylabel('Device Power (W)')
#
#ax3 = plt.subplot(223)
#plt.plot((device_timer[:,0]-device_timer[0,0])/(device_timer[lendev-1,0]-device_timer[0,0]),device_power[:,3])
#ax3.set_title('Furnace')
#plt.xlabel('Normalized Time')
#plt.ylabel('Device Power (W)')
#
#ax4 = plt.subplot(224)
#plt.plot((device_timer[:,0]-device_timer[0,0])/(device_timer[lendev-1,0]-device_timer[0,0]),device_power[:,5])
#ax4.set_title('Washer Dryer 2')
#plt.xlabel('Normalized Time')
##plt.ylabel('Device Power (W)')
#
#fig = plt.figure(1)
#plt.plot((device_timer[0:288,0]-device_timer[0,0])/(device_timer[288-1,0]-device_timer[0,0]),device_power[0:288,0])
#
#
#plt.show()
#plt.ylabel('Mains Power Consumption (W)')
#plt.xlabel('time (s)') | cc0-1.0 |
bobbymckinney/seebeck_measurement | programs/SeebeckProcessingManual.py | 1 | 10085 | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Created: 2016-02-09
@author: Bobby McKinney ([email protected])
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import minimalmodbus as modbus # For communicating with the cn7500s
import time
from datetime import datetime # for getting the current date and time
import exceptions
#==============================================================================
version = '1.0 (2016-02-09)'
###############################################################################
class SeebeckProcessing:
def __init__(self,filepath,datafile,measureList):
#self.Get_User_Input()
#self.filePath = "/Users/tobererlab1/Desktop/Skutt_0p010_PID"
self.filePath = filepath
os.chdir(self.filePath)
self.open_files(datafile)
#self.measureList = [50,75,100,125,150,175,200,225,250,275,300,325,350,375,350,325,300,275,250,225,200,175,150,125,100,75,50]
self.measureList = measureList
self.get_data()
self.plotnumber = 0
self.tolerance = 4.0
index = 0
for temp in self.measureList:
print 'measure temp: ', temp
self.timecalclist = []
self.avgTcalclist = []
self.dTcalclist = []
self.Vchromelcalclist = []
self.Valumelcalclist = []
# bin around an average temp and calculate seebeck
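# scan forward for the first sample within +/- tolerance of the setpoint,
# then collect every consecutive in-band sample into the calc lists before fitting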
for i in range(index,len(self.time)):
if (self.avgT[i] > (temp-self.tolerance)) and (self.avgT[i] < (temp+self.tolerance)):
index = i
while (self.avgT[index] > (temp-self.tolerance)) and (self.avgT[index] < (temp+self.tolerance)):
self.timecalclist.append(self.time[index])
self.avgTcalclist.append(self.avgT[index])
self.dTcalclist.append(self.dT[index])
self.Vchromelcalclist.append(self.Vch[index])
self.Valumelcalclist.append(self.Val[index])
index += 1
#end while
self.process_data()
self.plotnumber += 1
break
#end if
#end for
#end for
self.save_file()
#end def
#--------------------------------------------------------------------------
def Get_User_Input(self):
self.measureList = input("Please enter the temperatures to measure as a list (example: [50, 75, ...]): ")
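# NOTE: Python 2 input() evaluates the typed expression, so a literal list such as [50, 75, 100] is expected here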
print "Your data will be saved to Desktop automatically"
self.folder_name = raw_input("Please enter name for folder: ")
self.folder_name = str(self.folder_name)
if self.folder_name == '':
date = str(datetime.now())
self.folder_name = 'Seebeck_Processed_Data %s.%s.%s' % (date[0:13], date[14:16], date[17:19])
#end if
self.make_new_folder(self.folder_name)
#end def
#--------------------------------------------------------------------------
def make_new_folder(self, folder_name):
self.filePath = "/Users/tobererlab1/Desktop/" + folder_name
found = False
if not os.path.exists(self.filePath):
os.makedirs(self.filePath)
os.chdir(self.filePath)
#end if
else:
n = 1
while found == False:
path = self.filePath + ' - ' + str(n)
if os.path.exists(path):
n = n + 1
#end if
else:
os.makedirs(path)
os.chdir(path)
n = 1
found = True
#end else
#end while
#end else
if found == True:
self.filePath = path
#end if
#end def
#--------------------------------------------------------------------------
def open_files(self,datafile):
self.datafile = open(datafile, 'r') # opens the raw data file for reading
self.seebeckfile = open('Seebeck.csv', 'w')
seebeckheaders = 'time(s),temperature (C),seebeck_chromel (uV/K),offset_chromel (uV),R^2_chromel,seebeck_alumel (uV/K),offset_alumel (uV),R^2_alumel\n'
self.seebeckfile.write(seebeckheaders)
#end def
#--------------------------------------------------------------------------
def get_data(self):
self.data = self.datafile.readlines()
self.start = self.data.pop(0)
self.quantities = self.data.pop(0).split(',')
self.time = []
self.tempA = []
self.tempB = []
self.avgT = []
self.dT = []
self.Vch = []
self.Val = []
for d in self.data:
self.time.append( float(d.split(',')[0]) )
self.tempA.append( float(d.split(',')[1]) )
self.tempB.append( float(d.split(',')[2]) )
self.avgT.append( float(d.split(',')[3]) )
self.dT.append( float(d.split(',')[4]) )
self.Vch.append( float(d.split(',')[5]) )
self.Val.append( float(d.split(',')[6]) )
#end for
print "length of data: ", len(self.avgT)
#end def
#--------------------------------------------------------------------------
def process_data(self):
print '\n***\n'
print 'process data to get seebeck coefficient'
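# model: dV = S*dT + offset for each thermocouple leg, so the fitted slope S
# (in uV/K) is the Seebeck coefficient measured against chromel or alumel,
# and the intercept is the residual voltage offset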
time = np.average(self.timecalclist)
avgT = np.average(self.avgTcalclist)
dTchromellist = self.dTcalclist
dTalumellist = self.dTcalclist
results_chromel = {}
results_alumel = {}
coeffs_chromel = np.polyfit(dTchromellist, self.Vchromelcalclist, 1)
coeffs_alumel = np.polyfit(dTalumellist,self.Valumelcalclist,1)
# Polynomial Coefficients
polynomial_chromel = coeffs_chromel.tolist()
polynomial_alumel = coeffs_alumel.tolist()
seebeck_chromel = polynomial_chromel[0]
offset_chromel = polynomial_chromel[1]
seebeck_alumel = polynomial_alumel[0]
offset_alumel = polynomial_alumel[1]
print 'seebeck (chromel): %.3f uV/K'%(seebeck_chromel)
print 'seebeck (alumel): %.3f uV/K'%(seebeck_alumel)
print '\n***\n'
# Calculate coefficient of determination (r-squared):
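# computed below as R^2 = SSreg/SStot, which equals the usual 1 - SSres/SStot
# definition for a least-squares linear fit with an intercept term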
p_chromel = np.poly1d(coeffs_chromel)
p_alumel = np.poly1d(coeffs_alumel)
# fitted values:
yhat_chromel = p_chromel(dTchromellist)
yhat_alumel = p_alumel(dTalumellist)
# mean of values:
ybar_chromel = np.sum(self.Vchromelcalclist)/len(self.Vchromelcalclist)
ybar_alumel = np.sum(self.Valumelcalclist)/len(self.Valumelcalclist)
# regression sum of squares:
ssreg_chromel = np.sum((yhat_chromel-ybar_chromel)**2) # or sum([ (yihat - ybar)**2 for yihat in yhat])
ssreg_alumel = np.sum((yhat_alumel-ybar_alumel)**2)
# total sum of squares:
sstot_chromel = np.sum((self.Vchromelcalclist - ybar_chromel)**2)
sstot_alumel = np.sum((self.Valumelcalclist - ybar_alumel)**2) # or sum([ (yi - ybar)**2 for yi in y])
rsquared_chromel = ssreg_chromel / sstot_chromel
rsquared_alumel = ssreg_alumel / sstot_alumel
self.seebeckfile.write('%.3f,%.5f,%.5f,%.5f,%.5f,%.5f,%.5f,%.5f\n'%(time,avgT,seebeck_chromel,offset_chromel,rsquared_chromel,seebeck_alumel,offset_alumel,rsquared_alumel))
fitchromel = {}
fitalumel = {}
fitchromel['polynomial'] = polynomial_chromel
fitalumel['polynomial'] = polynomial_alumel
fitchromel['r-squared'] = rsquared_chromel
fitalumel['r-squared'] = rsquared_alumel
celsius = u"\u2103"
self.create_plot(dTalumellist,dTchromellist,self.Valumelcalclist,self.Vchromelcalclist,fitalumel,fitchromel,str(self.plotnumber)+'_'+str(avgT)+ 'C')
#end def
#--------------------------------------------------------------------------
def create_plot(self, xalumel, xchromel, yalumel, ychromel, fitalumel, fitchromel, title):
print 'create seebeck plot'
dpi = 400
plt.ioff()
# Create Plot:
fig = plt.figure(self.plotnumber, dpi=dpi)
ax = fig.add_subplot(111)
ax.grid()
ax.set_title(title)
ax.set_xlabel("dT (K)")
ax.set_ylabel("dV (uV)")
# Plot data points:
ax.scatter(xalumel, yalumel, color='r', marker='.', label="alumel Voltage")
ax.scatter(xchromel, ychromel, color='b', marker='.', label="chromel Voltage")
# Overlay linear fits:
coeffsalumel = fitalumel['polynomial']
coeffschromel = fitchromel['polynomial']
p_alumel = np.poly1d(coeffsalumel)
p_chromel = np.poly1d(coeffschromel)
xp = np.linspace(min(xalumel+xchromel), max(xalumel+xchromel), 5000)
alumel_eq = 'dV = %.2f*(dT) + %.2f' % (coeffsalumel[0], coeffsalumel[1])
chromel_eq = 'dV = %.2f*(dT) + %.2f' % (coeffschromel[0], coeffschromel[1])
ax.plot(xp, p_alumel(xp), '-', c='#FF9900', label="alumel Voltage Fit\n %s" % alumel_eq)
ax.plot(xp, p_chromel(xp), '-', c='g', label="chromel Voltage Fit\n %s" % chromel_eq)
ax.legend(loc='upper left', fontsize='10')
# Save:
plot_folder = self.filePath + '/Seebeck Plots/'
if not os.path.exists(plot_folder):
os.makedirs(plot_folder)
fig.savefig('%s.png' % (plot_folder + title) , dpi=dpi)
plt.close()
#end def
#--------------------------------------------------------------------------
def save_file(self):
print('\nSave Files\n')
self.seebeckfile.close()
#end def
#end class
###############################################################################
#==============================================================================
if __name__=='__main__':
# NOTE: the constructor requires filepath, datafile, and measureList; the data
# file name below is an illustrative assumption, and the temperature list
# mirrors the commented default in __init__
runprogram = SeebeckProcessing("/Users/tobererlab1/Desktop/Skutt_0p010_PID", "Data.csv", [50,75,100,125,150,175,200,225,250,275,300,325,350,375,350,325,300,275,250,225,200,175,150,125,100,75,50])
#end if | gpl-3.0 |
nof20/BitcoinModel | Signals/BitcoinData.py | 1 | 2795 | """ Module to download Bitcoin prices from Quandl.
See https://www.quandl.com/data/GDAX/USD-BTC-USD-Exchange-Rate
"""
import configparser
import datetime
import quandl
import pandas as pd
import numpy as np
from couchdb.mapping import Document, FloatField, DateField, TextField
from Tools.DBCache import DBCache
class BitcoinData(object):
#TODO: Define an abc (interface) for these common methods.
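# Quandl dataset code for the GDAX (Coinbase) BTC-USD exchange rate series (see module docstring)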
TICKER = "GDAX/USD"
def __init__(self):
self.config = configparser.ConfigParser()
self.config.read("config.ini")
self.db = DBCache()
def get(self, start_date, end_date, cached=True):
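# cache-aside read: serve from the CouchDB cache when cached=True, otherwise
# pull fresh data from Quandl and write it back to the cache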
if cached:
df = self.get_db(start_date, end_date)
else:
df = self.get_ws(start_date, end_date)
self.set_db(df)
return df
def get_ws(self, start_date, end_date):
"""Return DataFrame of prices between selected dates."""
start_date = DBCache.datetime_string(start_date)
end_date = DBCache.datetime_string(end_date)
df = quandl.get(
self.TICKER,
api_key=self.config['Quandl']['authtoken'],
start_date=start_date,
end_date=end_date)
return df
def get_db(self, start_date, end_date):
view = self.db.get_view("DBCache_views/BitcoinData")
start_date = DBCache.datetime_string(start_date)
end_date = DBCache.datetime_string(end_date)
rows = view[start_date:end_date]
df = BitcoinDoc.get_df_from_rows(rows)
return df
def set_db(self, df):
doclist = BitcoinDoc.get_doclist_from_df(df)
# TODO: Prevent saving of duplicates
self.db.save_doc_list(doclist)
class BitcoinDoc(Document):
"""ORM for CouchDB."""
Type = TextField()
Open = FloatField()
High = FloatField()
Low = FloatField()
Volume = FloatField()
Date = DateField()
@staticmethod
def get_doclist_from_df(df):
df2 = df.reset_index()
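# reset_index surfaces the Date index as a column so itertuples() exposes it
# as row.Date; NaN fields are skipped so each CouchDB document only stores
# observed values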
ll = []
for row in df2.itertuples():
doc = BitcoinDoc()
doc.Type = "BitcoinData"
if ~np.isnan(row.Open):
doc.Open = row.Open
if ~np.isnan(row.High):
doc.High = row.High
if ~np.isnan(row.Low):
doc.Low = row.Low
if ~np.isnan(row.Volume):
doc.Volume = row.Volume
doc.Date = row.Date.to_pydatetime()
ll.append(doc)
return ll
@staticmethod
def get_df_from_rows(rows):
ll = [row.value for row in rows]
df = pd.DataFrame(ll)
df.drop(["Type", "_id", "_rev"], axis=1, inplace=True)
df['Date'] = pd.to_datetime(df['Date'])
df.set_index("Date", inplace=True)
return df
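# --- Usage sketch (illustrative, not part of the original module) ---
# Assumes a config.ini with a [Quandl] authtoken entry and a reachable CouchDB
# instance behind DBCache; dates are passed as datetime.date objects.
if __name__ == '__main__':
source = BitcoinData()
start = datetime.date(2017, 1, 1)
end = datetime.date(2017, 3, 31)
prices = source.get(start, end, cached=False) # fetch from Quandl, then cache
print(prices.head())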
| gpl-3.0 |