repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
pypot/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 162 | 7103 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words are printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
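# Illustrative note (added, not part of the original example): the tokenizer
# keeps word tokens of two or more characters and collapses every token that
# starts with a digit (or underscore) into a single "#NUMBER" placeholder, e.g.
#   number_aware_tokenizer("we have 20 files in temp")
#   -> ['we', 'have', '#NUMBER', 'files', 'in', 'temp']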
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
weight = X[rows[:, np.newaxis], cols].sum()
cut = (X[row_complement[:, np.newaxis], cols].sum() +
X[rows[:, np.newaxis], col_complement].sum())
return cut / weight
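# Comment added for clarity: for bicluster i with document rows R and word
# columns C, the value above is cut(R, C) / weight(R, C) -- the total TF-IDF
# mass on entries crossing the bicluster boundary divided by the mass inside
# it. Lower values mean tighter, better separated biclusters, which is why the
# smallest normalized cuts are selected as the best biclusters below.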
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
GJL/flink | flink-python/pyflink/table/table.py | 4 | 32961 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import warnings
from py4j.java_gateway import get_method
from pyflink.java_gateway import get_gateway
from pyflink.table.serializers import ArrowSerializer
from pyflink.table.table_schema import TableSchema
from pyflink.table.types import create_arrow_schema
from pyflink.table.utils import tz_convert_from_internal
from pyflink.util.utils import to_jarray
from pyflink.util.utils import to_j_explain_detail_arr
__all__ = ['Table', 'GroupedTable', 'GroupWindowedTable', 'OverWindowedTable', 'WindowGroupedTable']
class Table(object):
"""
A :class:`~pyflink.table.Table` is the core component of the Table API.
Similar to how the batch and streaming APIs have DataSet and DataStream,
the Table API is built around :class:`~pyflink.table.Table`.
Use the methods of :class:`~pyflink.table.Table` to transform data.
Example:
::
>>> env = StreamExecutionEnvironment.get_execution_environment()
>>> env.set_parallelism(1)
>>> t_env = StreamTableEnvironment.create(env)
>>> ...
>>> t_env.register_table_source("source", ...)
>>> t = t_env.scan("source")
>>> t.select(...)
>>> ...
>>> t_env.register_table_sink("result", ...)
>>> t.insert_into("result")
>>> t_env.execute("table_job")
Operations such as :func:`~pyflink.table.Table.join`, :func:`~pyflink.table.Table.select`,
:func:`~pyflink.table.Table.where` and :func:`~pyflink.table.Table.group_by`
take arguments in an expression string. Please refer to the documentation for
the expression syntax.
"""
def __init__(self, j_table):
self._j_table = j_table
def select(self, fields):
"""
Performs a selection operation. Similar to a SQL SELECT statement. The field expressions
can contain complex expressions.
Example:
::
>>> tab.select("key, value + 'hello'")
:param fields: Expression string.
:type fields: str
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.select(fields))
def alias(self, field, *fields):
"""
Renames the fields of the expression result. Use this to disambiguate fields before
joining two tables.
Example:
::
>>> tab.alias("a", "b")
:param field: Field alias.
:type field: str
:param fields: Additional field aliases.
:type fields: str
:return: The result table.
:rtype: pyflink.table.Table
"""
gateway = get_gateway()
extra_fields = to_jarray(gateway.jvm.String, fields)
return Table(get_method(self._j_table, "as")(field, extra_fields))
def filter(self, predicate):
"""
Filters out elements that don't pass the filter predicate. Similar to a SQL WHERE
clause.
Example:
::
>>> tab.filter("name = 'Fred'")
:param predicate: Predicate expression string.
:type predicate: str
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.filter(predicate))
def where(self, predicate):
"""
Filters out elements that don't pass the filter predicate. Similar to a SQL WHERE
clause.
Example:
::
>>> tab.where("name = 'Fred'")
:param predicate: Predicate expression string.
:type predicate: str
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.where(predicate))
def group_by(self, fields):
"""
Groups the elements on some grouping keys. Use this before a selection with aggregations
to perform the aggregation on a per-group basis. Similar to a SQL GROUP BY statement.
Example:
::
>>> tab.group_by("key").select("key, value.avg")
:param fields: Group keys.
:type fields: str
:return: The grouped table.
:rtype: pyflink.table.GroupedTable
"""
return GroupedTable(self._j_table.groupBy(fields))
def distinct(self):
"""
Removes duplicate values and returns only distinct (different) values.
Example:
::
>>> tab.select("key, value").distinct()
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.distinct())
def join(self, right, join_predicate=None):
"""
Joins two :class:`~pyflink.table.Table`. Similar to a SQL join. The fields of the two joined
operations must not overlap, use :func:`~pyflink.table.Table.alias` to rename fields if
necessary. You can use where and select clauses after a join to further specify the
behaviour of the join.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment` .
Example:
::
>>> left.join(right).where("a = b && c > 3").select("a, b, d")
>>> left.join(right, "a = b")
:param right: Right table.
:type right: pyflink.table.Table
:param join_predicate: Optional, the join predicate expression string.
:type join_predicate: str
:return: The result table.
:rtype: pyflink.table.Table
"""
if join_predicate is not None:
return Table(self._j_table.join(right._j_table, join_predicate))
else:
return Table(self._j_table.join(right._j_table))
def left_outer_join(self, right, join_predicate=None):
"""
Joins two :class:`~pyflink.table.Table`. Similar to a SQL left outer join. The fields of
the two joined operations must not overlap, use :func:`~pyflink.table.Table.alias` to
rename fields if necessary.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment` and its
:class:`~pyflink.table.TableConfig` must have null check enabled (default).
Example:
::
>>> left.left_outer_join(right).select("a, b, d")
>>> left.left_outer_join(right, "a = b").select("a, b, d")
:param right: Right table.
:type right: pyflink.table.Table
:param join_predicate: Optional, the join predicate expression string.
:type join_predicate: str
:return: The result table.
:rtype: pyflink.table.Table
"""
if join_predicate is None:
return Table(self._j_table.leftOuterJoin(right._j_table))
else:
return Table(self._j_table.leftOuterJoin(right._j_table, join_predicate))
def right_outer_join(self, right, join_predicate):
"""
Joins two :class:`~pyflink.table.Table`. Similar to a SQL right outer join. The fields of
the two joined operations must not overlap, use :func:`~pyflink.table.Table.alias` to
rename fields if necessary.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment` and its
:class:`~pyflink.table.TableConfig` must have null check enabled (default).
Example:
::
>>> left.right_outer_join(right, "a = b").select("a, b, d")
:param right: Right table.
:type right: pyflink.table.Table
:param join_predicate: The join predicate expression string.
:type join_predicate: str
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.rightOuterJoin(right._j_table, join_predicate))
def full_outer_join(self, right, join_predicate):
"""
Joins two :class:`~pyflink.table.Table`. Similar to a SQL full outer join. The fields of
the two joined operations must not overlap, use :func:`~pyflink.table.Table.alias` to
rename fields if necessary.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment` and its
:class:`~pyflink.table.TableConfig` must have null check enabled (default).
Example:
::
>>> left.full_outer_join(right, "a = b").select("a, b, d")
:param right: Right table.
:type right: pyflink.table.Table
:param join_predicate: The join predicate expression string.
:type join_predicate: str
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.fullOuterJoin(right._j_table, join_predicate))
def join_lateral(self, table_function_call, join_predicate=None):
"""
Joins this Table with a user-defined TableFunction. This join is similar to a SQL inner
join but works with a table function. Each row of the table is joined with the rows
produced by the table function.
Example:
::
>>> t_env.register_java_function("split", "java.table.function.class.name")
>>> tab.join_lateral("split(text, ' ') as (b)", "a = b")
:param table_function_call: An expression representing a table function call.
:type table_function_call: str
:param join_predicate: Optional, the join predicate expression string; the join is on TRUE if not
specified.
:type join_predicate: str
:return: The result Table.
:rtype: pyflink.table.Table
"""
if join_predicate is None:
return Table(self._j_table.joinLateral(table_function_call))
else:
return Table(self._j_table.joinLateral(table_function_call, join_predicate))
def left_outer_join_lateral(self, table_function_call, join_predicate=None):
"""
Joins this Table with a user-defined TableFunction. This join is similar to
a SQL left outer join but works with a table function. Each row of the table is joined
with all rows produced by the table function. If the join does not produce any row, the
outer row is padded with nulls.
Example:
::
>>> t_env.register_java_function("split", "java.table.function.class.name")
>>> tab.left_outer_join_lateral("split(text, ' ') as (b)")
:param table_function_call: An expression representing a table function call.
:type table_function_call: str
:param join_predicate: Optional, the join predicate expression string; the join is on TRUE if not
specified.
:type join_predicate: str
:return: The result Table.
:rtype: pyflink.table.Table
"""
if join_predicate is None:
return Table(self._j_table.leftOuterJoinLateral(table_function_call))
else:
return Table(self._j_table.leftOuterJoinLateral(table_function_call, join_predicate))
def minus(self, right):
"""
Minus of two :class:`~pyflink.table.Table` with duplicate records removed.
Similar to a SQL EXCEPT clause. Minus returns records from the left table that do not
exist in the right table. Duplicate records in the left table are returned
exactly once, i.e., duplicates are removed. Both tables must have identical field types.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment`.
Example:
::
>>> left.minus(right)
:param right: Right table.
:type right: pyflink.table.Table
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.minus(right._j_table))
def minus_all(self, right):
"""
Minus of two :class:`~pyflink.table.Table`. Similar to a SQL EXCEPT ALL clause.
MinusAll returns the records that do not exist in
the right table. A record that is present n times in the left table and m times
in the right table is returned (n - m) times, i.e., as many duplicates as are present
in the right table are removed. Both tables must have identical field types.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment`.
Example:
::
>>> left.minus_all(right)
:param right: Right table.
:type right: pyflink.table.Table
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.minusAll(right._j_table))
def union(self, right):
"""
Unions two :class:`~pyflink.table.Table` with duplicate records removed.
Similar to a SQL UNION. The fields of the two union operations must fully overlap.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment`.
Example:
::
>>> left.union(right)
:param right: Right table.
:type right: pyflink.table.Table
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.union(right._j_table))
def union_all(self, right):
"""
Unions two :class:`~pyflink.table.Table`. Similar to a SQL UNION ALL. The fields of the
two union operations must fully overlap.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment`.
Example:
::
>>> left.union_all(right)
:param right: Right table.
:type right: pyflink.table.Table
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.unionAll(right._j_table))
def intersect(self, right):
"""
Intersects two :class:`~pyflink.table.Table` with duplicate records removed. Intersect
returns records that exist in both tables. If a record is present in one or both tables
more than once, it is returned just once, i.e., the resulting table has no duplicate
records. Similar to a SQL INTERSECT. The fields of the two intersect operations must fully
overlap.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment`.
Example:
::
>>> left.intersect(right)
:param right: Right table.
:type right: pyflink.table.Table
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.intersect(right._j_table))
def intersect_all(self, right):
"""
Intersects two :class:`~pyflink.table.Table`. IntersectAll returns records that exist in
both tables. If a record is present in both tables more than once, it is returned as many
times as it is present in both tables, i.e., the resulting table might have duplicate
records. Similar to an SQL INTERSECT ALL. The fields of the two intersect operations must
fully overlap.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment`.
Example:
::
>>> left.intersect_all(right)
:param right: Right table.
:type right: pyflink.table.Table
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.intersectAll(right._j_table))
def order_by(self, fields):
"""
Sorts the given :class:`~pyflink.table.Table`. Similar to SQL ORDER BY.
The resulting Table is globally sorted across all parallel partitions.
Example:
::
>>> tab.order_by("name.desc")
:param fields: Order fields expression string.
:type fields: str
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.orderBy(fields))
def offset(self, offset):
"""
Limits a sorted result from an offset position.
Similar to a SQL OFFSET clause. Offset is technically part of the Order By operator and
thus must be preceded by it.
:func:`~pyflink.table.Table.offset` can be combined with a subsequent
:func:`~pyflink.table.Table.fetch` call to return n rows after skipping the first o rows.
Example:
::
# skips the first 3 rows and returns all following rows.
>>> tab.order_by("name.desc").offset(3)
# skips the first 10 rows and returns the next 5 rows.
>>> tab.order_by("name.desc").offset(10).fetch(5)
:param offset: Number of records to skip.
:type offset: int
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.offset(offset))
def fetch(self, fetch):
"""
Limits a sorted result to the first n rows.
Similar to a SQL FETCH clause. Fetch is technically part of the Order By operator and
thus must be preceded by it.
:func:`~pyflink.table.Table.fetch` can be combined with a preceding
:func:`~pyflink.table.Table.offset` call to return n rows after skipping the first o rows.
Example:
Returns the first 3 records.
::
>>> tab.order_by("name.desc").fetch(3)
Skips the first 10 rows and returns the next 5 rows.
::
>>> tab.order_by("name.desc").offset(10).fetch(5)
:param fetch: The number of records to return. Fetch must be >= 0.
:type fetch: int
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.fetch(fetch))
def window(self, window):
"""
Defines group window on the records of a table.
A group window groups the records of a table by assigning them to windows defined by a time
or row interval.
For streaming tables of infinite size, grouping into windows is required to define finite
groups on which group-based aggregates can be computed.
For batch tables of finite size, windowing essentially provides shortcuts for time-based
groupBy.
.. note::
Computing windowed aggregates on a streaming table is only a parallel operation
if additional grouping attributes are added to the
:func:`~pyflink.table.GroupWindowedTable.group_by` clause.
If the :func:`~pyflink.table.GroupWindowedTable.group_by` only references a GroupWindow
alias, the streamed table will be processed by a single task, i.e., with parallelism 1.
Example:
::
>>> tab.window(Tumble.over("10.minutes").on("rowtime").alias("w")) \\
... .group_by("w") \\
... .select("a.sum as a, w.start as b, w.end as c, w.rowtime as d")
:param window: A :class:`~pyflink.table.window.GroupWindow` created from
:class:`~pyflink.table.window.Tumble`, :class:`~pyflink.table.window.Session`
or :class:`~pyflink.table.window.Slide`.
:type window: pyflink.table.window.GroupWindow
:return: A group windowed table.
:rtype: GroupWindowedTable
"""
return GroupWindowedTable(self._j_table.window(window._java_window))
def over_window(self, *over_windows):
"""
Defines over-windows on the records of a table.
An over-window defines for each record an interval of records over which aggregation
functions can be computed.
Example:
::
>>> table.window(Over.partition_by("c").order_by("rowTime") \\
... .preceding("10.seconds").alias("ow")) \\
... .select("c, b.count over ow, e.sum over ow")
.. note::
Computing over window aggregates on a streaming table is only a parallel
operation if the window is partitioned. Otherwise, the whole stream will be processed
by a single task, i.e., with parallelism 1.
.. note::
Over-windows for batch tables are currently not supported.
:param over_windows: over windows created from :class:`~pyflink.table.window.Over`.
:type over_windows: pyflink.table.window.OverWindow
:return: An over windowed table.
:rtype: pyflink.table.OverWindowedTable
"""
gateway = get_gateway()
window_array = to_jarray(gateway.jvm.OverWindow,
[item._java_over_window for item in over_windows])
return OverWindowedTable(self._j_table.window(window_array))
def add_columns(self, fields):
"""
Adds additional columns. Similar to a SQL SELECT statement. The field expressions
can contain complex expressions, but can not contain aggregations. It will throw an
exception if the added fields already exist.
Example:
::
>>> tab.add_columns("a + 1 as a1, concat(b, 'sunny') as b1")
:param fields: Column list string.
:type fields: str
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.addColumns(fields))
def add_or_replace_columns(self, fields):
"""
Adds additional columns. Similar to a SQL SELECT statement. The field expressions
can contain complex expressions, but can not contain aggregations. Existing fields will be
replaced if the added column name is the same as an existing column name. Moreover, if the added
fields have duplicate field names, then the last one is used.
Example:
::
>>> tab.add_or_replace_columns("a + 1 as a1, concat(b, 'sunny') as b1")
:param fields: Column list string.
:type fields: str
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.addOrReplaceColumns(fields))
def rename_columns(self, fields):
"""
Renames existing columns. Similar to a field alias statement. The field expressions
should be alias expressions, and only the existing fields can be renamed.
Example:
::
>>> tab.rename_columns("a as a1, b as b1")
:param fields: Column list string.
:type fields: str
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.renameColumns(fields))
def drop_columns(self, fields):
"""
Drops existing columns. The field expressions should be field reference expressions.
Example:
::
>>> tab.drop_columns("a, b")
:param fields: Column list string.
:type fields: str
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.dropColumns(fields))
def insert_into(self, table_path):
"""
Writes the :class:`~pyflink.table.Table` to a :class:`~pyflink.table.TableSink` that was
registered under the specified name. For the path resolution algorithm see
:func:`~TableEnvironment.use_database`.
Example:
::
>>> tab.insert_into("sink")
:param table_path: The path of the registered :class:`~pyflink.table.TableSink` to which
the :class:`~pyflink.table.Table` is written.
:type table_path: str
.. note:: Deprecated in 1.11. Use :func:`execute_insert` for single sink,
use :class:`TableEnvironment`#:func:`create_statement_set`
for multiple sinks.
"""
warnings.warn("Deprecated in 1.11. Use execute_insert for single sink, "
"use TableTableEnvironment#create_statement_set for multiple sinks.",
DeprecationWarning)
self._j_table.insertInto(table_path)
def to_pandas(self):
"""
Converts the table to a pandas DataFrame.
Example:
::
>>> pdf = pd.DataFrame(np.random.rand(1000, 2))
>>> table = table_env.from_pandas(pdf, ["a", "b"])
>>> table.filter("a > 0.5").to_pandas()
:return: the result pandas DataFrame.
"""
gateway = get_gateway()
max_arrow_batch_size = self._j_table.getTableEnvironment().getConfig().getConfiguration()\
.getInteger(gateway.jvm.org.apache.flink.python.PythonOptions.MAX_ARROW_BATCH_SIZE)
batches = gateway.jvm.org.apache.flink.table.runtime.arrow.ArrowUtils\
.collectAsPandasDataFrame(self._j_table, max_arrow_batch_size)
if batches.hasNext():
import pytz
timezone = pytz.timezone(
self._j_table.getTableEnvironment().getConfig().getLocalTimeZone().getId())
serializer = ArrowSerializer(
create_arrow_schema(self.get_schema().get_field_names(),
self.get_schema().get_field_data_types()),
self.get_schema().to_row_data_type(),
timezone)
import pyarrow as pa
table = pa.Table.from_batches(serializer.load_from_iterator(batches))
pdf = table.to_pandas()
schema = self.get_schema()
for field_name in schema.get_field_names():
pdf[field_name] = tz_convert_from_internal(
pdf[field_name], schema.get_field_data_type(field_name), timezone)
return pdf
else:
import pandas as pd
return pd.DataFrame.from_records([], columns=self.get_schema().get_field_names())
def get_schema(self):
"""
Returns the :class:`~pyflink.table.TableSchema` of this table.
:return: The schema of this table.
:rtype: pyflink.table.TableSchema
"""
return TableSchema(j_table_schema=self._j_table.getSchema())
def print_schema(self):
"""
Prints the schema of this table to the console in a tree format.
"""
self._j_table.printSchema()
def execute_insert(self, table_path, overwrite=False):
"""
Writes the :class:`~pyflink.table.Table` to a :class:`~pyflink.table.TableSink` that was
registered under the specified name, and then executes the insert operation.
For the path resolution algorithm see :func:`~TableEnvironment.use_database`.
Example:
::
>>> tab.execute_insert("sink")
:param table_path: The path of the registered :class:`~pyflink.table.TableSink` to which
the :class:`~pyflink.table.Table` is written.
:type table_path: str
:param overwrite: The flag that indicates whether the insert should overwrite
existing data or not.
:type overwrite: bool
:return: The table result.
"""
# TODO convert java TableResult to python TableResult once FLINK-17303 is finished
self._j_table.executeInsert(table_path, overwrite)
def execute(self):
"""
Collects the contents of the current table to a local client.
Example:
::
>>> tab.execute()
:return: The content of the table.
"""
# TODO convert java TableResult to python TableResult once FLINK-17303 is finished
self._j_table.execute()
def explain(self, *extra_details):
"""
Returns the AST of this table and the execution plan.
:param extra_details: The extra explain details which the explain result should include,
e.g. estimated cost, changelog mode for streaming
:type extra_details: tuple[ExplainDetail] (variable-length arguments of ExplainDetail)
:return: The AST and the execution plan of this table.
:rtype: str
"""
j_extra_details = to_j_explain_detail_arr(extra_details)
return self._j_table.explain(j_extra_details)
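    # Illustrative usage sketch (an assumption, not part of the original file;
    # it presumes ExplainDetail can be imported from pyflink.table):
    #   print(tab.explain())
    #   print(tab.explain(ExplainDetail.ESTIMATED_COST, ExplainDetail.CHANGELOG_MODE))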
def __str__(self):
return self._j_table.toString()
class GroupedTable(object):
"""
A table that has been grouped on a set of grouping keys.
"""
def __init__(self, java_table):
self._j_table = java_table
def select(self, fields):
"""
Performs a selection operation on a grouped table. Similar to an SQL SELECT statement.
The field expressions can contain complex expressions and aggregations.
Example:
::
>>> tab.group_by("key").select("key, value.avg + ' The average' as average")
:param fields: Expression string that contains group keys and aggregate function calls.
:type fields: str
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.select(fields))
class GroupWindowedTable(object):
"""
A table that has been windowed for :class:`~pyflink.table.GroupWindow`.
"""
def __init__(self, java_group_windowed_table):
self._j_table = java_group_windowed_table
def group_by(self, fields):
"""
Groups the elements by a mandatory window and one or more optional grouping attributes.
The window is specified by referring to its alias.
If no additional grouping attribute is specified and if the input is a streaming table,
the aggregation will be performed by a single task, i.e., with parallelism 1.
Aggregations are performed per group and defined by a subsequent
:func:`~pyflink.table.WindowGroupedTable.select` clause similar to SQL SELECT-GROUP-BY
query.
Example:
::
>>> tab.window(group_window.alias("w")).group_by("w, key").select("key, value.avg")
:param fields: Group keys.
:type fields: str
:return: A window grouped table.
:rtype: pyflink.table.WindowGroupedTable
"""
return WindowGroupedTable(self._j_table.groupBy(fields))
class WindowGroupedTable(object):
"""
A table that has been windowed and grouped for :class:`~pyflink.table.window.GroupWindow`.
"""
def __init__(self, java_window_grouped_table):
self._j_table = java_window_grouped_table
def select(self, fields):
"""
Performs a selection operation on a window grouped table. Similar to an SQL SELECT
statement.
The field expressions can contain complex expressions and aggregations.
Example:
::
>>> window_grouped_table.select("key, window.start, value.avg as valavg")
:param fields: Expression string.
:type fields: str
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.select(fields))
class OverWindowedTable(object):
"""
A table that has been windowed for :class:`~pyflink.table.window.OverWindow`.
Unlike group windows, which are specified in the GROUP BY clause, over windows do not collapse
rows. Instead over window aggregates compute an aggregate for each input row over a range of
its neighboring rows.
"""
def __init__(self, java_over_windowed_table):
self._j_table = java_over_windowed_table
def select(self, fields):
"""
Performs a selection operation on an over windowed table. Similar to an SQL SELECT
statement.
The field expressions can contain complex expressions and aggregations.
Example:
::
>>> over_windowed_table.select("c, b.count over ow, e.sum over ow")
:param fields: Expression string.
:type fields: str
:return: The result table.
:rtype: pyflink.table.Table
"""
return Table(self._j_table.select(fields))
| apache-2.0 |
zihua/scikit-learn | sklearn/decomposition/dict_learning.py | 42 | 46134 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000, check_input=True, verbose=0):
"""Generic sparse coding
Each row of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
check_input: boolean, optional
If False, the input arrays X and dictionary will not be checked.
verbose: int
Controls the verbosity; the higher, the more messages. Defaults to 0.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=verbose, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
# TODO: Make verbosity argument for Lasso?
# sklearn.linear_model.coordinate_descent.enet_path has a verbosity
# argument that we could pass in from Lasso.
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=check_input)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lars = Lars(fit_intercept=False, verbose=verbose, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
# TODO: Should verbose argument be passed to this?
new_code = orthogonal_mp_gram(
Gram=gram, Xy=cov, n_nonzero_coefs=int(regularization),
tol=None, norms_squared=row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
raise ValueError('Sparse coding method must be "lasso_lars", '
'"lasso_cd", "lars", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1, check_input=True, verbose=0):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
check_input: boolean, optional
If False, the input arrays X and dictionary will not be checked.
verbose : int, optional
Controls the verbosity; the higher, the more messages. Defaults to 0.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if check_input:
if algorithm == 'lasso_cd':
dictionary = check_array(dictionary, order='C', dtype='float64')
X = check_array(X, order='C', dtype='float64')
else:
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
code = _sparse_encode(X,
dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init,
max_iter=max_iter,
check_input=False,
verbose=verbose)
# This ensures that the dimensionality of code is always 2,
# consistent with the case n_jobs > 1
if code.ndim == 1:
code = code[np.newaxis, :]
return code
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter,
check_input=False)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
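# Illustrative usage sketch (an assumption, not part of the original module):
# encode a small data matrix against a random dictionary with coordinate
# descent; the returned code has one row of coefficients per sample.
#   >>> import numpy as np
#   >>> rng = np.random.RandomState(0)
#   >>> D = rng.randn(15, 64)      # dictionary: (n_components, n_features)
#   >>> Y = rng.randn(10, 64)      # data:       (n_samples, n_features)
#   >>> codes = sparse_encode(Y, D, algorithm='lasso_cd', alpha=1.0)
#   >>> codes.shape
#   (10, 15)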
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
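# Comment added for clarity: the loop above performs one pass of block
# coordinate descent over the atoms. For each atom k the residual is rebuilt
# without atom k's contribution (the rank-one BLAS ``ger`` update), the atom is
# re-estimated as R * code[k]^T and rescaled to unit l2 norm, and an atom whose
# norm collapses is replaced by a fresh random direction with its coefficients
# reset to zero.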
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
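# Illustrative usage sketch (an assumption, not part of the original module):
#   >>> import numpy as np
#   >>> rng = np.random.RandomState(0)
#   >>> Y = rng.randn(30, 16)
#   >>> codes, D, errs = dict_learning(Y, n_components=8, alpha=1.0,
#   ...                                max_iter=20, method='cd',
#   ...                                random_state=0)
# Y is then approximated by np.dot(codes, D), and ``errs`` holds the value of
# the penalized objective after each iteration.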
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
copy=False)
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, the number of iterations returned should be zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs, check_input=False)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
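# Comment added for clarity: in the online loop above, A accumulates
# code * code^T and B accumulates X_batch^T * code^T, both discounted by
# ``beta``. (A, B) are the sufficient statistics of the surrogate objective,
# so _update_dict only needs them (passed as Y=B, code=A) rather than all of
# the data seen so far, which is also what makes warm restarts through
# ``inner_stats`` possible.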
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
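# Illustrative sketch (not part of the original module): the split_sign
# option above doubles the feature dimension by stacking the positive and
# negative parts of the code. The toy values are arbitrary.
_code_demo_split = np.array([[1.5, -2.0, 0.0]])
_split_demo = np.hstack([np.maximum(_code_demo_split, 0),
                         -np.minimum(_code_demo_split, 0)])
# _split_demo == [[1.5, 0.0, 0.0, 0.0, 2.0, 0.0]]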
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
        The dictionary atoms used for sparse coding. Rows are assumed to be
        normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
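# Illustrative sketch (not part of the original module): encode two signals
# against a small random dictionary with the SparseCoder defined above. The
# dictionary rows are normalized to unit norm, as the docstring assumes, and
# all sizes are arbitrary demonstration values.
_rng_sc = np.random.RandomState(0)
_dict_sc = _rng_sc.randn(15, 64)
_dict_sc /= np.sqrt((_dict_sc ** 2).sum(axis=1))[:, np.newaxis]
_signals_sc = _rng_sc.randn(2, 64)
_coder_sc = SparseCoder(dictionary=_dict_sc, transform_algorithm='omp',
                        transform_n_nonzero_coefs=3)
_codes_sc = _coder_sc.fit(_signals_sc).transform(_signals_sc)
# _codes_sc has shape (2, 15), with at most 3 nonzero entries per row.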
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
        (U^*, V^*) = argmin_(U,V) 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
        with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
.. versionadded:: 0.17
*cd* coordinate descent method to improve speed.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
.. versionadded:: 0.17
*lasso_cd* coordinate descent method to improve speed.
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
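# Illustrative sketch (not part of the original module): fit a small
# dictionary on toy data and sparse-code the same data. Sizes, alpha and
# max_iter are arbitrary demonstration values.
_rng_dl = np.random.RandomState(0)
_X_dl = _rng_dl.randn(20, 10)
_dl_est = DictionaryLearning(n_components=4, alpha=1, max_iter=5,
                             transform_algorithm='lasso_lars',
                             random_state=0)
_X_dl_code = _dl_est.fit(_X_dl).transform(_X_dl)
# _dl_est.components_ has shape (4, 10); _X_dl_code has shape (20, 4).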
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
        (U^*, V^*) = argmin_(U,V) 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
        with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid losing the
        history of the evolution, but they should not be needed by the
        end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
            The number of iterations on data batches that have been
            performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
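# Illustrative sketch (not part of the original module): stream toy data
# through partial_fit in chunks, relying on inner_stats_ / iter_offset_ to
# carry state between calls. Chunk sizes and parameters are arbitrary
# demonstration values.
_rng_mb = np.random.RandomState(0)
_X_mb = _rng_mb.randn(30, 8)
_mbdl = MiniBatchDictionaryLearning(n_components=5, alpha=1, n_iter=10,
                                    batch_size=5, random_state=0)
for _chunk in np.array_split(_X_mb, 3):
    _mbdl = _mbdl.partial_fit(_chunk)
# After three calls, _mbdl.components_ has shape (5, 8) and
# _mbdl.iter_offset_ == 3 * _mbdl.n_iter.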
| bsd-3-clause |
ARudiuk/mne-python | tutorials/plot_sensors_decoding.py | 5 | 3140 | """
==========================
Decoding sensor space data
==========================
Decoding, a.k.a. MVPA or supervised machine learning applied to MEG
data in sensor space. Here the classifier is applied to every time
point.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import StratifiedKFold
import mne
from mne.datasets import sample
from mne.decoding import TimeDecoding, GeneralizationAcrossTime
data_path = sample.data_path()
plt.close('all')
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(2, None, method='iir') # replace baselining with high-pass
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=None, preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
epochs_list = [epochs[k] for k in event_id]
mne.epochs.equalize_epoch_counts(epochs_list)
data_picks = mne.pick_types(epochs.info, meg=True, exclude='bads')
###############################################################################
# Temporal decoding
# -----------------
#
# We'll use the default classifier for a binary classification problem
# which is a linear Support Vector Machine (SVM).
td = TimeDecoding(predict_mode='cross-validation', n_jobs=1)
# Fit
td.fit(epochs)
# Compute accuracy
td.score(epochs)
# Plot scores across time
td.plot(title='Sensor space decoding')
###############################################################################
# Generalization Across Time
# --------------------------
#
# Here we'll use a stratified cross-validation scheme.
# make response vector
y = np.zeros(len(epochs.events), dtype=int)
y[epochs.events[:, 2] == 3] = 1
cv = StratifiedKFold(y=y) # do a stratified cross-validation
# define the GeneralizationAcrossTime object
gat = GeneralizationAcrossTime(predict_mode='cross-validation', n_jobs=1,
cv=cv, scorer=roc_auc_score)
# fit and score
gat.fit(epochs, y=y)
gat.score(epochs)
# let's visualize now
gat.plot()
gat.plot_diagonal()
###############################################################################
# Exercise
# --------
# - Can you improve the performance using full epochs and a common spatial
# pattern (CSP) used by most BCI systems?
# - Explore other datasets from MNE (e.g. Face dataset from SPM to predict
# Face vs. Scrambled)
#
# Have a look at the example
# :ref:`sphx_glr_auto_examples_decoding_plot_decoding_csp_space.py`
| bsd-3-clause |
skearnes/pylearn2 | pylearn2/scripts/datasets/browse_small_norb.py | 4 | 6492 | #!/usr/bin/env python
import sys, argparse, pickle
import numpy
from matplotlib import pyplot
from pylearn2.datasets import norb
def main():
def parse_args():
parser = argparse.ArgumentParser(
description="Browser for SmallNORB dataset.")
parser.add_argument('--which_set',
default='train',
help="'train', 'test', or the path to a .pkl file")
parser.add_argument('--zca',
default=None,
help=("if --which_set points to a .pkl "
"file storing a ZCA-preprocessed "
"NORB dataset, you can optionally "
"enter the preprocessor's .pkl "
"file path here to undo the "
"ZCA'ing for visualization "
"purposes."))
return parser.parse_args()
def get_data(args):
if args.which_set in ('train', 'test'):
dataset = norb.SmallNORB(args.which_set, True)
else:
with open(args.which_set) as norb_file:
dataset = pickle.load(norb_file)
if len(dataset.y.shape) < 2 or dataset.y.shape[1] == 1:
print("This viewer does not support NORB datasets that "
"only have classification labels.")
sys.exit(1)
if args.zca is not None:
with open(args.zca) as zca_file:
zca = pickle.load(zca_file)
dataset.X = zca.inverse(dataset.X)
num_examples = dataset.X.shape[0]
topo_shape = ((num_examples, ) +
tuple(dataset.view_converter.shape))
assert topo_shape[-1] == 1
topo_shape = topo_shape[:-1]
values = dataset.X.reshape(topo_shape)
labels = numpy.array(dataset.y, 'int')
return values, labels, dataset.which_set
args = parse_args()
values, labels, which_set = get_data(args)
# For programming convenience, internally remap the instance labels to be
# 0-4, and the azimuth labels to be 0-17. The user will still only see the
# original, unmodified label values.
instance_index = norb.SmallNORB.label_type_to_index['instance']
def remap_instances(which_set, labels):
if which_set == 'train':
new_to_old_instance = [4, 6, 7, 8, 9]
elif which_set == 'test':
new_to_old_instance = [0, 1, 2, 3, 5]
num_instances = len(new_to_old_instance)
old_to_new_instance = numpy.ndarray(10, 'int')
old_to_new_instance.fill(-1)
old_to_new_instance[new_to_old_instance] = numpy.arange(num_instances)
instance_slice = numpy.index_exp[:, instance_index]
old_instances = labels[instance_slice]
new_instances = old_to_new_instance[old_instances]
labels[instance_slice] = new_instances
azimuth_index = norb.SmallNORB.label_type_to_index['azimuth']
azimuth_slice = numpy.index_exp[:, azimuth_index]
labels[azimuth_slice] = labels[azimuth_slice] / 2
return new_to_old_instance
new_to_old_instance = remap_instances(which_set, labels)
def get_new_azimuth_degrees(scalar_label):
return 20 * scalar_label
# Maps a label vector to the corresponding index in <values>
num_labels_by_type = numpy.array(norb.SmallNORB.num_labels_by_type, 'int')
num_labels_by_type[instance_index] = len(new_to_old_instance)
label_to_index = numpy.ndarray(num_labels_by_type, 'int')
label_to_index.fill(-1)
for i, label in enumerate(labels):
label_to_index[tuple(label)] = i
assert not numpy.any(label_to_index == -1) # all elements have been set
figure, axes = pyplot.subplots(1, 2, squeeze=True)
figure.canvas.set_window_title('Small NORB dataset (%sing set)' %
which_set)
# shift subplots down to make more room for the text
figure.subplots_adjust(bottom=0.05)
num_label_types = len(norb.SmallNORB.num_labels_by_type)
current_labels = numpy.zeros(num_label_types, 'int')
current_label_type = [0, ]
label_text = figure.suptitle("title text",
x=0.1,
horizontalalignment="left")
def redraw(redraw_text, redraw_images):
if redraw_text:
cl = current_labels
lines = [
'category: %s' % norb.SmallNORB.get_category(cl[0]),
'instance: %d' % new_to_old_instance[cl[1]],
'elevation: %d' % norb.SmallNORB.get_elevation_degrees(cl[2]),
'azimuth: %d' % get_new_azimuth_degrees(cl[3]),
'lighting: %d' % cl[4]]
lt = current_label_type[0]
lines[lt] = '==> ' + lines[lt]
text = ('Up/down arrows choose label, left/right arrows change it'
'\n\n' +
'\n'.join(lines))
label_text.set_text(text)
if redraw_images:
index = label_to_index[tuple(current_labels)]
image_pair = values[index, :, :, :]
for i in range(2):
axes[i].imshow(image_pair[i, :, :], cmap='gray')
figure.canvas.draw()
def on_key_press(event):
def add_mod(arg, step, size):
return (arg + size + step) % size
def incr_label_type(step):
current_label_type[0] = add_mod(current_label_type[0],
step,
num_label_types)
def incr_label(step):
lt = current_label_type[0]
num_labels = num_labels_by_type[lt]
current_labels[lt] = add_mod(current_labels[lt], step, num_labels)
if event.key == 'up':
incr_label_type(-1)
redraw(True, False)
elif event.key == 'down':
incr_label_type(1)
redraw(True, False)
elif event.key == 'left':
incr_label(-1)
redraw(True, True)
elif event.key == 'right':
incr_label(1)
redraw(True, True)
elif event.key == 'q':
sys.exit(0)
figure.canvas.mpl_connect('key_press_event', on_key_press)
redraw(True, True)
pyplot.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
madjelan/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 251 | 1895 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
MJuddBooth/pandas | pandas/core/indexes/numeric.py | 2 | 14865 | import warnings
import numpy as np
from pandas._libs import index as libindex
import pandas.compat as compat
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.dtypes.common import (
is_bool, is_bool_dtype, is_dtype_equal, is_extension_array_dtype, is_float,
is_integer_dtype, is_scalar, needs_i8_conversion, pandas_dtype)
import pandas.core.dtypes.concat as _concat
from pandas.core.dtypes.missing import isna
from pandas.core import algorithms
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index, InvalidIndexError, _index_shared_docs)
from pandas.core.ops import get_op_result_name
_num_index_shared_docs = dict()
class NumericIndex(Index):
"""
Provide numeric type operations
This is an abstract class
"""
_is_numeric_dtype = True
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=None):
if fastpath is not None:
warnings.warn("The 'fastpath' keyword is deprecated, and will be "
"removed in a future version.",
FutureWarning, stacklevel=2)
if fastpath:
return cls._simple_new(data, name=name)
# is_scalar, generators handled in coerce_to_ndarray
data = cls._coerce_to_ndarray(data)
if issubclass(data.dtype.type, compat.string_types):
cls._string_data_error(data)
if copy or not is_dtype_equal(data.dtype, cls._default_dtype):
subarr = np.array(data, dtype=cls._default_dtype, copy=copy)
cls._assert_safe_casting(data, subarr)
else:
subarr = data
if name is None and hasattr(data, 'name'):
name = data.name
return cls._simple_new(subarr, name=name)
@Appender(_index_shared_docs['_maybe_cast_slice_bound'])
def _maybe_cast_slice_bound(self, label, side, kind):
assert kind in ['ix', 'loc', 'getitem', None]
# we will try to coerce to integers
return self._maybe_cast_indexer(label)
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is not None and not self._can_hold_na:
# Ensure we are not returning an Int64Index with float data:
return self._shallow_copy_with_infer(values=values, **kwargs)
return (super(NumericIndex, self)._shallow_copy(values=values,
**kwargs))
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
if is_bool(value) or is_bool_dtype(value):
# force conversion to object
# so we don't lose the bools
raise TypeError
return value
def _convert_tolerance(self, tolerance, target):
tolerance = np.asarray(tolerance)
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError('list-like tolerance size must match '
'target index size')
if not np.issubdtype(tolerance.dtype, np.number):
if tolerance.ndim > 0:
raise ValueError(('tolerance argument for %s must contain '
'numeric elements if it is list type') %
(type(self).__name__,))
else:
raise ValueError(('tolerance argument for %s must be numeric '
'if it is a scalar: %r') %
(type(self).__name__, tolerance))
return tolerance
@classmethod
def _assert_safe_casting(cls, data, subarr):
"""
Subclasses need to override this only if the process of casting data
from some accepted dtype to the internal dtype(s) bears the risk of
truncation (e.g. float to int).
"""
pass
def _concat_same_dtype(self, indexes, name):
return _concat._concat_index_same_dtype(indexes).rename(name)
@property
def is_all_dates(self):
"""
Checks that all the labels are datetime objects
"""
return False
@Appender(Index.insert.__doc__)
def insert(self, loc, item):
# treat NA values as nans:
if is_scalar(item) and isna(item):
item = self._na_value
return super(NumericIndex, self).insert(loc, item)
_num_index_shared_docs['class_descr'] = """
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects. %(klass)s is a special case
of `Index` with purely %(ltype)s labels. %(extra)s
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: %(dtype)s)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
Attributes
----------
None
Methods
-------
None
See Also
--------
Index : The base pandas Index type.
Notes
-----
An Index instance can **only** contain hashable objects.
"""
_int64_descr_args = dict(
klass='Int64Index',
ltype='integer',
dtype='int64',
extra=''
)
class IntegerIndex(NumericIndex):
"""
This is an abstract class for Int64Index, UInt64Index.
"""
def __contains__(self, key):
"""
        Check if key is a float with a nonzero fractional part; if so, return False.
"""
hash(key)
try:
if is_float(key) and int(key) != key:
return False
return key in self._engine
except (OverflowError, TypeError, ValueError):
return False
class Int64Index(IntegerIndex):
__doc__ = _num_index_shared_docs['class_descr'] % _int64_descr_args
_typ = 'int64index'
_can_hold_na = False
_engine_type = libindex.Int64Engine
_default_dtype = np.int64
@property
def inferred_type(self):
"""Always 'integer' for ``Int64Index``"""
return 'integer'
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# don't coerce ilocs to integers
if kind != 'iloc':
key = self._maybe_cast_indexer(key)
return (super(Int64Index, self)
._convert_scalar_indexer(key, kind=kind))
def _wrap_joined_index(self, joined, other):
name = get_op_result_name(self, other)
return Int64Index(joined, name=name)
@classmethod
def _assert_safe_casting(cls, data, subarr):
"""
Ensure incoming data can be represented as ints.
"""
if not issubclass(data.dtype.type, np.signedinteger):
if not np.array_equal(data, subarr):
raise TypeError('Unsafe NumPy casting, you must '
'explicitly cast')
Int64Index._add_numeric_methods()
Int64Index._add_logical_methods()
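# Illustrative sketch (not part of the original module): IntegerIndex
# membership accepts float keys only when they have no fractional part,
# per the __contains__ logic above. Results are noted as expectations.
_demo_int64_idx = Int64Index([1, 2, 3])
_demo_in_whole = 2.0 in _demo_int64_idx    # expected True: int(2.0) == 2.0
_demo_in_frac = 2.5 in _demo_int64_idx     # expected False: short-circuits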
_uint64_descr_args = dict(
klass='UInt64Index',
ltype='unsigned integer',
dtype='uint64',
extra=''
)
class UInt64Index(IntegerIndex):
__doc__ = _num_index_shared_docs['class_descr'] % _uint64_descr_args
_typ = 'uint64index'
_can_hold_na = False
_engine_type = libindex.UInt64Engine
_default_dtype = np.uint64
@property
def inferred_type(self):
"""Always 'integer' for ``UInt64Index``"""
return 'integer'
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('u8')
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# don't coerce ilocs to integers
if kind != 'iloc':
key = self._maybe_cast_indexer(key)
return (super(UInt64Index, self)
._convert_scalar_indexer(key, kind=kind))
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
# Cast the indexer to uint64 if possible so
# that the values returned from indexing are
# also uint64.
keyarr = com.asarray_tuplesafe(keyarr)
if is_integer_dtype(keyarr):
return com.asarray_tuplesafe(keyarr, dtype=np.uint64)
return keyarr
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
# Cast the indexer to uint64 if possible so
# that the values returned from indexing are
# also uint64.
if keyarr.is_integer():
return keyarr.astype(np.uint64)
return keyarr
def _wrap_joined_index(self, joined, other):
name = get_op_result_name(self, other)
return UInt64Index(joined, name=name)
@classmethod
def _assert_safe_casting(cls, data, subarr):
"""
Ensure incoming data can be represented as uints.
"""
if not issubclass(data.dtype.type, np.unsignedinteger):
if not np.array_equal(data, subarr):
raise TypeError('Unsafe NumPy casting, you must '
'explicitly cast')
UInt64Index._add_numeric_methods()
UInt64Index._add_logical_methods()
_float64_descr_args = dict(
klass='Float64Index',
dtype='float64',
ltype='float',
extra=''
)
class Float64Index(NumericIndex):
__doc__ = _num_index_shared_docs['class_descr'] % _float64_descr_args
_typ = 'float64index'
_engine_type = libindex.Float64Engine
_default_dtype = np.float64
@property
def inferred_type(self):
"""Always 'floating' for ``Float64Index``"""
return 'floating'
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if needs_i8_conversion(dtype):
msg = ('Cannot convert Float64Index to dtype {dtype}; integer '
'values are required for conversion').format(dtype=dtype)
raise TypeError(msg)
elif (is_integer_dtype(dtype) and
not is_extension_array_dtype(dtype)) and self.hasnans:
# TODO(jreback); this can change once we have an EA Index type
# GH 13149
raise ValueError('Cannot convert NA to integer')
return super(Float64Index, self).astype(dtype, copy=copy)
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
return key
@Appender(_index_shared_docs['_convert_slice_indexer'])
def _convert_slice_indexer(self, key, kind=None):
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
if kind == 'iloc':
return super(Float64Index, self)._convert_slice_indexer(key,
kind=kind)
# translate to locations
return self.slice_indexer(key.start, key.stop, key.step, kind=kind)
def _format_native_types(self, na_rep='', float_format=None, decimal='.',
quoting=None, **kwargs):
from pandas.io.formats.format import FloatArrayFormatter
formatter = FloatArrayFormatter(self.values, na_rep=na_rep,
float_format=float_format,
decimal=decimal, quoting=quoting,
fixed_width=False)
return formatter.get_result_as_array()
def get_value(self, series, key):
""" we always want to get an index value, never a value """
if not is_scalar(key):
raise InvalidIndexError
k = com.values_from_object(key)
loc = self.get_loc(k)
new_values = com.values_from_object(series)[loc]
return new_values
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self is other:
return True
if not isinstance(other, Index):
return False
# need to compare nans locations and make sure that they are the same
# since nans don't compare equal this is a bit tricky
try:
if not isinstance(other, Float64Index):
other = self._constructor(other)
if (not is_dtype_equal(self.dtype, other.dtype) or
self.shape != other.shape):
return False
left, right = self._ndarray_values, other._ndarray_values
return ((left == right) | (self._isnan & other._isnan)).all()
except (TypeError, ValueError):
return False
def __contains__(self, other):
if super(Float64Index, self).__contains__(other):
return True
try:
# if other is a sequence this throws a ValueError
return np.isnan(other) and self.hasnans
except ValueError:
try:
return len(other) <= 1 and ibase._try_get_item(other) in self
except TypeError:
pass
except TypeError:
pass
return False
@Appender(_index_shared_docs['get_loc'])
def get_loc(self, key, method=None, tolerance=None):
try:
if np.all(np.isnan(key)) or is_bool(key):
nan_idxs = self._nan_idxs
try:
return nan_idxs.item()
except (ValueError, IndexError):
# should only need to catch ValueError here but on numpy
# 1.7 .item() can raise IndexError when NaNs are present
if not len(nan_idxs):
raise KeyError(key)
return nan_idxs
except (TypeError, NotImplementedError):
pass
return super(Float64Index, self).get_loc(key, method=method,
tolerance=tolerance)
@cache_readonly
def is_unique(self):
return super(Float64Index, self).is_unique and self._nan_idxs.size < 2
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is not None:
self._validate_index_level(level)
return algorithms.isin(np.array(self), values)
Float64Index._add_numeric_methods()
Float64Index._add_logical_methods_disabled()
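# Illustrative sketch (not part of the original module): Float64Index only
# reports NaN as a member when the index actually holds a NaN, via the
# hasnans branch of __contains__ above. Results are noted as expectations.
_demo_f64_idx = Float64Index([0.5, np.nan])
_demo_nan_present = np.nan in _demo_f64_idx              # expected True
_demo_nan_absent = np.nan in Float64Index([0.5, 1.5])    # expected False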
| bsd-3-clause |
DTMilodowski/EOlab | src/potentialAGB_Indonesia_app.py | 1 | 6377 | import numpy as np
import os
import sys
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import prepare_EOlab_layers as EO
sys.path.append('/home/dmilodow/DataStore_DTM/FOREST2020/PotentialBiomass/src')
import geospatial_utility_tools as geo
# Get perceptionally uniform colourmaps
sys.path.append('/home/dmilodow/DataStore_DTM/FOREST2020/EOdata/EO_data_processing/src/plot_EO_data/colormap/')
import colormaps as cmaps
plt.register_cmap(name='viridis', cmap=cmaps.viridis)
plt.register_cmap(name='inferno', cmap=cmaps.inferno)
plt.register_cmap(name='plasma', cmap=cmaps.plasma)
plt.register_cmap(name='magma', cmap=cmaps.magma)
plt.set_cmap(cmaps.viridis)
plt.figure(1, facecolor='White',figsize=[2, 1])
plt.show()
DATADIR = '/disk/scratch/local.2/southeast_asia_PFB/'
SAVEDIR = '/home/dmilodow/DataStore_DTM/EOlaboratory/EOlab/IndonesiaPotentialAGB/v1.0/'
NetCDF_file = 'southeast_asia_PFB_mean_WorldClim2.nc'
ds,geoTrans = EO.load_NetCDF(DATADIR+NetCDF_file,lat_var = 'lat', lon_var = 'lon')
resampling_scalar = 3.
vars = ['AGB_mean','AGBpot_mean','forests']
dataset, geoTrans = EO.resample_dataset(ds,geoTrans,vars,resampling_scalar)
# sequestration potential is defined by pixels with positive potential biomass that
# are not already forests
dataset['seqpot_mean'] = dataset['AGBpot_mean']-dataset['AGB_mean']
dataset['seqpot_mean'][dataset['forests']==1] = 0.
dataset['seqpot_mean'][dataset['seqpot_mean']<0] = 0.
dataset['seqpot_mean'][dataset['AGB_mean']==-9999] = -9999.
dataset['forests'][dataset['forests']!=1] = -9999.
vars = ['AGB_mean','AGBpot_mean','seqpot_mean','forests']
cmaps = ['viridis','viridis','plasma','viridis']
ulims = [200.,200.,100.,1.]
llims = [0.,0.,0.,0.]
axis_labels = ['AGB$_{obs}$ / Mg(C) ha$^{-1}$', 'AGB$_{potential}$ / Mg(C) ha$^{-1}$', 'Sequestration potential / Mg(C) ha$^{-1}$', 'Forest mask (1 = Forest)']
for vv in range(0,len(vars)):
    print(vars[vv])
file_prefix = SAVEDIR + 'se_asia_' + vars[vv]
# delete existing dataset if present
if 'se_asia_'+vars[vv]+'_data.tif' in os.listdir(SAVEDIR):
os.system("rm %s" % (SAVEDIR+'se_asia_'+vars[vv]+'_data.tif'))
if 'se_asia_'+vars[vv]+'_display.tif' in os.listdir(SAVEDIR):
os.system("rm %s" % (SAVEDIR+'se_asia_'+vars[vv]+'_display.tif'))
EO.write_array_to_display_layer_GeoTiff(dataset[vars[vv]], geoTrans, file_prefix, cmaps[vv], ulims[vv], llims[vv])
EO.plot_legend(cmaps[vv],ulims[vv],llims[vv],axis_labels[vv], file_prefix)
rows, cols = dataset[vars[0]].shape
latitude = np.arange(geoTrans[3],rows*geoTrans[5]+geoTrans[3],geoTrans[5])
longitude = np.arange(geoTrans[0],cols*geoTrans[1]+geoTrans[0],geoTrans[1])
areas = geo.calculate_cell_area_array(latitude,longitude, area_scalar = 1./10.**4,cell_centred=False)
# loop through the variables, multiplying by cell areas to give values in Mg
for vv in range(0,len(vars)):
    print(vars[vv])
if 'se_asia_'+vars[vv]+'_total_data.tif' in os.listdir(SAVEDIR):
os.system("rm %s" % (SAVEDIR+'se_asia_'+vars[vv]+'_total_data.tif'))
file_prefix = SAVEDIR + 'se_asia_' + vars[vv] + '_total'
out_array = dataset[vars[vv]] * areas
out_array[dataset[vars[vv]]==-9999]=-9999
EO.write_array_to_data_layer_GeoTiff(out_array, geoTrans, file_prefix)
out_array=None
# Also want to write cell areas to file. However, as this will be compared against other layers, need to carry across
# nodata values
areas_out = areas.copy()
areas_out[np.asarray(dataset[vars[0]])==-9999] = -9999
if 'se_asia_cell_areas_data.tif' in os.listdir(SAVEDIR):
os.system("rm %s" % (SAVEDIR+'se_asia_cell_areas_data.tif'))
area_file_prefix = SAVEDIR + 'se_asia_cell_areas'
EO.write_array_to_data_layer_GeoTiff(areas_out, geoTrans, area_file_prefix)
"""
# Finally, we also want to write quantitative display layers for the maximum and minimum biomass,
# potential biomass and sequestration potential so that we can define uncertainy boundaries.
NetCDF_file = 'indonesia_PFB_lower.nc'
ds,geoTrans = EO.load_NetCDF(DATADIR+NetCDF_file,lat_var = 'lat', lon_var = 'lon')
resampling_scalar = 3.
vars = ['AGB_lower','AGBpot_lower','forests']
dataset, geoTrans = EO.resample_dataset(ds,geoTrans,vars,resampling_scalar)
# sequestration potential is defined by pixels with positive potential biomass that
# are not already forests
dataset['seqpot_lower'] = dataset['AGBpot_lower']-dataset['AGB_lower']
dataset['seqpot_lower'][dataset['AGB_lower']==-9999] = -9999.
dataset['seqpot_lower'][dataset['forests']==1] = 0.
dataset['seqpot_lower'][dataset['seqpot_lower']<0] = 0.
dataset['forests'][dataset['forests']!=1] = -9999.
vars = ['AGB_lower','AGBpot_lower','seqpot_lower']
for vv in range(0,len(vars)):
print vars[vv]
if 'indonesia_'+vars[vv]+'_total_data.tif' in os.listdir(SAVEDIR):
os.system("rm %s" % (SAVEDIR+'indonesia_'+vars[vv]+'_total_data.tif'))
file_prefix = SAVEDIR + 'indonesia_' + vars[vv] + '_total_data'
out_array = dataset[vars[vv]] * areas
out_array[dataset[vars[vv]]==-9999]=-9999
EO.write_array_to_data_layer_GeoTiff(out_array, geoTrans, file_prefix)
out_array=None
NetCDF_file = 'indonesia_PFB_upper.nc'
ds,geoTrans = EO.load_NetCDF(DATADIR+NetCDF_file,lat_var = 'lat', lon_var = 'lon')
resampling_scalar = 3.
vars = ['AGB_upper','AGBpot_upper','forests']
dataset, geoTrans = EO.resample_dataset(ds,geoTrans,vars,resampling_scalar)
# sequestration potential is defined by pixels with positive potential biomass that
# are not already forests
dataset['seqpot_upper'] = dataset['AGBpot_upper']-dataset['AGB_upper']
dataset['seqpot_upper'][dataset['AGB_upper']==-9999] = -9999.
dataset['seqpot_upper'][dataset['forests']==1] = 0.
dataset['seqpot_upper'][dataset['seqpot_upper']<0] = 0.
dataset['forests'][dataset['forests']!=1] = -9999.
vars = ['AGB_upper','AGBpot_upper','seqpot_upper']
for vv in range(0,len(vars)):
print vars[vv]
if 'indonesia_'+vars[vv]+'_total_data.tif' in os.listdir(SAVEDIR):
os.system("rm %s" % (SAVEDIR+'indonesia_'+vars[vv]+'_total_data.tif'))
file_prefix = SAVEDIR + 'indonesia_' + vars[vv] + '_total_data'
out_array = dataset[vars[vv]] * areas
out_array[dataset[vars[vv]]==-9999]=-9999
EO.write_array_to_data_layer_GeoTiff(out_array, geoTrans, file_prefix)
out_array=None
"""
| gpl-3.0 |
MatthieuBizien/scikit-learn | sklearn/linear_model/logistic.py | 7 | 67572 |
"""
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
softmax, squared_norm)
from ..utils.extmath import row_norms
from ..utils.optimize import newton_cg
from ..utils.validation import check_X_y
from ..exceptions import DataConversionWarning
from ..exceptions import NotFittedError
from ..utils.fixes import expit
from ..utils.multiclass import check_classification_targets
from ..externals.joblib import Parallel, delayed
from ..model_selection import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
Returns
-------
w : ndarray, shape (n_features,)
Coefficient vector without the intercept weight (w[-1]) if the
intercept should be fit. Unchanged otherwise.
    c : float
        The intercept.
    yz : ndarray, shape (n_samples,)
        y * (np.dot(X, w) + c).
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
yz = y * z
return w, c, yz
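# Illustrative sketch (not part of the original module): with a 2-feature
# design matrix and a 3-element coefficient vector, _intercept_dot treats
# the trailing entry as the intercept. The toy values are arbitrary.
_w_toy = np.array([1.0, 2.0, 0.5])            # [coef_1, coef_2, intercept]
_X_toy = np.array([[1.0, 1.0], [2.0, 0.0]])
_y_toy = np.array([1.0, -1.0])
_w_out, _c_out, _yz_out = _intercept_dot(_w_toy, _X_toy, _y_toy)
# _c_out == 0.5 and _yz_out == y * (X.dot([1.0, 2.0]) + 0.5) == [3.5, -2.5]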
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(n_samples)
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
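# Illustrative sketch (not part of the original module): numerically check
# the analytic gradient returned above on a tiny random problem. The toy
# sizes, alpha=1.0 and the finite-difference step are arbitrary choices.
_rng_lg = np.random.RandomState(0)
_X_lg = _rng_lg.randn(6, 3)
_y_lg = np.sign(_rng_lg.randn(6))             # labels in {-1, +1}
_w_lg = _rng_lg.randn(4)                      # 3 coefficients + intercept
_loss_lg = lambda w: _logistic_loss_and_grad(w, _X_lg, _y_lg, 1.0)[0]
_num_grad_lg = optimize.approx_fprime(_w_lg, _loss_lg, 1e-6)
_ana_grad_lg = _logistic_loss_and_grad(_w_lg, _X_lg, _y_lg, 1.0)[1]
# np.allclose(_num_grad_lg, _ana_grad_lg, atol=1e-4) is expected to hold.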
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
        Function that takes a vector as a parameter and returns the
        product of the Hessian and that vector.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
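# Illustrative sketch (not part of the original module): on a tiny 3-class
# problem the class-probability matrix returned by _multinomial_loss has
# rows that sum to one. Sizes and weights are arbitrary demo values.
_rng_mn = np.random.RandomState(1)
_X_mn = _rng_mn.randn(4, 2)
_Y_mn = np.eye(3)[_rng_mn.randint(3, size=4)]   # one-hot labels, shape (4, 3)
_w_mn = _rng_mn.randn(3 * 2)                    # no intercept columns
_sw_mn = np.ones(4)
_loss_mn, _p_mn, _ = _multinomial_loss(_w_mn, _X_mn, _Y_mn, 1.0, _sw_mn)
# _p_mn has shape (4, 3) and _p_mn.sum(axis=1) is approximately all ones.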
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
        or (n_classes * (n_features + 1)) and returns the matrix-vector
        product with the Hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs', 'sag']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg, lbfgs and sag solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=False,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
copy : bool, default False
Whether or not to produce a copy of the data. A copy is not required
anymore. This parameter is deprecated and will be removed in 0.19.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
    sample_weight : array-like, shape (n_samples,), optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
        Actual number of iterations for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
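    Examples
    --------
    A minimal illustrative call on synthetic data, added here as a sketch
    (shapes only; the fitted values themselves are not meaningful):

    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(20, 3)
    >>> y = (X[:, 0] > 0).astype(int)
    >>> coefs, Cs, n_iter = logistic_regression_path(X, y, Cs=[0.1, 1.0])
    >>> len(coefs), Cs.shape, n_iter.shape
    (2, (2,), (2,))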
"""
if copy:
warnings.warn("A copy is not required anymore. The 'copy' parameter "
"is deprecated and will be removed in 0.19.",
DeprecationWarning)
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
if check_input or copy:
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0])
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=np.float64)
y_bin[~mask] = -1.
# for compute_class_weight
# 'auto' is deprecated and will be removed in 0.19
if class_weight in ("auto", "balanced"):
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver != 'sag':
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
le = LabelEncoder()
Y_multi = le.fit_transform(y)
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F')
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accept only ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
try:
n_iter_i = info['nit'] - 1
            except KeyError:  # older scipy versions do not report 'nit'
n_iter_i = info['funcalls'] - 1
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, None,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver == 'sag':
if multi_class == 'multinomial':
target = target.astype(np.float64)
loss = 'multinomial'
else:
loss = 'log'
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, 1. / C, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag)
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return coefs, np.array(Cs), n_iter
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='ovr', random_state=None,
max_squared_sum=None, sample_weight=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
        values is chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
        'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
    sample_weight : array-like, shape (n_samples,), optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
    n_iter : array, shape (n_cs,)
        Actual number of iterations for each Cs.
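    Examples
    --------
    A rough sketch of how this helper is typically invoked (synthetic data
    and hand-picked train/test indices, purely for illustration):

    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(30, 2)
    >>> y = (X[:, 0] > 0).astype(int)
    >>> train, test = np.arange(20), np.arange(20, 30)
    >>> coefs, Cs, scores, n_iter = _log_reg_scoring_path(
    ...     X, y, train, test, pos_class=1, Cs=3, fit_intercept=True)
    >>> len(coefs), scores.shape
    (3, (3,))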
"""
_check_solver_option(solver, multi_class, penalty, dual)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = sample_weight[train]
coefs, Cs, n_iter = logistic_regression_path(
X_train, y_train, Cs=Cs, fit_intercept=fit_intercept,
solver=solver, max_iter=max_iter, class_weight=class_weight,
pos_class=pos_class, multi_class=multi_class,
tol=tol, verbose=verbose, dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling, random_state=random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
# To deal with object dtypes, we need to convert into an array of floats.
y_test = check_array(y_test, dtype=np.float64, ensure_2d=False)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr', and uses the cross-
entropy loss if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs',
'sag' and 'newton-cg' solvers.)
This class implements regularized logistic regression using the
'liblinear' library, 'newton-cg', 'sag' and 'lbfgs' solvers. It can handle
both dense and sparse input. Use C-ordered arrays or CSR matrices
containing 64-bit floats for optimal performance; any other input format
will be converted (and copied).
The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
with primal formulation. The 'liblinear' solver supports both L1 and L2
regularization, with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2', default: 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool, default: False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, default: 1.0
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', default: None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
*class_weight='balanced'* instead of deprecated
*class_weight='auto'*.
max_iter : int, default: 100
Useful only for the newton-cg, sag and lbfgs solvers.
Maximum number of iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, default: None
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}, default: 'liblinear'
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle
multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float, default: 1e-4
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}, default: 'ovr'
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
        'sag' and 'lbfgs' solvers.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
verbose : int, default: 0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, default: False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver.
.. versionadded:: 0.17
*warm_start* to support *lbfgs*, *newton-cg*, *sag* solvers.
n_jobs : int, default: 1
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
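    Examples
    --------
    A minimal usage sketch on synthetic data (illustrative only; output
    shapes rather than exact values are shown):

    >>> import numpy as np
    >>> from sklearn.linear_model import LogisticRegression
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(50, 4)
    >>> y = (X[:, 0] + X[:, 1] > 0).astype(int)
    >>> clf = LogisticRegression(C=1.0).fit(X, y)
    >>> clf.predict(X[:2]).shape
    (2,)
    >>> clf.predict_proba(X[:2]).shape
    (2, 2)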
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state,
sample_weight=sample_weight)
self.n_iter_ = np.array([n_iter_])
return self
if self.solver == 'sag':
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver, copy=False,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for (class_, warm_start_coef_) in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if self.multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
        Else use a one-vs-rest approach, i.e. calculate the probability
        of each class assuming it to be positive using the logistic function,
        and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
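        Examples
        --------
        Shape check only, assuming an already fitted binary classifier
        ``clf`` and a feature matrix ``X`` (hypothetical names):

        >>> clf.predict_proba(X[:3]).shape  # doctest: +SKIP
        (3, 2)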
"""
if not hasattr(self, "coef_"):
raise NotFittedError("Call fit before prediction")
calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr"
if calculate_ovr:
return super(LogisticRegression, self)._predict_proba_lr(X)
else:
return softmax(self.decision_function(X), copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg, sag
    or lbfgs optimizer. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
    using the cv parameter. In the case of the newton-cg and lbfgs solvers,
    we warm start along the path, i.e. the initial coefficients of the
    present fit are taken to be the coefficients obtained after convergence
    in the previous fit, so it is expected to be faster for high-dimensional
    dense data.
    For a multiclass problem, the hyperparameters for each class are computed
    using the best scores obtained by doing a one-vs-rest in parallel across
    all folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
class_weight == 'balanced'
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.model_selection` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle
multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
        'sag' and 'lbfgs' solvers.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
        It is available only when the parameter ``fit_intercept`` is set to
        True and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
See also
--------
LogisticRegression
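    Examples
    --------
    A minimal sketch on synthetic data (illustrative; the selected ``C_``
    depends on the data and on the cross-validation folds):

    >>> import numpy as np
    >>> from sklearn.linear_model import LogisticRegressionCV
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(60, 3)
    >>> y = (X[:, 0] > 0).astype(int)
    >>> clf = LogisticRegressionCV(Cs=5, cv=3).fit(X, y)
    >>> clf.C_.shape
    (1,)
    >>> clf.scores_[1].shape
    (3, 5)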
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr',
random_state=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
if self.solver == 'sag':
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
check_classification_targets(y)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, y, classifier=True)
folds = list(cv.split(X, y))
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in
['balanced', 'auto']):
# 'auto' is deprecated and will be removed in 0.19
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
# compute the class weights for the entire dataset y
if self.class_weight in ("auto", "balanced"):
classes = np.unique(y)
class_weight = compute_class_weight(self.class_weight, classes, y)
class_weight = dict(zip(classes, class_weight))
else:
class_weight = self.class_weight
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
            # Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
self.n_iter_ = np.reshape(n_iter_, (1, len(folds),
len(self.Cs_)))
else:
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds),
len(self.Cs_)))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty, copy=False,
class_weight=class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
| bsd-3-clause |
akrherz/dep | scripts/switchgrass/mlra_percent_slopes_conv.py | 2 | 1884 | """Print out the percentage of slopes converted."""
from pandas.io.sql import read_sql
from pyiem.util import get_dbconn
LABELS = {
36: "Slopes > 3% to Switchgrass",
37: "Slopes > 6% to Switchgrass",
38: "Slopes > 10% to Switchgrass",
}
def main(argv):
"""Go Main Go"""
mlra_id = int(argv[1])
pgconn = get_dbconn("idep")
mlraxref = read_sql(
"""
select distinct mlra_id, mlra_name from mlra
""",
pgconn,
index_col="mlra_id",
)
print("%s," % (mlraxref.at[mlra_id, "mlra_name"],), end="")
    for scenario in range(36, 39):
df = read_sql(
"""
with myhucs as (
SELECT huc_12 from huc12 where scenario = 0 and mlra_id = %s
)
select fpath, f.huc_12 from flowpaths f, myhucs h
WHERE f.scenario = 0 and f.huc_12 = h.huc_12
""",
pgconn,
params=(mlra_id,),
index_col=None,
)
if df.empty:
print()
continue
hits = 0
for _, row in df.iterrows():
prj = ("/prj/%s/%s/%s_%s.prj") % (
row["huc_12"][:8],
row["huc_12"][8:],
row["huc_12"],
row["fpath"],
)
prj2 = "/i/%s/%s" % (scenario, prj)
if open(prj2).read().find("SWITCHGRASS.rot") > 0:
hits += 1
print("%.2f," % (hits / float(len(df.index)) * 100.0,), end="")
print()
if __name__ == "__main__":
for mlraid in [
106,
107,
108,
109,
121,
137,
150,
155,
166,
175,
176,
177,
178,
179,
181,
182,
186,
187,
188,
196,
197,
204,
205,
]:
main([None, mlraid])
| mit |
bavardage/statsmodels | statsmodels/examples/ex_kernel_semilinear_dgp.py | 3 | 4916 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 06 09:50:54 2013
Author: Josef Perktold
"""
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
#from statsmodels.nonparametric.api import KernelReg
import statsmodels.sandbox.nonparametric.kernel_extras as smke
import statsmodels.sandbox.nonparametric.dgp_examples as dgp
class UnivariateFunc1a(dgp.UnivariateFunc1):
def het_scale(self, x):
return 0.5
seed = np.random.randint(999999)
#seed = 430973
#seed = 47829
seed = 648456 #good seed for het_scale = 0.5
print seed
np.random.seed(seed)
nobs, k_vars = 300, 3
x = np.random.uniform(-2, 2, size=(nobs, k_vars))
xb = x.sum(1) / 3 #beta = [1,1,1]
k_vars_lin = 2
x2 = np.random.uniform(-2, 2, size=(nobs, k_vars_lin))
funcs = [#dgp.UnivariateFanGijbels1(),
#dgp.UnivariateFanGijbels2(),
#dgp.UnivariateFanGijbels1EU(),
#dgp.UnivariateFanGijbels2(distr_x=stats.uniform(-2, 4))
UnivariateFunc1a(x=xb)
]
res = []
fig = plt.figure()
for i,func in enumerate(funcs):
#f = func()
f = func
y = f.y + x2.sum(1)
model = smke.SemiLinear(y, x2, x, 'ccc', k_vars_lin)
mean, mfx = model.fit()
ax = fig.add_subplot(1, 1, i+1)
f.plot(ax=ax)
xb_est = np.dot(model.exog, model.b)
sortidx = np.argsort(xb_est) #f.x)
ax.plot(f.x[sortidx], mean[sortidx], 'o', color='r', lw=2, label='est. mean')
# ax.plot(f.x, mean0, color='g', lw=2, label='est. mean')
ax.legend(loc='upper left')
res.append((model, mean, mfx))
print 'beta', model.b
print 'scale - est', (y - (xb_est+mean)).std()
print 'scale - dgp realised, true', (y - (f.y_true + x2.sum(1))).std(), \
2 * f.het_scale(1)
fittedvalues = xb_est + mean
resid = np.squeeze(model.endog) - fittedvalues
print 'corrcoef(fittedvalues, resid)', np.corrcoef(fittedvalues, resid)[0,1]
print 'variance of components, var and as fraction of var(y)'
print 'fitted values', fittedvalues.var(), fittedvalues.var() / y.var()
print 'linear ', xb_est.var(), xb_est.var() / y.var()
print 'nonparametric', mean.var(), mean.var() / y.var()
print 'residual ', resid.var(), resid.var() / y.var()
print '\ncovariance decomposition fraction of var(y)'
print np.cov(fittedvalues, resid) / model.endog.var(ddof=1)
print 'sum', (np.cov(fittedvalues, resid) / model.endog.var(ddof=1)).sum()
print '\ncovariance decomposition, xb, m, resid as fraction of var(y)'
print np.cov(np.column_stack((xb_est, mean, resid)), rowvar=False) / model.endog.var(ddof=1)
fig.suptitle('Kernel Regression')
fig.show()
alpha = 0.7
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(f.x[sortidx], f.y[sortidx], 'o', color='b', lw=2, alpha=alpha, label='observed')
ax.plot(f.x[sortidx], f.y_true[sortidx], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
ax.plot(f.x[sortidx], mean[sortidx], 'o', color='r', lw=2, alpha=alpha, label='est. mean')
ax.legend(loc='upper left')
sortidx = np.argsort(xb_est + mean)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(f.x[sortidx], y[sortidx], 'o', color='b', lw=2, alpha=alpha, label='observed')
ax.plot(f.x[sortidx], f.y_true[sortidx], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
ax.plot(f.x[sortidx], (xb_est + mean)[sortidx], 'o', color='r', lw=2, alpha=alpha, label='est. mean')
ax.legend(loc='upper left')
ax.set_title('Semilinear Model - observed and total fitted')
fig = plt.figure()
# ax = fig.add_subplot(1, 2, 1)
# ax.plot(f.x, f.y, 'o', color='b', lw=2, alpha=alpha, label='observed')
# ax.plot(f.x, f.y_true, 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
# ax.plot(f.x, mean, 'o', color='r', lw=2, alpha=alpha, label='est. mean')
# ax.legend(loc='upper left')
sortidx0 = np.argsort(xb)
ax = fig.add_subplot(1, 2, 1)
ax.plot(f.y[sortidx0], 'o', color='b', lw=2, alpha=alpha, label='observed')
ax.plot(f.y_true[sortidx0], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
ax.plot(mean[sortidx0], 'o', color='r', lw=2, alpha=alpha, label='est. mean')
ax.legend(loc='upper left')
ax.set_title('Single Index Model (sorted by true xb)')
ax = fig.add_subplot(1, 2, 2)
ax.plot(y - xb_est, 'o', color='b', lw=2, alpha=alpha, label='observed')
ax.plot(f.y_true, 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
ax.plot(mean, 'o', color='r', lw=2, alpha=alpha, label='est. mean')
ax.legend(loc='upper left')
ax.set_title('Single Index Model (nonparametric)')
plt.figure()
plt.plot(y, xb_est+mean, '.')
plt.title('observed versus fitted values')
plt.show()
| bsd-3-clause |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/scipy/signal/fir_filter_design.py | 16 | 20091 | """Functions for FIR filter design."""
from __future__ import division, print_function, absolute_import
from math import ceil, log
import numpy as np
from numpy.fft import irfft
from scipy.special import sinc
from . import sigtools
__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord',
'firwin', 'firwin2', 'remez']
# Some notes on function parameters:
#
# `cutoff` and `width` are given as numbers between 0 and 1.  These
# are relative frequencies, expressed as a fraction of the Nyquist rate.
# For example, if the Nyquist rate is 2KHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
    Given the number of taps `numtaps` and the transition width `width`,
    compute the attenuation `a` in dB, given by Kaiser's formula:
        a = 2.285 * (numtaps - 1) * pi * width + 7.95
    Parameters
    ----------
    numtaps : int
        The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and
stopband (or, in general, at any discontinuity) for the filter.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
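    Examples
    --------
    Attenuation of an 81-tap filter with a transition width of 0.1 times
    the Nyquist rate (value rounded for readability):

    >>> from scipy.signal import kaiser_atten
    >>> a = kaiser_atten(81, 0.1)
    >>> round(a, 2)
    65.38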
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
def kaiserord(ripple, width):
"""
Design a Kaiser window to limit ripple and width of transition region.
Parameters
----------
ripple : float
        Positive number specifying maximum ripple in the passband (dB) and
        minimum attenuation in the stopband.
width : float
Width of transition region (normalized so that 1 corresponds to pi
radians / sample).
Returns
-------
numtaps : int
The length of the kaiser window.
beta : float
The beta parameter for the kaiser window.
See Also
--------
kaiser_beta, kaiser_atten
Notes
-----
There are several ways to obtain the Kaiser window:
- ``signal.kaiser(numtaps, beta, sym=0)``
- ``signal.get_window(beta, numtaps)``
- ``signal.get_window(('kaiser', beta), numtaps)``
The empirical equations discovered by Kaiser are used.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
raise ValueError("Requested maximum ripple attentuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=1.0):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response
filter. The filter will have linear phase; it will be Type I if
`numtaps` is odd and Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist rate, so a
ValueError exception is raised if firwin is called with `numtaps` even and
having a passband whose right end is at the Nyquist rate.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be even if a passband includes the
Nyquist frequency.
cutoff : float or 1D array_like
Cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`.
width : float or None
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : bool
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : bool
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
- 0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True)
- `nyq` (the Nyquist rate) if the first passband ends at
`nyq` (i.e the filter is a single band highpass filter);
center of first passband otherwise
nyq : float
Nyquist frequency. Each frequency in `cutoff` must be between 0
and `nyq`.
Returns
-------
h : (numtaps,) ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to `nyq`, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
See also
--------
scipy.signal.firwin2
Examples
--------
Low-pass from 0 to f::
>>> from scipy import signal
>>> signal.firwin(numtaps, f)
Use a specific window function::
>>> signal.firwin(numtaps, f, window='nuttall')
High-pass ('stop' from 0 to f)::
>>> signal.firwin(numtaps, f, pass_zero=False)
Band-pass::
>>> signal.firwin(numtaps, [f1, f2], pass_zero=False)
Band-stop::
>>> signal.firwin(numtaps, [f1, f2])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1])::
>>> signal.firwin(numtaps, [f1, f2, f3, f4])
Multi-band (passbands are [f1, f2] and [f3,f4])::
>>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
"""
# The major enhancements to this function added in November 2010 were
# developed by Tom Krauss (see ticket #902).
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most "
"one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be "
"greater than 0 and less than nyq.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies "
"must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width) / nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist rate.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
# is even, and each pair in cutoff corresponds to passband.
cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))
# `bands` is a 2D array; each row gives the left and right edges of
# a passband.
bands = cutoff.reshape(-1, 2)
# Build up the coefficients.
alpha = 0.5 * (numtaps - 1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
# Get and apply the window function.
from .signaltools import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0, antisymmetric=False):
"""
FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`.
freq : array_like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array_like
The filter gains at the frequency sampling points. Certain
constraints to gain values, depending on the filter type, are applied,
see Notes for details.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g, 129, 257, etc). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
antisymmetric : bool
Whether resulting impulse response is symmetric/antisymmetric.
See Notes for more details.
Returns
-------
taps : ndarray
The filter coefficients of the FIR filter, as a 1-D array of length
`numtaps`.
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The type of filter is determined by
    the values of `numtaps` and the `antisymmetric` flag.
There are four possible combinations:
- odd `numtaps`, `antisymmetric` is False, type I filter is produced
- even `numtaps`, `antisymmetric` is False, type II filter is produced
- odd `numtaps`, `antisymmetric` is True, type III filter is produced
- even `numtaps`, `antisymmetric` is True, type IV filter is produced
    The magnitude responses of all but type I filters are subject to the
    following constraints:
- type II -- zero at the Nyquist frequency
- type III -- zero at zero and Nyquist frequencies
- type IV -- zero at zero frequency
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> from scipy import signal
>>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
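
    As a further, purely illustrative sketch (the taps count and gain values
    are arbitrary), an antisymmetric type IV design can be requested by
    combining an even `numtaps` with ``antisymmetric=True`` and zero gain at
    zero frequency:

    >>> taps = signal.firwin2(100, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0],
    ...                       antisymmetric=True)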
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
        raise ValueError(('numtaps must be less than nfreqs, but firwin2 was '
                          'called with numtaps=%d and nfreqs=%s') %
(numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if antisymmetric:
if numtaps % 2 == 0:
ftype = 4
else:
ftype = 3
else:
if numtaps % 2 == 0:
ftype = 2
else:
ftype = 1
if ftype == 2 and gain[-1] != 0.0:
raise ValueError("A Type II filter must have zero gain at the Nyquist rate.")
elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0):
raise ValueError("A Type III filter must have zero gain at zero and Nyquist rates.")
elif ftype == 4 and gain[0] != 0.0:
raise ValueError("A Type IV filter must have zero gain at zero rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq) - 1 and freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
    # Adjust the phases of the coefficients so that the first `numtaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
if ftype > 2:
shift *= 1j
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from .signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
if ftype == 3:
out[out.size // 2] = 0.0
return out
def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
maxiter=25, grid_density=16):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges in Hz.
All elements must be non-negative and less than half the sampling
frequency as given by `Hz`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
'bandpass' : flat response in bands. This is the default.
'differentiator' : frequency proportional response in bands.
'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
freqz : Compute the frequency response of a digital filter.
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
We want to construct a filter with a passband at 0.2-0.4 Hz, and
stop bands at 0-0.1 Hz and 0.45-0.5 Hz. Note that this means that the
behavior in the frequency ranges between those bands is unspecified and
may overshoot.
>>> from scipy import signal
>>> bpass = signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])
>>> freq, response = signal.freqz(bpass)
>>> ampl = np.abs(response)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(111)
>>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz
>>> plt.show()
"""
# Convert type
try:
tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', "
"or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
maxiter, grid_density)
| agpl-3.0 |
yask123/scikit-learn | sklearn/cluster/spectral.py | 233 | 18153 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        rotation matrix.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
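
    A minimal usage sketch (illustrative only; random data stands in for a
    real spectral embedding of shape (n_samples, n_clusters)):

    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> vectors = rng.rand(20, 3)
    >>> labels = discretize(vectors, random_state=rng)
    >>> labels.shape
    (20,)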
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
svd_restarts += 1
except LinAlgError:
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
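
    A small, illustrative sketch (synthetic blobs; the RBF affinity and all
    parameter values below are arbitrary choices, not recommendations):

    >>> import numpy as np
    >>> from sklearn.metrics.pairwise import rbf_kernel
    >>> rng = np.random.RandomState(0)
    >>> X = np.vstack([rng.randn(10, 2), rng.randn(10, 2) + 5])
    >>> affinity = rbf_kernel(X, gamma=1.0)  # symmetric, non-negative
    >>> labels = spectral_clustering(affinity, n_clusters=2, random_state=0)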
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either a
    kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
        ``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values mean
    very dissimilar elements, it can be transformed into a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
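
    A brief usage sketch (illustrative only; the two-moons data and the
    parameter values are arbitrary):

    >>> from sklearn.datasets import make_moons
    >>> X, _ = make_moons(n_samples=100, noise=0.05, random_state=0)
    >>> sc = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
    ...                         n_neighbors=10, random_state=0)
    >>> labels = sc.fit(X).labels_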
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            warnings.warn("The spectral clustering API has changed. ``fit`` "
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
| bsd-3-clause |
jiajunshen/partsNet | pnet/sgd_svm_classification_layer.py | 1 | 1784 | from __future__ import division, print_function, absolute_import
__author__ = 'jiajunshen'
from pnet.layer import SupervisedLayer
from pnet.layer import Layer
import numpy as np
import amitgroup as ag
from sklearn.svm import LinearSVC
from sklearn import cross_validation
from sklearn import linear_model
@Layer.register('sgd-svm-classification-layer')
class SGDSVMClassificationLayer(SupervisedLayer):
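    """Linear classification layer trained with stochastic gradient descent.

    (Descriptive note, inferred from the code below.) ``train`` fits an
    ``sklearn.linear_model.SGDClassifier`` on flattened feature arrays and
    ``extract`` returns its predicted labels. Note that with ``loss="log"``
    the fitted model is logistic regression rather than a hinge-loss SVM.
    """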
def __init__(self, settings={}):
self._settings = settings
self._svm = None
@property
def trained(self):
return self._svm is not None
@property
def classifier(self):
return True
def extract(self,X):
Xflat = X.reshape((X.shape[0], -1))
return self._svm.predict(Xflat)
def train(self, X, Y, OriginalX=None):
Xflat = X.reshape((X.shape[0], -1))
#self._svm = linear_model.SGDClassifier(loss = "hinge",penalty = 'l2', n_iter=20, shuffle=True,verbose = False,
# learning_rate = "constant", eta0 = 0.01, random_state = 0)
self._svm = clf = linear_model.SGDClassifier(alpha=0.01, loss = "log",penalty = 'l2', n_iter=2000, shuffle=True,verbose = False,
learning_rate = "optimal", eta0 = 0.0, epsilon=0.1, random_state = None, warm_start=False,
power_t=0.5, l1_ratio=1.0, fit_intercept=True)
self._svm.fit(Xflat, Y)
print(np.mean(self._svm.predict(Xflat) == Y))
def save_to_dict(self):
d = {}
d['svm'] = self._svm
d['settings'] = self._settings
return d
@classmethod
def load_from_dict(cls, d):
obj = cls()
obj._settings = d['settings']
obj._svm = d['svm']
return obj | bsd-3-clause |
LiaoPan/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 205 | 10378 | # Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
#Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float)
assert_true(A.dtype == np.float)
def test_connect_regions():
lena = sp.misc.lena()
for thr in (50, 150):
mask = lena > thr
graph = img_to_graph(lena, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
lena = sp.misc.lena()
mask = lena > 50
graph = grid_to_graph(*lena.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = lena > 150
graph = grid_to_graph(*lena.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_lena():
lena = sp.misc.lena().astype(np.float32)
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = lena.astype(np.float)
lena /= 16.0
return lena
def _orange_lena(lena=None):
lena = _downsampled_lena() if lena is None else lena
lena_color = np.zeros(lena.shape + (3,))
lena_color[:, :, 0] = 256 - lena
lena_color[:, :, 1] = 256 - lena / 2
lena_color[:, :, 2] = 256 - lena / 4
return lena_color
def _make_images(lena=None):
lena = _downsampled_lena() if lena is None else lena
# make a collection of lenas
images = np.zeros((3,) + lena.shape)
images[0] = lena
images[1] = lena + 1
images[2] = lena + 2
return images
downsampled_lena = _downsampled_lena()
orange_lena = _orange_lena(downsampled_lena)
lena_collection = _make_images(downsampled_lena)
def test_extract_patches_all():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
lena = orange_lena
i_h, i_w = lena.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
lena = downsampled_lena
lena = lena[:, 32:97]
i_h, i_w = lena.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
lena = downsampled_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_reconstruct_patches_perfect_color():
lena = orange_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_patch_extractor_fit():
lenas = lena_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(lenas))
def test_patch_extractor_max_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(lenas) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(lenas) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
lenas = lena_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(lenas)
assert_equal(patches.shape, (len(lenas) * 100, 12, 12))
def test_patch_extractor_all_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
lenas = _make_images(orange_lena)
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
lena = downsampled_lena
i_h, i_w = lena.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(lena, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
| bsd-3-clause |
mirestrepo/voxels-at-lems | super3d/boxm2_update_and_refine_super3d.py | 1 | 8572 | # THIS IS /helicopter_providence/middletown_3_29_11/site2_planes/boxm2_1/boxm2_update_and_refine_scene.py
import boxm2_batch,os;
import sys;
import optparse;
import time;
#import matplotlib.pyplot as plt;
boxm2_batch.register_processes();
boxm2_batch.register_datatypes();
class dbvalue:
def __init__(self, index, type):
self.id = index # unsigned integer
self.type = type # string
import random
#Parse inputs
parser = optparse.OptionParser(description='Update BOXM2 Scene without refinement 0');
parser.add_option('--model_dir', action="store", dest="model_dir");
parser.add_option('--boxm2_dir', action="store", dest="boxm2_dir");
parser.add_option('--imgs_dir', action="store", dest="imgs_dir");
parser.add_option('--cams_dir', action="store", dest="cams_dir");
parser.add_option('--NI', action="store", dest="NI", type='int');
parser.add_option('--NJ', action="store", dest="NJ", type='int');
parser.add_option('--repeat', action="store", dest="repeat", type='int');
options, args = parser.parse_args()
model_dir = options.model_dir;
boxm2_dir = options.boxm2_dir;
imgs_dir = options.imgs_dir;
cams_dir = options.cams_dir;
NI = options.NI;
NJ = options.NJ
repeat = options.repeat;
if not os.path.isdir(boxm2_dir + '/'):
print "Invalid Site Dir"
sys.exit(-1);
if not os.path.isdir(imgs_dir):
print "Invalid Image Dir"
sys.exit(-1);
if not os.path.isdir(cams_dir):
print "Invalid Cams Dir"
sys.exit(-1);
expected_img_dir = boxm2_dir + "/expectedImgs_" + str(repeat)
if not os.path.isdir(expected_img_dir + '/'):
os.mkdir(expected_img_dir + '/');
print("Loading a Scene");
boxm2_batch.init_process("boxm2LoadSceneProcess");
boxm2_batch.set_input_string(0, boxm2_dir + "/scene.xml");
boxm2_batch.run_process();
(scene_id, scene_type) = boxm2_batch.commit_output(0);
scene = dbvalue(scene_id, scene_type);
print("Create Main Cache");
boxm2_batch.init_process("boxm2CreateCacheProcess");
boxm2_batch.set_input_from_db(0,scene);
boxm2_batch.set_input_string(1,"lru");
boxm2_batch.run_process();
(id,type) = boxm2_batch.commit_output(0);
cache = dbvalue(id, type);
print("Init Manager");
boxm2_batch.init_process("boclInitManagerProcess");
boxm2_batch.run_process();
(id, type) = boxm2_batch.commit_output(0);
mgr = dbvalue(id, type);
print("Get Gpu Device");
boxm2_batch.init_process("boclGetDeviceProcess");
boxm2_batch.set_input_string(0,"gpu1")
boxm2_batch.set_input_from_db(1,mgr)
boxm2_batch.run_process();
(id, type) = boxm2_batch.commit_output(0);
device = dbvalue(id, type);
print("Create Gpu Cache");
boxm2_batch.init_process("boxm2CreateOpenclCacheProcess");
boxm2_batch.set_input_from_db(0,device)
boxm2_batch.set_input_from_db(1,scene)
boxm2_batch.run_process();
(id, type) = boxm2_batch.commit_output(0);
openclcache = dbvalue(id, type);
frames=[66,182,180,236,222,252,68,10,240,108,4,190,270,134,30,278,268,136,228,20,200,106,208,58,264,88,152,118,224,146,154,142,122,138,256,212,266,100,64,12,170,204,74,198,114,150,160,176,82,54,254,32,260,80,238,92,220,194,158,26,90,110,124,206,184,232,14,132,166,276,178,192,274,164,116,210,156,282,8,94,214,104,56,40,48,102,216,72,126,60,96,52,218,120,174,98,172,50,84,246,186,36,202,86,258,28,144,0,248,70,230,162,140,44,280,250,272,128,2,262,226,6,34,168,148,46,188,76,24,16,130,38,112,242,18,22,234];
test_frames=[244,196,62,78,42];
print 'STARTING UPDATE'
iter = 0;
for i in frames:
print 'ITERATION %d' % iter
iter = iter+1;
camera_fname = cams_dir+"/camera%(#)05d.txt"%{"#":i}
image_fname = imgs_dir+"/frames_%(#)05d.tif"%{"#":i}
exp_fname= expected_img_dir+ "/frame_%(#)05d.tiff"%{"#":i};
boxm2_batch.init_process("vpglLoadPerspectiveCameraProcess");
boxm2_batch.set_input_string(0,camera_fname);
boxm2_batch.run_process();
(id,type) = boxm2_batch.commit_output(0);
cam = dbvalue(id,type);
boxm2_batch.init_process("vilLoadImageViewProcess");
boxm2_batch.set_input_string(0,image_fname);
boxm2_batch.run_process();
(id,type) = boxm2_batch.commit_output(0);
img = dbvalue(id,type);
print("Update");
boxm2_batch.init_process("boxm2OclUpdateProcess");
boxm2_batch.set_input_from_db(0,device);
boxm2_batch.set_input_from_db(1,scene);
boxm2_batch.set_input_from_db(2,openclcache);
boxm2_batch.set_input_from_db(3,cam);
boxm2_batch.set_input_from_db(4,img);
boxm2_batch.set_input_string(5,"");
boxm2_batch.run_process();
if((iter)% 18==0):
print("\t----------------------WE ARE REFINING!--------------------------");
boxm2_batch.init_process("boxm2OclRefineProcess");
boxm2_batch.set_input_from_db(0,device);
boxm2_batch.set_input_from_db(1,scene);
boxm2_batch.set_input_from_db(2,openclcache);
boxm2_batch.set_input_float(3, 0.3); #0.25 * (repeat+1) ); # originally set to .3
boxm2_batch.run_process();
print("Render");
boxm2_batch.init_process("boxm2OclRenderExpectedImageProcess");
boxm2_batch.set_input_from_db(0,device);
boxm2_batch.set_input_from_db(1,scene);
boxm2_batch.set_input_from_db(2,openclcache);
boxm2_batch.set_input_from_db(3,cam);
boxm2_batch.set_input_unsigned(4,NI);
boxm2_batch.set_input_unsigned(5,NJ);
boxm2_batch.run_process();
(id,type) = boxm2_batch.commit_output(0);
exp_img = dbvalue(id,type);
boxm2_batch.init_process("vilSaveImageViewProcess");
boxm2_batch.set_input_from_db(0,exp_img);
boxm2_batch.set_input_string(1,exp_fname);
boxm2_batch.run_process();
boxm2_batch.remove_data(exp_img.id)
boxm2_batch.remove_data(img.id)
boxm2_batch.remove_data(cam.id)
print("Write Main Cache");
boxm2_batch.init_process("boxm2WriteCacheProcess");
boxm2_batch.set_input_from_db(0,cache);
boxm2_batch.run_process();
nadir_cam_fname=cams_dir+ "/camera_nadir.txt"
for test_img_idx in test_frames :
prediction_cam_fname= cams_dir+"/camera%(#)05d.txt"%{"#":test_img_idx};
# Render the the predicted image
boxm2_batch.init_process("vpglLoadPerspectiveCameraProcess");
boxm2_batch.set_input_string(0,prediction_cam_fname);
boxm2_batch.run_process();
(id,type) = boxm2_batch.commit_output(0);
prediction_cam = dbvalue(id,type);
print("Render");
boxm2_batch.init_process("boxm2OclRenderExpectedImageProcess");
boxm2_batch.set_input_from_db(0,device);
boxm2_batch.set_input_from_db(1,scene);
boxm2_batch.set_input_from_db(2,openclcache);
boxm2_batch.set_input_from_db(3,prediction_cam);
boxm2_batch.set_input_unsigned(4,NI);
boxm2_batch.set_input_unsigned(5,NJ);
boxm2_batch.run_process();
(id,type) = boxm2_batch.commit_output(0);
exp_img = dbvalue(id,type);
(id,type) = boxm2_batch.commit_output(1);
vis_img = dbvalue(id,type);
boxm2_batch.init_process("vilSaveImageViewProcess");
boxm2_batch.set_input_from_db(0,exp_img);
boxm2_batch.set_input_string(1,expected_img_dir+ "/predicted_img_%(#)05d.tiff"%{"#":test_img_idx});
boxm2_batch.run_process();
boxm2_batch.init_process("vilSaveImageViewProcess");
boxm2_batch.set_input_from_db(0,vis_img);
boxm2_batch.set_input_string(1,expected_img_dir+ "/predicted_img_mask_%(#)05d.tiff"%{"#":test_img_idx});
boxm2_batch.run_process();
# Render the depth/variance image
boxm2_batch.init_process("vpglLoadPerspectiveCameraProcess");
boxm2_batch.set_input_string(0,nadir_cam_fname);
boxm2_batch.run_process();
(id,type) = boxm2_batch.commit_output(0);
nadir_cam = dbvalue(id,type);
boxm2_batch.init_process("boxm2OclRenderExpectedDepthProcess")
boxm2_batch.set_input_from_db(0,device);
boxm2_batch.set_input_from_db(1,scene);
boxm2_batch.set_input_from_db(2,openclcache);
boxm2_batch.set_input_from_db(3,nadir_cam);
boxm2_batch.set_input_unsigned(4,NI);
boxm2_batch.set_input_unsigned(5,NJ);
boxm2_batch.run_process();
(id,type) = boxm2_batch.commit_output(0);
exp_depth_img = dbvalue(id,type);
(id,type) = boxm2_batch.commit_output(1);
exp_var_img = dbvalue(id,type);
boxm2_batch.init_process("vilSaveImageViewProcess");
boxm2_batch.set_input_from_db(0, exp_depth_img);
boxm2_batch.set_input_string(1, expected_img_dir+ "/exepected_depth.tiff");
boxm2_batch.run_process();
boxm2_batch.init_process("vilSaveImageViewProcess");
boxm2_batch.set_input_from_db(0, exp_var_img);
boxm2_batch.set_input_string(1, expected_img_dir+ "/exepected_var.tiff");
boxm2_batch.run_process();
print 'DONE'
| bsd-2-clause |
flaviovdf/pyksc | src/trend-learner-scripts/summarize_results.py | 1 | 1835 | #-*- coding: utf8
from __future__ import division, print_function
from pyksc import dist
from sklearn.metrics import f1_score
from sklearn.metrics import classification_report
import glob
import numpy as np
import os
import plac
def main(tseries_fpath, base_folder):
folders = glob.glob(os.path.join(base_folder, 'fold-*'))
num_folders = len(folders)
cluster_mapping = []
C_base = np.loadtxt(os.path.join(folders[0], 'ksc/cents.dat'))
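    # Map each fold's cluster ids onto the clusters of fold 0 by assigning
    # every centroid to its nearest centroid in C_base (rolling, i.e.
    # shift-invariant, distance), so labels are comparable across folds.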
for i in xrange(num_folders):
Ci = np.loadtxt(os.path.join(folders[i], 'ksc/cents.dat'))
dists = dist.dist_all(Ci, C_base, rolling=True)[0]
closest = dists.argmin(axis=1)
cluster_mapping.append({})
for k in xrange(Ci.shape[0]):
cluster_mapping[i][k] = closest[k]
y_true_all = []
y_pred_all = []
for i in xrange(num_folders):
y_true = np.loadtxt(os.path.join(folders[i], 'ksc/test_assign.dat'))
y_pred = np.loadtxt(os.path.join(folders[i], \
'cls-res-fitted-50/pred.dat'))
for j in xrange(y_true.shape[0]):
y_true[j] = cluster_mapping[i][y_true[j]]
if y_pred[j] != -1:
y_pred[j] = cluster_mapping[i][y_pred[j]]
y_true_all.extend(y_true)
y_pred_all.extend(y_pred)
y_pred_all = np.asarray(y_pred_all)
y_true_all = np.asarray(y_true_all)
report = classification_report(y_true_all, y_pred_all)
valid = y_pred_all != -1
print()
print('Using the centroids from folder: ', folders[0])
print('Micro Aggregation of Folds:')
print('%.3f fract of videos were not classified' % (sum(~valid) / y_pred_all.shape[0]))
print()
print(classification_report(y_true_all[valid], y_pred_all[valid]))
if __name__ == '__main__':
plac.call(main)
| bsd-3-clause |
harlowja/networkx | examples/multigraph/chess_masters.py | 54 | 5146 | #!/usr/bin/env python
"""
An example of the MultiDiGraph class.
The function chess_pgn_graph reads a collection of chess
matches stored in the specified PGN file
(PGN ="Portable Game Notation")
Here the (compressed) default file ---
chess_masters_WCC.pgn.bz2 ---
contains all 685 World Chess Championship matches
from 1886 - 1985.
(data from http://chessproblem.my-free-games.com/chess/games/Download-PGN.php)
The chess_pgn_graph() function returns a MultiDiGraph
with multiple edges. Each node is
the last name of a chess master. Each edge is directed
from white to black and contains selected game info.
The key statement in chess_pgn_graph below is
G.add_edge(white, black, game_info)
where game_info is a dict describing each game.
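
A minimal sketch of inspecting the returned graph (the 'Result' key comes
from the game_details tags defined below):

    G = chess_pgn_graph()
    for white, black, info in G.edges(data=True):
        print(white, black, info.get('Result'))
        break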
"""
# Copyright (C) 2006-2010 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
# tag names specifying what game info should be
# stored in the dict on each digraph edge
game_details=["Event",
"Date",
"Result",
"ECO",
"Site"]
def chess_pgn_graph(pgn_file="chess_masters_WCC.pgn.bz2"):
"""Read chess games in pgn format in pgn_file.
Filenames ending in .gz or .bz2 will be uncompressed.
Return the MultiDiGraph of players connected by a chess game.
Edges contain game data in a dict.
"""
import bz2
G=nx.MultiDiGraph()
game={}
datafile = bz2.BZ2File(pgn_file)
lines = (line.decode().rstrip('\r\n') for line in datafile)
for line in lines:
if line.startswith('['):
tag,value=line[1:-1].split(' ',1)
game[str(tag)]=value.strip('"')
else:
# empty line after tag set indicates
# we finished reading game info
if game:
white=game.pop('White')
black=game.pop('Black')
G.add_edge(white, black, **game)
game={}
return G
if __name__ == '__main__':
import networkx as nx
G=chess_pgn_graph()
ngames=G.number_of_edges()
nplayers=G.number_of_nodes()
print("Loaded %d chess games between %d players\n"\
% (ngames,nplayers))
# identify connected components
# of the undirected version
Gcc=list(nx.connected_component_subgraphs(G.to_undirected()))
if len(Gcc)>1:
print("Note the disconnected component consisting of:")
print(Gcc[1].nodes())
# find all games with B97 opening (as described in ECO)
openings=set([game_info['ECO']
for (white,black,game_info) in G.edges(data=True)])
print("\nFrom a total of %d different openings,"%len(openings))
print('the following games used the Sicilian opening')
print('with the Najdorff 7...Qb6 "Poisoned Pawn" variation.\n')
for (white,black,game_info) in G.edges(data=True):
if game_info['ECO']=='B97':
print(white,"vs",black)
for k,v in game_info.items():
print(" ",k,": ",v)
print("\n")
try:
import matplotlib.pyplot as plt
except ImportError:
import sys
print("Matplotlib needed for drawing. Skipping")
sys.exit(0)
# make new undirected graph H without multi-edges
H=nx.Graph(G)
# edge width is proportional number of games played
edgewidth=[]
for (u,v,d) in H.edges(data=True):
edgewidth.append(len(G.get_edge_data(u,v)))
# node size is proportional to number of games won
wins=dict.fromkeys(G.nodes(),0.0)
for (u,v,d) in G.edges(data=True):
r=d['Result'].split('-')
if r[0]=='1':
wins[u]+=1.0
elif r[0]=='1/2':
wins[u]+=0.5
wins[v]+=0.5
else:
wins[v]+=1.0
try:
pos=nx.graphviz_layout(H)
except:
pos=nx.spring_layout(H,iterations=20)
plt.rcParams['text.usetex'] = False
plt.figure(figsize=(8,8))
nx.draw_networkx_edges(H,pos,alpha=0.3,width=edgewidth, edge_color='m')
nodesize=[wins[v]*50 for v in H]
nx.draw_networkx_nodes(H,pos,node_size=nodesize,node_color='w',alpha=0.4)
nx.draw_networkx_edges(H,pos,alpha=0.4,node_size=0,width=1,edge_color='k')
nx.draw_networkx_labels(H,pos,fontsize=14)
font = {'fontname' : 'Helvetica',
'color' : 'k',
'fontweight' : 'bold',
'fontsize' : 14}
plt.title("World Chess Championship Games: 1886 - 1985", font)
# change font and write text (using data coordinates)
font = {'fontname' : 'Helvetica',
'color' : 'r',
'fontweight' : 'bold',
'fontsize' : 14}
plt.text(0.5, 0.97, "edge width = # games played",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.text(0.5, 0.94, "node size = # games won",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.axis('off')
plt.savefig("chess_masters.png",dpi=75)
print("Wrote chess_masters.png")
plt.show() # display
| bsd-3-clause |
YinongLong/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 272 | 7798 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
| bsd-3-clause |
davebrent/consyn | consyn/cli/show.py | 1 | 4218 | # -*- coding: utf-8 -*-
# Copyright (C) 2014, David Poulter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import collections
import click
import matplotlib.pyplot as plt
import numpy
from . import configurator
from ..base import Pipeline
from ..concatenators import concatenator
from ..ext import Analyser
from ..ext import UnitLoader
from ..models import MediaFile
from ..models import Unit
from ..utils import UnitGenerator
FEATURES = len(Analyser())
TICK_COLOR = "#b9b9b9"
GRID_COLOR = "#003902"
WAVE_COLOR = "#00e399"
ONSET_COLOR = "#d20000"
FIGURE_COLOR = "#373737"
BACK_COLOR = "#000000"
def samps_to_secs(samples, samplerate):
return float(samples) / samplerate
@click.command("show", short_help="Show a mediafile and its onsets.")
@click.option("--hopsize", default=1024,
help="Hopsize used to read samples.")
@click.argument("mediafile")
@configurator
def command(config, mediafile, hopsize):
mediafile = MediaFile.by_id_or_name(config.session, mediafile)
pipeline = Pipeline([
UnitGenerator(mediafile, config.session),
UnitLoader(
hopsize=hopsize,
key=lambda state: state["unit"].mediafile.path),
concatenator("clip", mediafile),
list
])
results = pipeline.run()
results = results[0]
duration = float(results["buffer"].shape[1])
time = numpy.linspace(0, samps_to_secs(duration, mediafile.samplerate),
num=duration)
figure, axes = plt.subplots(
mediafile.channels, sharex=True, sharey=True,
subplot_kw={
"xlim": [0, samps_to_secs(duration, mediafile.samplerate)],
"ylim": [-1, 1]
}
)
# Features
figure_feats, axes_feats = plt.subplots(len(FEATURES), sharex=True)
features_all = collections.defaultdict(list)
timings = []
for unit in mediafile.units.order_by(Unit.position) \
.filter(Unit.channel == 0):
for feature in FEATURES:
features_all[feature].append(unit.features[feature])
timings.append(unit.position)
for index, key in enumerate(features_all):
axes_feats[index].plot(timings, features_all[key], color=WAVE_COLOR)
axes_feats[index].set_axisbelow(True)
axes_feats[index].patch.set_facecolor(BACK_COLOR)
for label in axes_feats[index].get_xticklabels():
label.set_color(TICK_COLOR)
label.set_fontsize(1)
for label in axes_feats[index].get_yticklabels():
label.set_color(TICK_COLOR)
label.set_fontsize(1)
# Buffer
if not isinstance(axes, collections.Iterable):
axes = [axes]
for index, ax in enumerate(axes):
ax.grid(True, color=GRID_COLOR, linestyle="solid")
ax.set_axisbelow(True)
for label in ax.get_xticklabels():
label.set_color(TICK_COLOR)
label.set_fontsize(9)
for label in ax.get_yticklabels():
label.set_color(TICK_COLOR)
label.set_fontsize(9)
ax.patch.set_facecolor(BACK_COLOR)
ax.plot(time, results["buffer"][index], color=WAVE_COLOR)
for unit in mediafile.units:
position = samps_to_secs(unit.position, mediafile.samplerate)
position = position if position != 0 else 0.003
if unit.channel == index:
ax.axvline(x=position, color=ONSET_COLOR)
figure.patch.set_facecolor(FIGURE_COLOR)
figure.set_tight_layout(True)
figure_feats.set_facecolor(FIGURE_COLOR)
figure_feats.set_tight_layout(True)
plt.show()
| gpl-3.0 |
rilutham/HAC-DM | src/Segmentation.py | 1 | 3537 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Segmentation.py
@author: rilutham
"""
from PyQt4 import QtGui
import pandas as pd
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage, dendrogram, fcluster
from scipy.stats import itemfreq
#from sklearn.metrics import silhouette_score
class Segmentation(QtGui.QWidget):
'''
Apply Hierarchical Agglomerative Clustering
'''
def __init__(self, data):
'''
Constructor
'''
super(Segmentation, self).__init__()
        # Data which is used for distance measure
self.data = data
self.n_cols = len(self.data.columns)
n_rows = len(self.data.index)
self.dist_data = self.data.ix[:, 1:self.n_cols]
self.label = self.data.ix[0:n_rows, 0:1]
# Label for dendrogram
self.dendro_label = []
for i in range(n_rows):
self.dendro_label.append(self.label.values[i][0])
self.df_result_data = None
# Call initial methods
self.count_distance()
self.do_segmentation()
def count_distance(self):
'''
        Count distances between objects using the Jaccard distance
'''
self.row_dist = pd.DataFrame(squareform(pdist(self.dist_data, metric='jaccard')))
self.row_dist = self.row_dist.fillna(0)
def do_segmentation(self):
'''
Apply Complete Linkage method and generate dendrogram
'''
# Cluster using complete linkage
self.row_clusters = linkage(self.row_dist, method='complete')
self.df_result_data = self.data
# Generate cluster index
self.cluster_index = fcluster(self.row_clusters, t=2, criterion='maxclust')
# Generate dendrogram and labels
dendrogram(self.row_clusters, labels=self.dendro_label, \
leaf_font_size=9, leaf_rotation=90)
# Add new column (cluster_index) to result data
self.df_result_data['ID_Segmen'] = self.cluster_index
self.n_cluster = "Jumlah segmen yang terbentuk: {0}".format(max(self.cluster_index))
freq_of_cluster = dict(itemfreq(self.cluster_index))
self.summary_list = []
for key, val in freq_of_cluster.items():
isi = "Segmen ke-{0}: {1} pelanggan".format(key, val)
self.summary_list.append(isi)
# Silhouette
#a = silhouette_score(self.row_dist, self.df_result_data['ID_Segmen'], metric="precomputed")
def refresh_result_data(self, treshold):
'''
        Generate dendrogram with a new threshold
'''
# Generate dendrogram and labels
dendrogram(self.row_clusters, color_threshold=treshold, labels=self.dendro_label, \
leaf_font_size=9, leaf_rotation=90)
self.df_result_data = self.data
# Generate cluster index
self.cluster_index = fcluster(self.row_clusters, t=treshold, criterion='distance')
# Add new column (cluster_index) to result data
self.df_result_data['ID_Segmen'] = self.cluster_index
freq_of_cluster = dict(itemfreq(self.cluster_index))
self.summary_list = []
for key, val in freq_of_cluster.items():
isi = "Segmen ke-{0}: {1} pelanggan".format(key, val)
self.summary_list.append(isi)
self.n_cluster = "Jumlah segmen yang terbentuk: {0}".format(max(self.cluster_index))
# Silhouette
#a = silhouette_score(self.row_dist, self.df_result_data['ID_Segmen'], metric="precomputed")
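# A minimal, self-contained sketch (not part of the original class) showing the
# same jaccard-distance + complete-linkage + fcluster pipeline used above on a
# tiny binary dataset. The function name and sample data are hypothetical and
# only illustrate the intended call sequence.
def _example_jaccard_complete_linkage():
    # four customers described by three binary attributes
    binary_data = pd.DataFrame([[1, 0, 1],
                                [1, 0, 0],
                                [0, 1, 1],
                                [0, 1, 0]])
    # pairwise Jaccard distances in square form, as in count_distance() above
    dist = pd.DataFrame(squareform(pdist(binary_data, metric='jaccard'))).fillna(0)
    # complete-linkage hierarchy and a flat two-cluster assignment
    clusters = linkage(dist, method='complete')
    labels = fcluster(clusters, t=2, criterion='maxclust')
    return labels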
| mit |
mrshu/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 5 | 1363 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatic correction for
unbalanced classes.
"""
print __doc__
import numpy as np
import pylab as pl
from sklearn import svm
# we create 40 separable points
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
# get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = pl.plot(xx, yy, 'k-', label='no weights')
h1 = pl.plot(xx, wyy, 'k--', label='with weights')
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=pl.cm.Paired)
pl.legend()
pl.axis('tight')
pl.show()
| bsd-3-clause |
fyffyt/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
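# A minimal usage sketch (not part of scikit-learn itself): two hypothetical
# bicluster sets over a 4x4 matrix, each given as (row_indicators,
# column_indicators). The indicator arrays below are made up purely for
# illustration; identical sets score 1.0.
def _consensus_score_example():
    rows = np.array([[1, 1, 0, 0],
                     [0, 0, 1, 1]])
    cols = np.array([[1, 0, 1, 0],
                     [0, 1, 0, 1]])
    a = (rows, cols)
    b = (rows, cols)
    return consensus_score(a, b)  # -> 1.0 for identical bicluster sets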
| bsd-3-clause |
numenta-archive/htmresearch | projects/capybara/supervised_baseline/v1_no_sequences/plot_results.py | 9 | 3714 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import argparse
import os
import pandas as pd
from sklearn.metrics import (classification_report, confusion_matrix,
accuracy_score)
from baseline_utils import predictions_vote
from plot_utils import (plot_confusion_matrix, plot_train_history,
plot_classification_report, plot_predictions)
if __name__ == '__main__':
# Path to CSV files (training history and predictions)
parser = argparse.ArgumentParser()
parser.add_argument('--vote_window', '-v', dest='vote_window',
type=int, default=11)
parser.add_argument('--input_dir', '-i', dest='input_dir',
type=str, default='results')
parser.add_argument('--output_dir', '-o', dest='output_dir',type=str,
default='plots')
options = parser.parse_args()
vote_window = options.vote_window
input_dir = options.input_dir
output_dir = options.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
train_history_path = os.path.join(input_dir, 'train_history.csv')
predictions_path = os.path.join(input_dir, 'predictions.csv')
# Training history
df = pd.read_csv(train_history_path)
epochs = range(len(df.epoch.values))
acc = df.acc.values
loss = df.loss.values
output_file = os.path.join(output_dir, 'train_history.html')
plot_train_history(epochs, acc, loss, output_file)
print 'Plot saved:', output_file
# Predictions
df = pd.read_csv(predictions_path)
t = df.t.values
X_values = df.scalar_value.values
y_true = df.y_true.values
y_pred = df.y_pred.values
if vote_window > 0:
y_pred = predictions_vote(y_pred, vote_window)
# Accuracy
acc = accuracy_score(y_true, y_pred)
print 'Accuracy on test set:', acc
label_list = sorted(df.y_true.unique())
# Plot normalized confusion matrix
cnf_matrix = confusion_matrix(y_true, y_pred)
output_file = os.path.join(output_dir, 'confusion_matrix.png')
_ = plot_confusion_matrix(cnf_matrix,
output_file,
classes=label_list,
normalize=True,
title='Confusion matrix (accuracy=%.2f)' % acc)
print 'Plot saved:', output_file
# Classification report (F1 score, etc.)
clf_report = classification_report(y_true, y_pred)
output_file = os.path.join(output_dir, 'classification_report.png')
plot_classification_report(clf_report, output_file)
print 'Plot saved:', output_file
# Plot predictions
output_file = os.path.join(output_dir, 'predictions.html')
title = 'Predictions (accuracy=%s)' % acc
plot_predictions(t, X_values, y_true, y_pred, output_file, title)
print 'Plot saved:', output_file
| agpl-3.0 |
OxfordSKA/bda | scripts/process_results.py | 1 | 2529 | #!venv/bin/python
import pyfits
import aplpy
import matplotlib.pyplot as plt
from os.path import join
import os.path
import numpy
def plot_scaling(target, weighting):
root_dir = 'bda_results'
obs_length = [5, 10, 30, 60, 120]
types = {
'default': '_',
'noisy_default': '_noisy_',
'bda': '_bda_',
'noisy_bda': '_noisy_bda_',
'expanded_bda': '_bda_expanded_bda_',
'noisy_expanded_bda': '_noisy_bda_expanded_bda_'
}
results = {
'default': numpy.zeros(len(obs_length)),
'noisy_default': numpy.zeros(len(obs_length)),
'bda': numpy.zeros(len(obs_length)),
'noisy_bda': numpy.zeros(len(obs_length)),
'expanded_bda': numpy.zeros(len(obs_length)),
'noisy_expanded_bda': numpy.zeros(len(obs_length)),
'noise': numpy.zeros(len(obs_length))
}
for i, t in enumerate(obs_length):
sim_dir = 'SIM_%04is' % t
for type_key in types:
type = types[type_key]
model_file = 'calibrated%sMODEL_DATA_%s_%s.fits' % \
(type, target, weighting)
model_file = join(root_dir, sim_dir, model_file)
calibrated_file = 'calibrated%sCORRECTED_DATA_%s_%s.fits' % \
(type, target, weighting)
calibrated_file = join(root_dir, sim_dir, calibrated_file)
print model_file, os.path.isfile(model_file)
print calibrated_file, os.path.isfile(calibrated_file)
model = pyfits.getdata(model_file)
calibrated = pyfits.getdata(calibrated_file)
diff = calibrated - model
results[type_key][i] = numpy.std(diff)
noise_file = 'model_ref_DATA_%s_%s.fits' % (target, weighting)
noise_file = join(root_dir, sim_dir, noise_file)
noise = pyfits.getdata(noise_file)
results['noise'][i] = numpy.std(noise)
print ''
for k in results:
print k, results[k]
ax = plt.gca()
# ax.set_yscale('log', nonposy='clip')
# ax.set_xscale('log', nonposy='clip')
# ax.plot(obs_length, results['noise'], '.-')
ax.plot(obs_length, results['expanded_bda'], 'b-')
ax.plot(obs_length, results['noisy_expanded_bda'], 'r-')
# ax.plot(obs_length, results['default'], 'gx-')
# ax.plot(obs_length, results['bda'], 'g-')
if __name__ == '__main__':
fig = plt.figure(figsize=(6.5, 5.0))
ax = fig.add_subplot(111)
plot_scaling('1', 'n')
plot_scaling('0', 'n')
plt.show()
| bsd-3-clause |
yunque/sms-tools | lectures/06-Harmonic-model/plots-code/carnatic-spectrum.py | 22 | 1042 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/carnatic.wav')
pin = 1.4*fs
w = np.blackman(1601)
N = 4096
hM1 = int(math.floor((w.size+1)/2))
hM2 = int(math.floor(w.size/2))
x1 = x[pin-hM1:pin+hM2]
mX, pX = DFT.dftAnal(x1, w, N)
plt.figure(1, figsize=(9, 7))
plt.subplot(311)
plt.plot(np.arange(-hM1, hM2)/float(fs), x1, lw=1.5)
plt.axis([-hM1/float(fs), hM2/float(fs), min(x1), max(x1)])
plt.title('x (carnatic.wav)')
plt.subplot(3,1,2)
plt.plot(fs*np.arange(mX.size)/float(N), mX, 'r', lw=1.5)
plt.axis([0,fs/4,-100,max(mX)])
plt.title ('mX')
plt.subplot(3,1,3)
plt.plot(fs*np.arange(pX.size)/float(N), pX, 'c', lw=1.5)
plt.axis([0,fs/4,min(pX),27])
plt.title ('pX')
plt.tight_layout()
plt.savefig('carnatic-spectrum.png')
plt.show()
| agpl-3.0 |
quheng/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
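# A minimal sketch of the core modeling step above, separated from the mapping
# code: fit a OneClassSVM on standardized presence-only feature vectors and
# rank new points by decision_function. The synthetic arrays are hypothetical
# stand-ins for the environmental coverages.
def _one_class_density_sketch():
    rng = np.random.RandomState(0)
    train_cover = rng.randn(100, 14)                 # presence-only samples
    mean, std = train_cover.mean(axis=0), train_cover.std(axis=0)
    clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
    clf.fit((train_cover - mean) / std)
    candidates = rng.randn(10, 14)                   # points to rank
    return clf.decision_function((candidates - mean) / std)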
| bsd-3-clause |
agutieda/QuantEcon.py | examples/eigenvec.py | 7 | 1239 | """
Filename: eigenvec.py
Authors: Tom Sargent and John Stachurski.
Illustrates eigenvectors.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.linalg import eig
A = ((1, 2),
(2, 1))
A = np.array(A)
evals, evecs = eig(A)
evecs = evecs[:, 0], evecs[:, 1]
fig, ax = plt.subplots()
# Set the axes through the origin
for spine in ['left', 'bottom']:
ax.spines[spine].set_position('zero')
for spine in ['right', 'top']:
ax.spines[spine].set_color('none')
ax.grid(alpha=0.4)
xmin, xmax = -3, 3
ymin, ymax = -3, 3
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
# ax.set_xticks(())
# ax.set_yticks(())
# Plot each eigenvector
for v in evecs:
ax.annotate('', xy=v, xytext=(0, 0),
arrowprops=dict(facecolor='blue',
shrink=0,
alpha=0.6,
width=0.5))
# Plot the image of each eigenvector
for v in evecs:
v = np.dot(A, v)
ax.annotate('', xy=v, xytext=(0, 0),
arrowprops=dict(facecolor='red',
shrink=0,
alpha=0.6,
width=0.5))
# Plot the lines they run through
x = np.linspace(xmin, xmax, 3)
for v in evecs:
a = v[1] / v[0]
ax.plot(x, a * x, 'b-', lw=0.4)
plt.show()
| bsd-3-clause |
IndraVikas/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
anntzer/scikit-learn | examples/semi_supervised/plot_self_training_varying_threshold.py | 13 | 4072 | """
=============================================
Effect of varying threshold for self-training
=============================================
This example illustrates the effect of a varying threshold on self-training.
The `breast_cancer` dataset is loaded, and labels are deleted such that only 50
out of 569 samples have labels. A `SelfTrainingClassifier` is fitted on this
dataset, with varying thresholds.
The upper graph shows the amount of labeled samples that the classifier has
available by the end of fit, and the accuracy of the classifier. The lower
graph shows the last iteration in which a sample was labeled. All values are
cross validated with 3 folds.
At low thresholds (in [0.4, 0.5]), the classifier learns from samples that were
labeled with low confidence. These low-confidence samples are likely to have
incorrectly predicted labels, and as a result, fitting on these incorrect labels
produces a poor accuracy. Note that the classifier labels almost all of the
samples, and only takes one iteration.
For very high thresholds (in [0.9, 1)) we observe that the classifier does not
augment its dataset (the amount of self-labeled samples is 0). As a result, the
accuracy achieved with a threshold of 0.9999 is the same as a normal supervised
classifier would achieve.
The optimal accuracy lies in between both of these extremes at a threshold of
around 0.7.
"""
print(__doc__)
# Authors: Oliver Rausch <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.metrics import accuracy_score
from sklearn.utils import shuffle
n_splits = 3
X, y = datasets.load_breast_cancer(return_X_y=True)
X, y = shuffle(X, y, random_state=42)
y_true = y.copy()
y[50:] = -1
total_samples = y.shape[0]
base_classifier = SVC(probability=True, gamma=0.001, random_state=42)
x_values = np.arange(0.4, 1.05, 0.05)
x_values = np.append(x_values, 0.99999)
scores = np.empty((x_values.shape[0], n_splits))
amount_labeled = np.empty((x_values.shape[0], n_splits))
amount_iterations = np.empty((x_values.shape[0], n_splits))
for (i, threshold) in enumerate(x_values):
self_training_clf = SelfTrainingClassifier(base_classifier,
threshold=threshold)
# We need manual cross validation so that we don't treat -1 as a separate
# class when computing accuracy
skfolds = StratifiedKFold(n_splits=n_splits)
for fold, (train_index, test_index) in enumerate(skfolds.split(X, y)):
X_train = X[train_index]
y_train = y[train_index]
X_test = X[test_index]
y_test = y[test_index]
y_test_true = y_true[test_index]
self_training_clf.fit(X_train, y_train)
        # The amount of labeled samples at the end of fitting
amount_labeled[i, fold] = total_samples - np.unique(
self_training_clf.labeled_iter_, return_counts=True)[1][0]
# The last iteration the classifier labeled a sample in
amount_iterations[i, fold] = np.max(self_training_clf.labeled_iter_)
y_pred = self_training_clf.predict(X_test)
scores[i, fold] = accuracy_score(y_test_true, y_pred)
ax1 = plt.subplot(211)
ax1.errorbar(x_values, scores.mean(axis=1),
yerr=scores.std(axis=1),
capsize=2, color='b')
ax1.set_ylabel('Accuracy', color='b')
ax1.tick_params('y', colors='b')
ax2 = ax1.twinx()
ax2.errorbar(x_values, amount_labeled.mean(axis=1),
yerr=amount_labeled.std(axis=1),
capsize=2, color='g')
ax2.set_ylim(bottom=0)
ax2.set_ylabel('Amount of labeled samples', color='g')
ax2.tick_params('y', colors='g')
ax3 = plt.subplot(212, sharex=ax1)
ax3.errorbar(x_values, amount_iterations.mean(axis=1),
yerr=amount_iterations.std(axis=1),
capsize=2, color='b')
ax3.set_ylim(bottom=0)
ax3.set_ylabel('Amount of iterations')
ax3.set_xlabel('Threshold')
plt.show()
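# A minimal usage sketch of SelfTrainingClassifier outside the cross-validated
# experiment above: unlabeled samples are marked with -1 and a single threshold
# is used. The 0.7 threshold and the choice to reuse the breast_cancer data are
# illustrative only.
def _self_training_sketch(threshold=0.7):
    X_demo, y_demo = datasets.load_breast_cancer(return_X_y=True)
    y_partial = y_demo.copy()
    y_partial[50:] = -1                      # keep only the first 50 labels
    base = SVC(probability=True, gamma=0.001, random_state=42)
    clf = SelfTrainingClassifier(base, threshold=threshold)
    clf.fit(X_demo, y_partial)
    return accuracy_score(y_demo, clf.predict(X_demo))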
| bsd-3-clause |
sekikn/incubator-airflow | tests/providers/apache/pinot/hooks/test_pinot.py | 3 | 9856 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import io
import os
import subprocess
import unittest
from unittest import mock
import pytest
from airflow.exceptions import AirflowException
from airflow.providers.apache.pinot.hooks.pinot import PinotAdminHook, PinotDbApiHook
class TestPinotAdminHook(unittest.TestCase):
def setUp(self):
super().setUp()
self.conn = conn = mock.MagicMock()
self.conn.host = 'host'
self.conn.port = '1000'
self.conn.extra_dejson = {'cmd_path': './pinot-admin.sh'}
class PinotAdminHookTest(PinotAdminHook):
def get_connection(self, conn_id):
return conn
self.db_hook = PinotAdminHookTest()
@mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')
def test_add_schema(self, mock_run_cli):
params = ["schema_file", False]
self.db_hook.add_schema(*params)
mock_run_cli.assert_called_once_with(
[
'AddSchema',
'-controllerHost',
self.conn.host,
'-controllerPort',
self.conn.port,
'-schemaFile',
params[0],
]
)
@mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')
def test_add_table(self, mock_run_cli):
params = ["config_file", False]
self.db_hook.add_table(*params)
mock_run_cli.assert_called_once_with(
[
'AddTable',
'-controllerHost',
self.conn.host,
'-controllerPort',
self.conn.port,
'-filePath',
params[0],
]
)
@mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')
def test_create_segment(self, mock_run_cli):
params = {
"generator_config_file": "a",
"data_dir": "b",
"segment_format": "c",
"out_dir": "d",
"overwrite": True,
"table_name": "e",
"segment_name": "f",
"time_column_name": "g",
"schema_file": "h",
"reader_config_file": "i",
"enable_star_tree_index": False,
"star_tree_index_spec_file": "j",
"hll_size": 9,
"hll_columns": "k",
"hll_suffix": "l",
"num_threads": 8,
"post_creation_verification": True,
"retry": 7,
}
self.db_hook.create_segment(**params)
mock_run_cli.assert_called_once_with(
[
'CreateSegment',
'-generatorConfigFile',
params["generator_config_file"],
'-dataDir',
params["data_dir"],
'-format',
params["segment_format"],
'-outDir',
params["out_dir"],
'-overwrite',
params["overwrite"],
'-tableName',
params["table_name"],
'-segmentName',
params["segment_name"],
'-timeColumnName',
params["time_column_name"],
'-schemaFile',
params["schema_file"],
'-readerConfigFile',
params["reader_config_file"],
'-starTreeIndexSpecFile',
params["star_tree_index_spec_file"],
'-hllSize',
params["hll_size"],
'-hllColumns',
params["hll_columns"],
'-hllSuffix',
params["hll_suffix"],
'-numThreads',
params["num_threads"],
'-postCreationVerification',
params["post_creation_verification"],
'-retry',
params["retry"],
]
)
@mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')
def test_upload_segment(self, mock_run_cli):
params = ["segment_dir", False]
self.db_hook.upload_segment(*params)
mock_run_cli.assert_called_once_with(
[
'UploadSegment',
'-controllerHost',
self.conn.host,
'-controllerPort',
self.conn.port,
'-segmentDir',
params[0],
]
)
@mock.patch('subprocess.Popen')
def test_run_cli_success(self, mock_popen):
mock_proc = mock.MagicMock()
mock_proc.returncode = 0
mock_proc.stdout = io.BytesIO(b'')
mock_popen.return_value = mock_proc
params = ["foo", "bar", "baz"]
self.db_hook.run_cli(params)
params.insert(0, self.conn.extra_dejson.get('cmd_path'))
mock_popen.assert_called_once_with(
params, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, close_fds=True, env=None
)
@mock.patch('subprocess.Popen')
def test_run_cli_failure_error_message(self, mock_popen):
msg = b"Exception caught"
mock_proc = mock.MagicMock()
mock_proc.returncode = 0
mock_proc.stdout = io.BytesIO(msg)
mock_popen.return_value = mock_proc
params = ["foo", "bar", "baz"]
with self.assertRaises(AirflowException, msg=msg):
self.db_hook.run_cli(params)
params.insert(0, self.conn.extra_dejson.get('cmd_path'))
mock_popen.assert_called_once_with(
params, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, close_fds=True, env=None
)
@mock.patch('subprocess.Popen')
def test_run_cli_failure_status_code(self, mock_popen):
mock_proc = mock.MagicMock()
mock_proc.returncode = 1
mock_proc.stdout = io.BytesIO(b'')
mock_popen.return_value = mock_proc
self.db_hook.pinot_admin_system_exit = True
params = ["foo", "bar", "baz"]
with self.assertRaises(AirflowException):
self.db_hook.run_cli(params)
params.insert(0, self.conn.extra_dejson.get('cmd_path'))
env = os.environ.copy()
env.update({"JAVA_OPTS": "-Dpinot.admin.system.exit=true "})
mock_popen.assert_called_once_with(
params, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, close_fds=True, env=env
)
class TestPinotDbApiHook(unittest.TestCase):
def setUp(self):
super().setUp()
self.conn = conn = mock.MagicMock()
self.conn.host = 'host'
self.conn.port = '1000'
self.conn.conn_type = 'http'
self.conn.extra_dejson = {'endpoint': 'query/sql'}
self.cur = mock.MagicMock()
self.conn.cursor.return_value = self.cur
self.conn.__enter__.return_value = self.cur
self.conn.__exit__.return_value = None
class TestPinotDBApiHook(PinotDbApiHook):
def get_conn(self):
return conn
def get_connection(self, conn_id):
return conn
self.db_hook = TestPinotDBApiHook
def test_get_uri(self):
"""
Test on getting a pinot connection uri
"""
db_hook = self.db_hook()
self.assertEqual(db_hook.get_uri(), 'http://host:1000/query/sql')
def test_get_conn(self):
"""
Test on getting a pinot connection
"""
conn = self.db_hook().get_conn()
self.assertEqual(conn.host, 'host')
self.assertEqual(conn.port, '1000')
self.assertEqual(conn.conn_type, 'http')
self.assertEqual(conn.extra_dejson.get('endpoint'), 'query/sql')
def test_get_records(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchall.return_value = result_sets
self.assertEqual(result_sets, self.db_hook().get_records(statement))
def test_get_first(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchone.return_value = result_sets[0]
self.assertEqual(result_sets[0], self.db_hook().get_first(statement))
def test_get_pandas_df(self):
statement = 'SQL'
column = 'col'
result_sets = [('row1',), ('row2',)]
self.cur.description = [(column,)]
self.cur.fetchall.return_value = result_sets
df = self.db_hook().get_pandas_df(statement)
self.assertEqual(column, df.columns[0])
for i in range(len(result_sets)): # pylint: disable=consider-using-enumerate
self.assertEqual(result_sets[i][0], df.values.tolist()[i][0])
class TestPinotDbApiHookIntegration(unittest.TestCase):
@pytest.mark.integration("pinot")
@mock.patch.dict('os.environ', AIRFLOW_CONN_PINOT_BROKER_DEFAULT="pinot://pinot:8000/")
def test_should_return_records(self):
hook = PinotDbApiHook()
sql = "select playerName from baseballStats ORDER BY playerName limit 5"
records = hook.get_records(sql)
self.assertEqual([["A. Harry"], ["A. Harry"], ["Aaron"], ["Aaron Albert"], ["Aaron Albert"]], records)
| apache-2.0 |
yuchenhou/elephant | elephant/main.py | 1 | 2126 | import json
import os
import numpy
import pandas
import seaborn
import sklearn.preprocessing  # imported explicitly so sklearn.preprocessing is available below
from matplotlib import pyplot
from elephant.estimator import Estimator
def evaluate(data_set_name, layer_size, n_hidden_layers):
with open(os.path.join('../specs', data_set_name + '.json')) as specs_file:
specs = json.load(specs_file)
data_set = pandas.read_csv(os.path.join('../resources', specs['file']), sep=specs['separator'],
engine=specs['engine'])
print(data_set.head())
with open(os.path.join(os.path.dirname(__file__), 'neural-net.json')) as config_file:
config = json.load(config_file)
x = data_set.ix[:, :2].values
estimator = Estimator(x, config, layer_size, n_hidden_layers)
y = data_set.ix[:, 2].values.reshape(-1, 1)
if specs['scaling']:
y = sklearn.preprocessing.MaxAbsScaler().fit_transform(numpy.log(y))
return estimator.estimate(y, config['batch_size'], specs['test_size'], specs['metric'])
def experiment(data_set_name, layer_size, hidden_layer_count, trial_count, ):
errors = []
for trial in range(trial_count):
errors.append(evaluate(data_set_name, layer_size, hidden_layer_count, ))
errors = numpy.array(errors)
print(errors.mean(), errors.std())
return errors.mean()
def grid_search():
MSEs = []
layer_sizes = [1, 2, 4, 8, ]
hidden_layer_counts = [1, 2, 4, 8, ]
for layer_size in layer_sizes:
MSEs.append(
[experiment('airport', layer_size, hidden_layer_count, 1) for hidden_layer_count in hidden_layer_counts])
mses = pandas.DataFrame(numpy.array(MSEs), layer_sizes, hidden_layer_counts)
print(mses)
axes = seaborn.heatmap(mses, annot=True, )
axes.set_ylabel('layer sizes')
axes.set_xlabel('hidden layer count')
pyplot.savefig('../resources/heat-map')
def main():
# grid_search()
experiment('movie-tweeting', 2, 2, 1, )
if __name__ == '__main__':
recommendation_data = ['movie-lens-100k', 'movie-lens-1m', 'e-pinions', 'movie-tweeting', ]
graph_data = ['airport', 'collaboration', 'congress', 'forum', ]
main()
| mit |
pnedunuri/scikit-learn | examples/exercises/plot_cv_digits.py | 232 | 1206 | """
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn import cross_validation, datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_validation.cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
| bsd-3-clause |
ricket1978/ggplot | ggplot/geoms/geom_line.py | 12 | 1405 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
from .geom import geom
class geom_line(geom):
DEFAULT_AES = {'color': 'black', 'alpha': None, 'linetype': 'solid', 'size': 1.0}
REQUIRED_AES = {'x', 'y'}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity'}
_aes_renames = {'size': 'linewidth', 'linetype': 'linestyle'}
_units = {'alpha', 'color', 'linestyle'}
def __init__(self, *args, **kwargs):
super(geom_line, self).__init__(*args, **kwargs)
self._warning_printed = False
def _plot_unit(self, pinfo, ax):
if 'linewidth' in pinfo and isinstance(pinfo['linewidth'], list):
            # ggplot also supports aes(size=...) but the current matplotlib
            # does not. See https://github.com/matplotlib/matplotlib/issues/2658
pinfo['linewidth'] = 4
if not self._warning_printed:
                msg = "'geom_line()' currently does not support the mapping of " +\
"size ('aes(size=<var>'), using size=4 as a replacement.\n" +\
"Use 'geom_line(size=x)' to set the size for the whole line.\n"
sys.stderr.write(msg)
self._warning_printed = True
pinfo = self.sort_by_x(pinfo)
x = pinfo.pop('x')
y = pinfo.pop('y')
ax.plot(x, y, **pinfo)
| bsd-2-clause |
lenovor/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
Pymatteo/QtNMR | fftscripttest.py | 1 | 3229 | import numpy as np
import matplotlib
matplotlib.use('qt5agg')
import matplotlib.pyplot as plt
# This variable controls the strength of the exponential apodization #
## change it as needed, experiment before... (1000 is often good) ####
LB=1000
#####################################################################
self.dat.setLB(LB)
def isint(value):
try:
int(value)
return True
except ValueError:
return False
def find_nearest(array,value,np):
idx = np.abs(array-value).argmin()
return idx
np.set_printoptions(precision=5)
filelist = utils.filelist(self.dat.getFilename()[0])
#print(filelist)
folder = self.dat.getFilename()[0].rsplit('/',1)[0]
folder = folder.replace("\\", "/")
signal_max=[0,'']
spectra_list=[]
for ii in filelist:
if isint(ii.split('/')[-1].split('.')[-2]):
print(ii)
spectra_list = spectra_list + [ii]
print(spectra_list)
for ii in spectra_list:
self.loadFile((ii,'*.tnt'))
integral, magnitude_integral = self.dat.find_phcorr_int()
# print(integral, magnitude_integral)
if magnitude_integral[0] > signal_max[0]:
signal_max=[magnitude_integral[0],ii]
#print('signalmax',signal_max)
self.loadFile((signal_max[1],'*.tnt'))
self.echo_find()
selection=self.dat.getSelection()
self.save_selection()
echo_center=(selection[1]-selection[0])/2+selection[0]
spectrum=np.zeros(self.dat.get1dpoints())
frequencies=np.zeros(self.dat.get1dpoints())
print('##############')
print('initilize fft sum')
print('##############')
for ii in spectra_list:
print(ii)
self.loadFile((ii,'*.tnt'))
self.load_selection()
self.dat.auto_phase()
self.dat.setShifter(echo_center)
self.dat.left_shift(True)
self.dat.zerofill()
self.exp_apodization()
self.dat.fourier(True)
spectrum_temp=self.dat.getData()[0].real
frequencies_temp=(self.dat.getXaxis()+int(ii.split('/')[-1].split('.')[-2])*1e3)/1e6
if ii == spectra_list[0]:
frequencies=frequencies_temp
spectrum=spectrum_temp
else:
if (frequencies_temp[0] < np.amin(frequencies)) or (frequencies_temp[0] > np.amax(frequencies)):
print('too wide data separation')
else:
# print(find_nearest(frequencies,frequencies_temp[0],np))
# print(frequencies_temp.size)
# print(frequencies.size)
trailing_zeros=find_nearest(frequencies,frequencies_temp[0],np)+frequencies_temp.size-frequencies.size
leading_zeros=trailing_zeros-frequencies_temp.size+frequencies.size
print(trailing_zeros)
frequencies=np.append(frequencies,frequencies_temp[-trailing_zeros:])
spectrum=np.append(spectrum,np.zeros(trailing_zeros))
spectrum_temp=np.append(np.zeros(leading_zeros),spectrum_temp)
spectrum=spectrum+spectrum_temp
print(frequencies_temp.size)
print(frequencies.size)
print(spectrum.size)
spectrum=np.column_stack((frequencies,spectrum))
np.savetxt(folder+'/spectrum_fftsum_'+folder.rsplit('/',1)[1]+'.dat', spectrum, delimiter=' ')
plt.ion()
plt.clf()
fig=plt.figure(1)
plt.plot(spectrum[:,0], spectrum[:,1])
plt.xlabel('Frequency (MHz)')
plt.ylabel('Intensity')
plt.title("Spectrum "+folder.rsplit('/',1)[1])
plt.ioff()
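# A standalone sketch (hypothetical, not the program's own exp_apodization) of
# what exponential apodization with line-broadening factor LB typically does:
# the time-domain signal is multiplied by a decaying exponential before the FFT.
# The exp(-pi*LB*t) convention used here is one common choice and may differ
# from the implementation inside self.dat.
def _exp_apodization_sketch(signal, dwell_time, lb=LB):
    t = np.arange(signal.shape[-1]) * dwell_time
    return signal * np.exp(-np.pi * lb * t)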
| gpl-3.0 |
DavC95/algotrading_sandbox | algotrade_sandbox-1.0/algotrade_sandbox.py | 1 | 5150 | #!/usr/local/bin/python3
import numpy as np
import pandas as pd
import os,sys
import shutil
import time
from matplotlib import pyplot as plt
from urllib.request import urlretrieve
import xml.etree.ElementTree as ET
__init_cap=0
__s_universe=[]
__positions_array={}
__close_matrix=pd.DataFrame()
__curr_epoch=0
__portfolio_value=[]
def define_environment(stock_universe,initial_capital,API_key_alphavantage,Full=False,granularity="DAILY"):
#data retrieval
print("\nDOWNLOADING STOCK DATA ...\n")
global __init_cap,__s_universe,__positions_array,__portfolio_value
__init_cap=initial_capital
__portfolio_value.append(__init_cap)
__s_universe=stock_universe
    # tolerate a missing folder on the first run
    shutil.rmtree("csv_files", ignore_errors=True)
os.makedirs("csv_files")
for i in stock_universe:
try:
if Full==False and granularity=="DAILY":
url_addr="https://www.alphavantage.co/query?function=TIME_SERIES_"+granularity+"&symbol="+str(i)+"&apikey=" + API_key_alphavantage+ "&datatype=csv"
elif Full==True and granularity=="DAILY":
url_addr="https://www.alphavantage.co/query?function=TIME_SERIES_"+granularity+"&symbol="+str(i)+"&apikey="+API_key_alphavantage+ "&datatype=csv"+"&outputsize=full"
if granularity=="INTRADAY_1_MINUTE":
url_addr="https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY"+"&symbol="+str(i)+"&apikey="+API_key_alphavantage + "&datatype=csv"+"&outputsize=full"+"&interval=1min"
urlretrieve(url_addr,"csv_files/"+str(i)+".csv")
print(str(i)+" DATA SUCCESSFULLY DOWNLOADED")
except: print("\nError downloading data: check internet connection")
print("\n")
__hist_closing_matrix()
def __hist_closing_matrix():
df_final=pd.DataFrame()
global __s_universe
global __close_matrix
for j in __s_universe:
try:
df=pd.read_csv("csv_files/"+str(j)+".csv")
df_final[str(j)]=df["close"]
except:
print("\nDATA NOT VALID: CHECK THE CSV FILE FOR ALPHAVANTAGE TROUBLESHOOTING INFO")
sys.exit()
df_final["TIMESTAMP"]=df["timestamp"]
__close_matrix=df_final
def plot_stocks(arr_stocks):
plt.style.use("dark_background")
for j in arr_stocks:
        plt.plot(__close_matrix[j], label=j)  # label each line so plt.legend() has entries
#plt.xticks(np.linspace(0,len(__close_matrix["TIMESTAMP"]),10))
plt.xlabel("Time")
plt.legend()
#plt.grid()
plt.show()
def summary():
print("------"*int(len(__s_universe)/2)+"DATA SUMMARY"+"------"*int(len(__s_universe)/2))
print("DATA FROM: "+ __close_matrix["TIMESTAMP"].iloc[-1] +" TO: "+ __close_matrix["TIMESTAMP"].iloc[0])
print("\n")
print(" "*int(len(__s_universe)+1)+"DATA HEAD\n")
print(__close_matrix.head(20))
print("----------"*int(len(__s_universe)+1))
print(" "*int(len(__s_universe)+1)+"DATA DESCRIPTION\n")
print(__close_matrix.describe())
def start_backtest(trade_algo):
global __curr_epoch
global __positions_array
global __s_universe
for i in __s_universe:
__positions_array[i]=0
print("\n-------- STARTING TRADING ---------")
for i in range(0,len(__close_matrix)):
trade_algo()
eval_portfolio()
__curr_epoch+=1
def curr_price(stock_n):
res=__close_matrix.iloc[::-1]
return res[str(stock_n)].iloc[__curr_epoch]
def __eval_positions():
global __s_universe
global __positions_array
port_value={}
for i in __s_universe:
port_value[i]=(__positions_array[i]*curr_price(i))
return port_value
def buy(stock_name,quantity_curr):
global __positions_array
global __close_matrix
global __s_universe
global __init_cap
__positions_array[stock_name]+=int(quantity_curr/curr_price(stock_name))
__init_cap-=int(quantity_curr/curr_price(stock_name))*curr_price(stock_name)
print("BUYING "+ str("%.6s" % stock_name)+ " AT " + str("%.2f" % curr_price(stock_name)) + " CURRENT CASH: "+str("%.2f" % np.round(__init_cap,2)) +" PORTFOLIO VALUE: " + str(__portfolio_value[-1])+ " CURRENT POSITIONS: " + str(__eval_positions()))
def sell(stock_name,quantity_curr):
global __positions_array
global __close_matrix
global __s_universe
global __init_cap
__positions_array[stock_name]-=int(quantity_curr/curr_price(stock_name))
__init_cap+=int(quantity_curr/curr_price(stock_name))*curr_price(stock_name)
print("SELLING "+ str("%.6s" % stock_name)+ " AT " + str("%.2f" % curr_price(stock_name)) +" CURRENT CASH: "+str("%.2f" % np.round(__init_cap,2)) + " PORTFOLIO VALUE: " + str(__portfolio_value[-1]) + " CURRENT POSITIONS: " + str(__eval_positions()))
def get_time_series(stck_nam):
return __close_matrix[stck_nam]
def eval_portfolio():
global __portfolio_value
__portfolio_value.append(__init_cap+sum([__eval_positions()[i] for i in __s_universe]))
def portfolio_performance():
plt.style.use("dark_background")
plt.plot(__portfolio_value)
plt.title("PORTFOLIO PERFORMANCE: " + __close_matrix["TIMESTAMP"].iloc[-1] +" - "+ __close_matrix["TIMESTAMP"].iloc[0])
plt.show()
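# A hedged usage sketch (defined but not executed on import) of how this
# module's API might be driven end to end with a trivial momentum rule. The
# ticker list, capital and the "YOUR_API_KEY" placeholder are hypothetical;
# a real AlphaVantage key is required for define_environment to download data.
def _example_momentum_run():
    define_environment(["AAPL", "MSFT"], 10000, "YOUR_API_KEY")
    def momentum_algo():
        series = get_time_series("AAPL")
        # buy a small slice when the latest close exceeds the series mean
        if curr_price("AAPL") > series.mean():
            buy("AAPL", 500)
    start_backtest(momentum_algo)
    portfolio_performance()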
| mit |
jart/tensorflow | tensorflow/contrib/factorization/python/ops/gmm_test.py | 41 | 8716 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.factorization.python.ops import gmm as gmm_lib
from tensorflow.contrib.learn.python.learn.estimators import kmeans
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner
class GMMTest(test.TestCase):
def input_fn(self, batch_size=None, points=None):
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return x, None
indices = random_ops.random_uniform(constant_op.constant([batch_size]),
minval=0, maxval=num_points-1,
dtype=dtypes.int32,
seed=10)
return array_ops.gather(x, indices), None
return _fn
def setUp(self):
np.random.seed(3)
random_seed_lib.set_random_seed(2)
self.num_centers = 2
self.num_dims = 2
self.num_points = 4000
self.batch_size = self.num_points
self.true_centers = self.make_random_centers(self.num_centers,
self.num_dims)
self.points, self.assignments = self.make_random_points(
self.true_centers, self.num_points)
# Use initial means from kmeans (just like scikit-learn does).
clusterer = kmeans.KMeansClustering(num_clusters=self.num_centers)
clusterer.fit(input_fn=lambda: (constant_op.constant(self.points), None),
steps=30)
self.initial_means = clusterer.clusters()
@staticmethod
def make_random_centers(num_centers, num_dims):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * 500)
@staticmethod
def make_random_points(centers, num_points):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * 20)
points = centers[assignments] + offsets
return (points, assignments)
def test_weights(self):
"""Tests the shape of the weights."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
weights = gmm.weights()
self.assertAllEqual(list(weights.shape), [self.num_centers])
def test_clusters(self):
"""Tests the shape of the clusters."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
clusters = gmm.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters='random',
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=1)
score1 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
gmm.fit(input_fn=self.input_fn(), steps=10)
score2 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
self.assertLess(score1, score2)
def test_infer(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=60)
clusters = gmm.clusters()
# Make a small test set
num_points = 40
points, true_assignments = self.make_random_points(clusters, num_points)
assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=num_points)):
assignments.append(item)
assignments = np.ravel(assignments)
self.assertAllEqual(true_assignments, assignments)
def _compare_with_sklearn(self, cov_type):
# sklearn version.
iterations = 40
np.random.seed(5)
sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
sklearn_means = np.asarray([[144.83417719, 254.20130341],
[274.38754816, 353.16074346]])
sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
[-4.50389512, 408.27543989]],
[[385.17484203, -31.27834935],
[-31.27834935, 391.74249925]]])
# skflow version.
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
covariance_type=cov_type,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=iterations)
points = self.points[:10, :]
skflow_assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=10)):
skflow_assignments.append(item)
self.assertAllClose(sklearn_assignments,
np.ravel(skflow_assignments).astype(int))
self.assertAllClose(sklearn_means, gmm.clusters())
if cov_type == 'full':
self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
else:
for d in [0, 1]:
self.assertAllClose(
np.diag(sklearn_covs[d]), gmm.covariances()[d, :], rtol=0.01)
def test_compare_full(self):
self._compare_with_sklearn('full')
def test_compare_diag(self):
self._compare_with_sklearn('diag')
def test_random_input_large(self):
# sklearn version.
iterations = 5 # that should be enough to know whether this diverges
np.random.seed(5)
num_classes = 20
x = np.array([[np.random.random() for _ in range(100)]
for _ in range(num_classes)], dtype=np.float32)
# skflow version.
gmm = gmm_lib.GMM(num_classes,
covariance_type='full',
config=run_config.RunConfig(tf_random_seed=2))
def get_input_fn(x):
def input_fn():
return constant_op.constant(x.astype(np.float32)), None
return input_fn
gmm.fit(input_fn=get_input_fn(x), steps=iterations)
self.assertFalse(np.isnan(gmm.clusters()).any())
class GMMTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(capacity=10,
dtypes=dtypes.float32,
shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue,
[enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
gmm = gmm_lib.GMM(2, covariance_type='diag')
gmm.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
Neuroschemata/jerk_snap_crackle_pop | jerk_snap_crackle_pop/load_inputs_targets.py | 2 | 6537 | import os
from collections import deque
import numpy as np
import pandas as pd
# "T_max" and "features" are both hard-coded for now
# T_max = 1849=43*43 allows us to map the inputs onto a square grid
# so as to formulate the problem via an image recognition analogy
T_max = 43**2
features = ['x', 'y', 'v_x', 'v_y', 'accl_x', 'accl_y','jerk_x',\
'jerk_y','snap_x','snap_y','crak_x','crak_y','pop_x','pop_y']
num_channels = len(features)
def _load_trip_features(trip_data,features=features,T_max=T_max):
""" Load preprocessed data ("trip_data") from a single trip."""
df = pd.read_pickle(trip_data)
trip_features = df[features].values.transpose()
# make sure there are no NaNs
assert(not np.any(np.isnan(trip_features)))
assert(not np.any(np.isinf(trip_features)))
# "standardize" data by subtracting mean & re-scaling by std. deviation
trip_features = trip_features - np.mean(trip_features, axis=0)
trip_features = trip_features / np.std(trip_features, axis=0)
# replace any NaNs (that may be caused by previous division) with zeros
trip_features = np.nan_to_num(trip_features)
# get final x,y coordinates of trip
x_T,y_T = trip_features[(0,1),-1]
# fix the shape of the data so that all inputs have length T_max
fit_size= np.zeros((trip_features.shape[0],
T_max-trip_features.shape[1])).astype(trip_features.dtype)
# freeze the x,y coordinates for all t>data.shape[1]
fit_size[0]=x_T*np.ones((1,fit_size.shape[1])).astype(fit_size.dtype)
fit_size[1]=y_T*np.ones((1,fit_size.shape[1])).astype(fit_size.dtype)
trip_features = np.concatenate((trip_features, fit_size),axis=1)
return trip_features.reshape((1, trip_features.shape[0],
trip_features.shape[1]))
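# Illustrative sketch (not part of the original pipeline): the choice
# T_max = 43**2 above lets a trip tensor of shape (1, num_channels, T_max)
# be viewed as a stack of 43x43 "images", one per kinematic channel, for the
# image-recognition analogy mentioned above. The helper name and the reshape
# below are assumptions about how that analogy would be realised.
def _trip_as_image_grid(trip_features):
    """Reshape the output of _load_trip_features to (num_channels, 43, 43)."""
    side = int(np.sqrt(T_max))  # 43
    return trip_features.reshape(num_channels, side, side)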
def add_features_to_driver(drvr_data_loc, runtime_configuration, split_data=True):
""" Collate trip_features associated with a given driver."""
train_val_ratio = runtime_configuration['train_val_ratio']
assert(train_val_ratio[0]+train_val_ratio[1]==1)
driver_feats = deque([])
for fname in os.listdir(drvr_data_loc):
if os.path.isfile(os.path.join(drvr_data_loc, fname)):
driver_feats.append(
_load_trip_features(os.path.join(drvr_data_loc, fname)))
# driver_feats contains K objects of shape (1,num_channels,T_max)
# D_i will be numpy array of shape (K,num_channels,T_max)
# associated with all K trips made by driver i
D_i = np.concatenate(driver_feats)
drvr_IDs = np.ones(D_i.shape[0]) * int(os.path.basename(drvr_data_loc))
if split_data:
splitoff = int(np.floor(train_val_ratio[0] * drvr_IDs.shape[0]))
return dict(train_with=dict(D_i=D_i[:splitoff],
drvr_IDs=drvr_IDs[:splitoff]),
val_with=dict(D_i=D_i[splitoff:],
drvr_IDs=drvr_IDs[splitoff:]))
else:
return dict(D_i=D_i, drvr_IDs=drvr_IDs)
def _finalize_data(all_training_targets, all_val_targets):
unique_val_targets = np.unique(all_val_targets)
cloned_training_targets, cloned_val_targets = \
np.copy(all_training_targets), np.copy(all_val_targets)
for lbl in range(unique_val_targets.shape[0]):
cloned_training_targets[all_training_targets == unique_val_targets[lbl]] = lbl
cloned_val_targets[all_val_targets == unique_val_targets[lbl]] = lbl
return cloned_training_targets, cloned_val_targets, unique_val_targets
def set_inputs_n_targets(data_logs, runtime_configuration):
""" Prepare complete network-ready inputs."""
trips_per_drvr = 200 # ugh! TODO:remove hard-coding for trips_per_drvr
drivers_dir = data_logs['data_location']
training_ratio=runtime_configuration['train_val_ratio'][0]
drvr_list = [drvr for drvr in os.listdir(drivers_dir)
if os.path.isdir(os.path.join(drivers_dir, drvr))]
packaged_data = dict(train_with=dict(allDs=None,drvr_IDs=None),
val_with=dict(allDs=None,drvr_IDs=None))
# TODO: add num_channels, T_max as parameters passed to the function
num_training_trips = int(np.floor(training_ratio * trips_per_drvr))
num_val_trips = int(trips_per_drvr - num_training_trips)
total_train_samples = len(drvr_list) * num_training_trips
total_val_samples = len(drvr_list) * num_val_trips
assert(total_train_samples + total_val_samples == trips_per_drvr * len(drvr_list))
# create helper functions for slicing to aid in splitting the inputs into
# training and validation sets
bunch_train_trips = \
lambda k: slice(k * num_training_trips, (k + 1) * num_training_trips)
bunch_val_trips = \
lambda k: slice(k * num_val_trips, (k + 1) * num_val_trips)
# initialize data structures for inputs and targets
all_training_inputs = np.zeros(shape=(total_train_samples,num_channels,T_max),
dtype=np.float16)
all_training_targets = np.zeros(shape=(total_train_samples), dtype=np.int16)
all_val_inputs = np.zeros(shape=(total_val_samples,num_channels,T_max),
dtype=np.float16)
all_val_targets = np.zeros(shape=(total_val_samples), dtype=np.int16)
# populate data structures for inputs and targets
for i, drvr in enumerate(drvr_list):
driver_feats = add_features_to_driver(os.path.join(drivers_dir, drvr),\
runtime_configuration)
all_training_inputs[bunch_train_trips(i)] =\
driver_feats['train_with']['D_i'].astype(all_training_inputs.dtype)
all_training_targets[bunch_train_trips(i)]=\
driver_feats['train_with']['drvr_IDs'].astype(all_training_targets.dtype)
all_val_inputs[bunch_val_trips(i)] =\
driver_feats['val_with']['D_i'].astype(all_val_inputs.dtype)
all_val_targets[bunch_val_trips(i)] =\
driver_feats['val_with']['drvr_IDs'].astype(all_val_targets.dtype)
print('{0} % of drivers processed ...'.format(np.round(100.0*(i+1)/len(drvr_list))))
# package inputs and targets into a dictionary
packaged_data['train_with']['allDs']=all_training_inputs
packaged_data['val_with']['allDs']=all_val_inputs
packaged_data['train_with']['drvr_IDs'], \
packaged_data['val_with']['drvr_IDs'], \
packaged_data['class_labels'] \
= _finalize_data(all_training_targets, all_val_targets)
return packaged_data
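# Illustrative usage sketch (not part of the original module). The directory
# path is a placeholder and the function name is hypothetical; the config
# dict only uses the keys that set_inputs_n_targets and
# add_features_to_driver actually read ('data_location', 'train_val_ratio').
def _example_usage(drivers_dir='/path/to/preprocessed/drivers'):
    data_logs = {'data_location': drivers_dir}
    runtime_configuration = {'train_val_ratio': (0.8, 0.2)}
    packaged = set_inputs_n_targets(data_logs, runtime_configuration)
    # packaged['train_with']['allDs'] has shape
    # (num_training_samples, num_channels, T_max)
    return packaged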
| lgpl-3.0 |
adamnovak/hgvm-graph-bakeoff-evaluations | scripts/plotVariantsDistances.py | 6 | 16857 | #!/usr/bin/env python2.7
"""
Make some figures for the .tsv output of computeVariantsDistances.py
"""
import argparse, sys, os, os.path, random, subprocess, shutil, itertools, glob
import doctest, re, json, collections, time, timeit, string, math, copy
from collections import defaultdict
from Bio.Phylo.TreeConstruction import _DistanceMatrix, DistanceTreeConstructor
from Bio import Phylo
import matplotlib
matplotlib.use('Agg')
import pylab
import networkx as nx
from collections import defaultdict
from toillib import robust_makedirs
from callVariants import alignment_sample_tag, alignment_region_tag, alignment_graph_tag, run
from callVariants import graph_path, sample_vg_path, g1k_vg_path, graph_path
from evaluateVariantCalls import defaultdict_set
from computeVariantsDistances import vcf_dist_header, read_tsv, write_tsv
# Set up the plot parameters
# Include both versions of the 1kg SNPs graph name
# copied from plotVariantComparison.sh
PLOT_PARAMS = [
"--categories",
"snp1kg",
"snp1000g",
"haplo1kg",
"sbg",
"cactus",
"camel",
"curoverse",
"debruijn-k31",
"debruijn-k63",
"level1",
"level2",
"level3",
"prg",
"refonly",
"simons",
"trivial",
"vglr",
"haplo1kg30",
"haplo1kg50",
"shifted1kg",
"gatk3",
"platypus",
"g1kvcf",
"freebayes",
"samtools",
"snp1kg_af001",
"snp1kg_af010",
"snp1kg_af100",
"snp1kg_kp",
"haplo1kg30_af001",
"haplo1kg30_af010",
"haplo1kg30_af100",
"haplo1kg50_af001",
"haplo1kg50_af010",
"haplo1kg50_af100",
"platinum",
"freebayes_g",
"snp1kg_norm",
"snp1kg_plat",
"--category_labels ",
"1KG",
"1KG",
"\"1KG Haplo\"",
"7BG",
"Cactus",
"Camel",
"Curoverse",
"\"De Bruijn 31\"",
"\"De Bruijn 63\"",
"Level1",
"Level2",
"Level3",
"PRG",
"Primary",
"SGDP",
"Unmerged",
"VGLR",
"\"1KG Haplo 30\"",
"\"1KG Haplo 50\"",
"Scrambled",
"GATK3",
"Platypus",
"\"1000 Genomes\"",
"FreeBayes",
"Samtools",
"\"1KG .001\"",
"\"1KG .010\"",
"\"1KG .100\"",
"\"1KG UF\"",
"\"1KG Hap30 .001\"",
"\"1KG Hap30 .010\"",
"\"1KG Hap30 .100\"",
"\"1KG Hap50 .001\"",
"\"1KG Hap50 .010\"",
"\"1KG Hap50 .100\"",
"Platinum",
"\"Freebayes VG\"",
"\"1KG Norm\"",
"\"1KG Plat\"",
"--colors",
"\"#fb9a99\"",
"\"#fb9a99\"",
"\"#fdbf6f\"",
"\"#b15928\"",
"\"#1f78b4\"",
"\"#33a02c\"",
"\"#a6cee3\"",
"\"#e31a1c\"",
"\"#ff7f00\"",
"\"#FF0000\"",
"\"#00FF00\"",
"\"#0000FF\"",
"\"#6a3d9a\"",
"\"#000000\"",
"\"#b2df8a\"",
"\"#b1b300\"",
"\"#cab2d6\"",
"\"#00FF00\"",
"\"#0000FF\"",
"\"#FF0000\"",
"\"#25BBD4\"",
"\"#9E7C72\"",
"\"#cab2d6\"",
"\"#FF00FF\"",
"\"#2F4F4F\"",
"\"#C71585\"",
"\"#663399\"",
"\"#F4A460\"",
"\"#FA8072\"",
"\"#556B2F\"",
"\"#DEB887\"",
"\"#800000\"",
"\"#6A5ACD\"",
"\"#C71585\"",
"\"#FF6347\"",
"\"#119911\"",
"\"#b1b300\"",
"\"#fb4a44\"",
"\"#fabf1f\"",
"--font_size 20 --dpi 90"]
def name_map():
""" make a dictionary from the list above """
i = PLOT_PARAMS.index("--categories")
j = PLOT_PARAMS.index("--category_labels ")
names = dict()
for k in range(i + 1, j):
if PLOT_PARAMS[k][0:2] == "--":
break
names[PLOT_PARAMS[i + k]] = PLOT_PARAMS[j + k].replace("\"", "")
# hack in sample-<name> and base-<name> maps
keys = [k for k in names.keys()]
for key in keys:
names["base-{}".format(key)] = "Base {}".format(names[key])
names["sample-{}".format(key)] = "Sample {}".format(names[key])
return names
def parse_args(args):
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# General options
parser.add_argument("comp_dir", type=str,
help="directory of comparison output written by computeVariantsDistances.py")
parser.add_argument("--skip", type=str, default=None,
help="comma separated list of skip words to pass to plotHeatMap.py")
parser.add_argument("--top", action="store_true",
help="print some zoom-ins too")
parser.add_argument("--range", help="distance range to plot on either side of max f1 pr dot",
type=float, default=0.1)
args = args[1:]
return parser.parse_args(args)
def plot_kmer_comp(tsv_path, options):
""" take a kmer compare table and make a
jaccard boxplot for the first column and a
    recall / precision plot for the 2nd and 3rd columns
"""
out_dir = os.path.join(options.comp_dir, "comp_plots")
robust_makedirs(out_dir)
out_name = os.path.basename(os.path.splitext(tsv_path)[0])
out_base_path = os.path.join(out_dir, out_name)
sample = out_name.split("-")[-1].upper()
region = out_name.split("-")[-2].upper()
params = " ".join(PLOT_PARAMS)
# jaccard boxplot
jac_tsv = out_base_path + "_jac.tsv"
awkstr = '''awk '{if (NR!=1) print $1 "\t" $2}' '''
run("{} {} > {}".format(awkstr, tsv_path, jac_tsv))
jac_png = out_base_path + "_jac.png"
run("scripts/boxplot.py {} --save {} --title \"{} KMER Set Jaccard\" --x_label \"Graph\" --y_label \"Jaccard Index\" --x_sideways {}".format(jac_tsv, jac_png, region, params))
# precision recall scatter plot
acc_tsv = out_base_path + "_acc.tsv"
awkstr = '''awk '{if (NR!=1) print $1 "\t" $4 "\t" $3}' '''
run("{} {} > {}".format(awkstr, tsv_path, acc_tsv))
acc_png = out_base_path + "_acc.png"
run("scripts/scatter.py {} --save {} --title \"{} KMER Set Accuracy\" --x_label \"Recall\" --y_label \"Precision\" --width 12 --height 9 --lines {}".format(acc_tsv, acc_png, region, params))
def make_max_f1_tsv(acc_tsv_path, f1_tsv_path, f1_pr_tsv_path, f1_qual_tsv_path, options):
""" flatten precision-recall tsv into single best f1 entry per graph """
def f1(p, r):
return 0 if p + r == 0 else 2. * ((p * r) / (p + r))
max_f1 = defaultdict(int)
max_pr = dict()
max_qual = dict()
with open(acc_tsv_path) as f:
pr_file = [line for line in f]
for i, line in enumerate(pr_file):
toks = line.split()
try:
name, recall, precision, qual = toks[0], float(toks[1]), float(toks[2]), float(toks[3])
max_f1[name] = max(f1(precision, recall), max_f1[name])
if max_f1[name] == f1(precision, recall):
max_pr[name] = (precision, recall, i)
max_qual[name] = qual
except:
pass
with open(f1_tsv_path, "w") as f1_file:
for name, f1_score in max_f1.items():
f1_file.write("{}\t{}\n".format(name, f1_score))
with open(f1_pr_tsv_path, "w") as f1_pr_file:
for name, pr_score in max_pr.items():
best_f1_line = pr_file[pr_score[2]]
best_recall, best_precision = float(best_f1_line.split()[1]), float(best_f1_line.split()[2])
for i in range(0, len(pr_file)):
line = pr_file[i]
toks = line.split()
if toks[0] == name:
recall, precision = float(toks[1]), float(toks[2])
if abs(recall - best_recall) <= options.range and abs(precision - best_precision) <= options.range:
f1_pr_file.write("{}\t{}\t{}\n".format(name, recall, precision))
with open(f1_qual_tsv_path, "w") as f1_qual_file:
for name, qual_score in max_qual.items():
f1_qual_file.write("{}\t{}\n".format(name, qual_score))
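# Minimal sketch (illustrative only, not used by the pipeline): the same
# max-F1 selection performed by make_max_f1_tsv, but on an in-memory list of
# (graph, recall, precision) rows instead of tsv files. The helper name is
# hypothetical.
def _max_f1_from_rows(rows):
    best = defaultdict(float)
    for name, recall, precision in rows:
        if precision + recall > 0:
            score = 2. * precision * recall / (precision + recall)
            best[name] = max(best[name], score)
    return dict(best)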
def plot_vcf_comp(tsv_path, options):
""" take the big vcf compare table and make precision_recall plots for all the categories"""
out_dir = os.path.join(options.comp_dir, "comp_plots")
robust_makedirs(out_dir)
out_name = os.path.basename(os.path.splitext(tsv_path)[0])
sample = out_name.split("-")[-1].upper()
region = out_name.split("-")[-2].upper()
def out_base_path(tag, label, extension):
bd = tag if extension != ".tsv" else "tsv"
ret = os.path.join(out_dir, bd, "-".join(out_name.split("-")[:-1]) + "-{}-{}-".format(sample, tag) + region) + "_" + label + extension
robust_makedirs(os.path.dirname(ret))
return ret
params = " ".join(PLOT_PARAMS)
# precision recall scatter plot
header = vcf_dist_header(options)
# strip qual
header = header[:-1]
for i in range(len(header) / 2):
prec_idx = 2 * i
rec_idx = prec_idx + 1
qual_idx = len(header)
print prec_idx, header[prec_idx], rec_idx, header[rec_idx]
ptoks = header[prec_idx].split("-")
rtoks = header[rec_idx].split("-")
assert ptoks[1] == "Precision"
assert rtoks[1] == "Recall"
assert ptoks[:1] == rtoks[:1]
comp_cat = ptoks[0]
if comp_cat not in ["TOT", "SNP", "INDEL"]:
continue
label = header[prec_idx].replace("Precision", "acc")
acc_tsv = out_base_path("pr", label, ".tsv")
print "Make {} tsv with cols {} {}".format(label, rec_idx, prec_idx)
        # +1 to convert to awk 1-base coordinates. +1 again since header doesn't include row_label col
awkcmd = '''if (NR!=1) print $1 "\t" ${} "\t" ${} "\t" ${}'''.format(rec_idx + 2, prec_idx + 2, qual_idx + 2)
awkstr = "awk \'{" + awkcmd + "}\'"
run("{} {} > {}".format(awkstr, tsv_path, acc_tsv))
acc_png = out_base_path("pr", label, ".png")
title = sample.upper() + " "
if comp_cat == "TOT":
title += " Total Accuracy"
else:
title += " {} Accuracy".format(comp_cat.title())
if region == "TOTAL":
title += ", all regions"
else:
title += ", {}".format(region)
cmd = "scripts/scatter.py {} --save {} --title \"{}\" --x_label \"Recall\" --y_label \"Precision\" --width 18 --height 9 {} --lines --no_n --line_width 1.5 --marker_size 5 --min_x -0.01 --max_x 1.01 --min_y -0.01 --max_y 1.01".format(acc_tsv, acc_png, title, params)
print cmd
os.system(cmd)
#flatten to max f1 tsv and plot as bars
f1_tsv = out_base_path("f1bar", label, ".tsv")
f1_png = out_base_path("f1bar", label, ".png")
f1_pr_tsv = out_base_path("f1pr", label, ".tsv")
f1_pr_png = out_base_path("f1pr", label, ".png")
f1_qual_tsv = out_base_path("f1qual", label, ".tsv")
f1_qual_png = out_base_path("f1qual", label, ".png")
make_max_f1_tsv(acc_tsv, f1_tsv, f1_pr_tsv, f1_qual_tsv, options)
cmd = "scripts/barchart.py {} --ascending --no_n --save {} --title \"{}\" --x_sideways --x_label \"Graph\" --y_label \"Max F1\" {}".format(f1_tsv, f1_png, title, params)
print cmd
os.system(cmd)
cmd = "scripts/scatter.py {} --save {} --title \"{}\" --x_label \"Recall\" --y_label \"Precision\" --width 18 --height 9 {} --lines --no_n --line_width 1.5 --marker_size 5".format(f1_pr_tsv, f1_pr_png, title, params)
print cmd
os.system(cmd)
cmd = "scripts/barchart.py {} --ascending --no_n --save {} --title \"{}\" --x_sideways --x_label \"Graph\" --y_label \"Quality for Max F1\" {}".format(f1_qual_tsv, f1_qual_png, title, params)
print cmd
os.system(cmd)
if options.top is True:
# top 20
cmd = "scripts/scatter.py {} --save {} --title \"{}\" --x_label \"Recall\" --y_label \"Precision\" --width 18 --height 9 {} --lines --no_n --line_width 1.5 --marker_size 5 --min_x 0.798 --max_x 1.002 --min_y 0.798 --max_y 1.002".format(acc_tsv, acc_png.replace(".png", "_top20.png"), title, params)
print cmd
os.system(cmd)
# top 20
cmd = "scripts/scatter.py {} --save {} --title \"{}\" --x_label \"Recall\" --y_label \"Precision\" --width 11 --height 5.5 {} --lines --no_n --line_width 1.5 --marker_size 5 --min_x 0.796 --max_x 1.004 --min_y 0.796 --max_y 1.004".format(acc_tsv, acc_png.replace(".png", "_top20_inset.png"), title, params)
print cmd
os.system(cmd)
# top 40
cmd = "scripts/scatter.py {} --save {} --title \"{}\" --x_label \"Recall\" --y_label \"Precision\" --width 18 --height 9 {} --lines --no_n --line_width 1.5 --marker_size 5 --min_x 0.596 --max_x 1.004 --min_y 0.596 --max_y 1.004".format(acc_tsv, acc_png.replace(".png", "_top40.png"), title, params)
print cmd
os.system(cmd)
# top .5 bar
cmd = "scripts/barchart.py {} --ascending --no_n --save {} --title \"{}\" --x_sideways --x_label \"Graph\" --y_label \"Max F1\" {} --min 0.5".format(f1_tsv, f1_png.replace(".png", "_top50.png"), title, params)
print cmd
os.system(cmd)
# top .6 bar
cmd = "scripts/barchart.py {} --ascending --no_n --save {} --title \"{}\" --x_sideways --x_label \"Graph\" --y_label \"Max F1\" {} --min 0.6".format(f1_tsv, f1_png.replace(".png", "_top60.png"), title, params)
print cmd
os.system(cmd)
# top .7 bar
cmd = "scripts/barchart.py {} --ascending --no_n --save {} --title \"{}\" --x_sideways --x_label \"Graph\" --y_label \"Max F1\" {} --min 0.7".format(f1_tsv, f1_png.replace(".png", "_top70.png"), title, params)
print cmd
os.system(cmd)
# top .85 bar
cmd = "scripts/barchart.py {} --ascending --no_n --save {} --title \"{}\" --x_sideways --x_label \"Graph\" --y_label \"Max F1\" {} --min 0.85".format(f1_tsv, f1_png.replace(".png", "_top85.png"), title, params)
print cmd
os.system(cmd)
# top .25 f1pr scatter
cmd = "scripts/scatter.py {} --save {} --title \"{}\" --x_label \"Recall\" --y_label \"Precision\" --width 18 --height 9 {} --lines --no_n --line_width 1.5 --marker_size 5 --min_x 0.746 --max_x 1.004 --min_y 0.746 --max_y 1.004".format(f1_pr_tsv, f1_pr_png.replace(".png", "_top25.png"), title, params)
print cmd
os.system(cmd)
# top .50 f1pr scatter
cmd = "scripts/scatter.py {} --save {} --title \"{}\" --x_label \"Recall\" --y_label \"Precision\" --width 18 --height 9 {} --lines --no_n --line_width 1.5 --marker_size 5 --min_x 0.496 --max_x 1.004 --min_y 0.496 --max_y 1.004".format(f1_pr_tsv, f1_pr_png.replace(".png", "_top50.png"), title, params)
print cmd
os.system(cmd)
# top .65 f1pr scatter
cmd = "scripts/scatter.py {} --save {} --title \"{}\" --x_label \"Recall\" --y_label \"Precision\" --width 18 --height 9 {} --lines --no_n --line_width 1.5 --marker_size 5 --min_x 0.646 --max_x 1.004 --min_y 0.646 --max_y 1.004".format(f1_pr_tsv, f1_pr_png.replace(".png", "_top65.png"), title, params)
print cmd
os.system(cmd)
def plot_heatmap(tsv, options):
""" make a heatmap """
out_dir = os.path.join(options.comp_dir, "heatmaps")
robust_makedirs(out_dir)
mat, col_names, row_names, row_label = read_tsv(tsv)
names = name_map()
for i in range(len(col_names)):
if col_names[i] in names:
col_names[i] = names[col_names[i]]
for i in range(len(row_names)):
if row_names[i] in names:
row_names[i] = names[row_names[i]]
if "_rename" in tsv:
return
fix_tsv = tsv.replace(".tsv", "_rename.tsv")
write_tsv(fix_tsv, mat, col_names, row_names, row_label)
out_hm = os.path.join(out_dir, os.path.basename(tsv).replace(".tsv", ".pdf"))
ph_opts = "--skip {}".format(options.skip) if options.skip is not None else ""
cmd = "scripts/plotHeatmap.py {} {} {}".format(fix_tsv, out_hm, ph_opts)
print cmd
os.system(cmd)
cmd = "scripts/plotHeatmap.py {} {} {} --log_scale".format(fix_tsv, out_hm.replace(".pdf", "_log.pdf"), ph_opts)
print cmd
os.system(cmd)
def main(args):
options = parse_args(args)
# look through tsvs in comp_tables
for tsv in glob.glob(os.path.join(options.comp_dir, "comp_tables", "*.tsv")):
if "hm" in os.path.basename(tsv).split("-"):
plot_heatmap(tsv, options)
elif "kmer" in os.path.basename(tsv).split("-"):
plot_kmer_comp(tsv, options)
elif "vcf" in os.path.basename(tsv).split("-") or "sompy" in tsv.split("-") \
or "happy" in tsv.split("-") or "vcfeval" in tsv.split("-"):
plot_vcf_comp(tsv, options)
if __name__ == "__main__" :
sys.exit(main(sys.argv))
| mit |
mattgiguere/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
NumCosmo/NumCosmo | examples/example_hiprim.py | 1 | 2535 | #!/usr/bin/env python
try:
import gi
gi.require_version('NumCosmo', '1.0')
gi.require_version('NumCosmoMath', '1.0')
except:
pass
import math
import numpy as np
import matplotlib.pyplot as plt
from gi.repository import GObject
from gi.repository import NumCosmo as Nc
from gi.repository import NumCosmoMath as Ncm
from py_hiprim_example import PyHIPrimExample
#
# Initializing the library objects, this must be called before
# any other library function.
#
Ncm.cfg_init ()
#
# Script parameters
#
# maximum multipole
lmax = 2500
#
# Creating a new instance of PyHIPrimExample
#
prim = PyHIPrimExample ()
print ("# As = ", prim.props.As)
print ("# P (k = 1) = ", prim.SA_powspec_k (1.0))
print ("# (a, b, c) = ( ", prim.props.a, ", ", prim.props.b, ", ", prim.props.c, " )")
#
# New CLASS backend precision object
# Let's also increase k_per_decade_primordial since we are
# dealing with a modified spectrum.
#
cbe_prec = Nc.CBEPrecision.new ()
cbe_prec.props.k_per_decade_primordial = 50.0
cbe_prec.props.tight_coupling_approximation = 0
#
# New CLASS backend object
#
cbe = Nc.CBE.prec_new (cbe_prec)
Bcbe = Nc.HIPertBoltzmannCBE.full_new (cbe)
Bcbe.set_TT_lmax (lmax)
# Setting which CMB data to use
Bcbe.set_target_Cls (Nc.DataCMBDataType.TT)
# Setting if the lensed Cl's are going to be used or not.
Bcbe.set_lensed_Cls (True)
#
# New homogeneous and isotropic cosmological model NcHICosmoDEXcdm
#
cosmo = Nc.HICosmo.new_from_name (Nc.HICosmo, "NcHICosmoDEXcdm")
cosmo.omega_x2omega_k ()
cosmo.param_set_by_name ("Omegak", 0.0)
#
# New homogeneous and isotropic reionization object
#
reion = Nc.HIReionCamb.new ()
#
# Adding submodels to the main cosmological model.
#
cosmo.add_submodel (reion)
cosmo.add_submodel (prim)
#
# Preparing the Class backend object
#
Bcbe.prepare (cosmo)
Cls1 = Ncm.Vector.new (lmax + 1)
Cls2 = Ncm.Vector.new (lmax + 1)
Bcbe.get_TT_Cls (Cls1)
prim.props.a = 0
Bcbe.prepare (cosmo)
Bcbe.get_TT_Cls (Cls2)
Cls1_a = Cls1.dup_array ()
Cls2_a = Cls2.dup_array ()
Cls1_a = np.array (Cls1_a[2:])
Cls2_a = np.array (Cls2_a[2:])
ell = np.array (list(range(2, lmax + 1)))
Cls1_a = ell * (ell + 1.0) * Cls1_a
Cls2_a = ell * (ell + 1.0) * Cls2_a
#
# Plotting the TT angular power spectrum
#
plt.title (r'Modified and non-modified $C_\ell$')
plt.xscale('log')
plt.plot (ell, Cls1_a, 'r', label="Modified")
plt.plot (ell, Cls2_a, 'b--', label="Non-modified")
plt.xlabel(r'$\ell$')
plt.ylabel(r'$C_\ell$')
plt.legend(loc=2)
plt.savefig ("hiprim_Cls.svg")
plt.clf ()
| gpl-3.0 |
chrisburr/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
brainiak/brainiak | brainiak/funcalign/fastsrm.py | 2 | 65510 | """Fast Shared Response Model (FastSRM)
The implementation is based on the following publications:
.. [Richard2019] "Fast Shared Response Model for fMRI data"
H. Richard, L. Martin, A. Pinho, J. Pillow, B. Thirion, 2019
https://arxiv.org/pdf/1909.12537.pdf
"""
# Author: Hugo Richard
import hashlib
import logging
import os
import numpy as np
import scipy
from joblib import Parallel, delayed
from brainiak.funcalign.srm import DetSRM
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
import uuid
__all__ = [
"FastSRM",
]
logger = logging.getLogger(__name__)
def get_shape(path):
"""Get shape of saved np array
Parameters
----------
path: str
path to np array
"""
f = open(path, "rb")
version = np.lib.format.read_magic(f)
shape, fortran_order, dtype = np.lib.format._read_array_header(f, version)
f.close()
return shape
def safe_load(data):
"""If data is an array returns data else returns np.load(data)"""
if isinstance(data, np.ndarray):
return data
else:
return np.load(data)
def safe_encode(img):
if isinstance(img, np.ndarray):
name = hashlib.md5(img.tostring()).hexdigest()
else:
name = hashlib.md5(img.encode()).hexdigest()
return name
def assert_non_empty_list(input_list, list_name):
"""
Check that input list is not empty
Parameters
----------
input_list: list
list_name: str
Name of the list
"""
if len(input_list) == 0:
raise ValueError("%s is a list of length 0 which is not valid" %
list_name)
def assert_array_2axis(array, name_array):
"""Check that input is an np array with 2 axes
Parameters
----------
array: np array
name_array: str
Name of the array
"""
if not isinstance(array, np.ndarray):
raise ValueError("%s should be of type "
"np.ndarray but is of type %s" %
(name_array, type(array)))
if len(array.shape) != 2:
raise ValueError("%s must have exactly 2 axes "
"but has %i axes" % (name_array, len(array.shape)))
def assert_valid_index(indexes, max_value, name_indexes):
"""
Check that indexes are between 0 and max_value and number
of indexes is less than max_value
"""
for i, ind_i in enumerate(indexes):
if ind_i < 0 or ind_i >= max_value:
raise ValueError("Index %i of %s has value %i "
"whereas value should be between 0 and %i" %
(i, name_indexes, ind_i, max_value - 1))
def _check_imgs_list(imgs):
"""
Checks that imgs is a non empty list of elements of the same type
Parameters
----------
imgs : list
"""
# Check the list is non empty
assert_non_empty_list(imgs, "imgs")
# Check that all input have same type
for i in range(len(imgs)):
if not isinstance(imgs[i], type(imgs[0])):
raise ValueError("imgs[%i] has type %s whereas "
"imgs[%i] has type %s. "
"This is inconsistent." %
(i, type(imgs[i]), 0, type(imgs[0])))
def _check_imgs_list_list(imgs):
"""
Check input images if they are list of list of arrays
Parameters
----------
    imgs : list of list of array of shape [n_voxels, n_timeframes]
imgs is a list of list of arrays where element i, j of
the array is a numpy array of shape [n_voxels, n_timeframes] that
contains the data of subject i collected during session j.
n_timeframes and n_voxels are assumed to be the same across
subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
Returns
-------
shapes: array
Shape of input images
"""
n_subjects = len(imgs)
# Check that the number of session is not 0
assert_non_empty_list(imgs[0], "imgs[%i]" % 0)
# Check that the number of sessions is the same for all subjects
n_sessions = None
for i in range(len(imgs)):
if n_sessions is None:
n_sessions = len(imgs[i])
if n_sessions != len(imgs[i]):
raise ValueError("imgs[%i] has length %i whereas imgs[%i] "
"has length %i. All subjects should have "
"the same number of sessions." %
(i, len(imgs[i]), 0, len(imgs[0])))
shapes = np.zeros((n_subjects, n_sessions, 2))
# Run array-level checks
for i in range(len(imgs)):
for j in range(len(imgs[i])):
assert_array_2axis(imgs[i][j], "imgs[%i][%i]" % (i, j))
shapes[i, j, :] = imgs[i][j].shape
return shapes
def _check_imgs_list_array(imgs):
"""
Check input images if they are list of arrays.
In this case returned images are a list of list of arrays
where element i,j of the array is a numpy array of
shape [n_voxels, n_timeframes] that contains the data of subject i
collected during session j.
Parameters
----------
imgs : array of str, shape=[n_subjects, n_sessions]
imgs is a list of arrays where element i of the array is
a numpy array of shape [n_voxels, n_timeframes] that contains the
data of subject i (number of sessions is implicitly 1)
n_timeframes and n_voxels are assumed to be the same across
subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
Returns
-------
    new_imgs: list of list of array of shape [n_voxels, n_timeframes]
        Input images wrapped as a list of single-session lists
    shapes: array
        Shape of input images
"""
n_subjects = len(imgs)
n_sessions = 1
shapes = np.zeros((n_subjects, n_sessions, 2))
new_imgs = []
for i in range(len(imgs)):
assert_array_2axis(imgs[i], "imgs[%i]" % i)
shapes[i, 0, :] = imgs[i].shape
new_imgs.append([imgs[i]])
return new_imgs, shapes
def _check_imgs_array(imgs):
"""Check input image if it is an array
Parameters
----------
imgs : array of str, shape=[n_subjects, n_sessions]
Element i, j of the array is a path to the data of subject i
collected during session j.
Data are loaded with numpy.load and expected
shape is [n_voxels, n_timeframes]
n_timeframes and n_voxels are assumed to be the same across
subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
Returns
-------
shapes : array
Shape of input images
"""
assert_array_2axis(imgs, "imgs")
n_subjects, n_sessions = imgs.shape
shapes = np.zeros((n_subjects, n_sessions, 2))
for i in range(n_subjects):
for j in range(n_sessions):
if not (isinstance(imgs[i, j], str) or isinstance(
imgs[i, j], np.str_) or isinstance(imgs[i, j], np.str)):
raise ValueError("imgs[%i, %i] is stored using "
"type %s which is not a str" %
(i, j, type(imgs[i, j])))
shapes[i, j, :] = get_shape(imgs[i, j])
return shapes
def _check_shapes_components(n_components, n_timeframes):
"""Check that n_timeframes is greater than number of components"""
def _check_shapes_atlas_compatibility(n_voxels,
n_timeframes,
n_components=None,
atlas_shape=None):
if n_components is not None:
if np.sum(n_timeframes) < n_components:
raise ValueError("Total number of timeframes is shorter than "
"number of components (%i < %i)" %
(np.sum(n_timeframes), n_components))
if atlas_shape is not None:
n_supervoxels, n_atlas_voxels = atlas_shape
if n_atlas_voxels != n_voxels:
raise ValueError(
"Number of voxels in the atlas is not the same "
"as the number of voxels in input data (%i != %i)" %
(n_atlas_voxels, n_voxels))
def _check_shapes(shapes,
n_components=None,
atlas_shape=None,
ignore_nsubjects=False):
"""Check that number of voxels is the same for each subjects. Number of
timeframes can vary between sessions but must be consistent across
subjects
Parameters
----------
shapes : array of shape (n_subjects, n_sessions, 2)
Array of shapes of input images
"""
n_subjects, n_sessions, _ = shapes.shape
if n_subjects <= 1 and not ignore_nsubjects:
raise ValueError("The number of subjects should be greater than 1")
n_timeframes_list = [None] * n_sessions
n_voxels = None
for n in range(n_subjects):
for m in range(n_sessions):
if n_timeframes_list[m] is None:
n_timeframes_list[m] = shapes[n, m, 1]
if n_voxels is None:
                n_voxels = shapes[n, m, 0]
if n_timeframes_list[m] != shapes[n, m, 1]:
raise ValueError("Subject %i Session %i does not have the "
"same number of timeframes "
"as Subject %i Session %i" % (n, m, 0, m))
if n_voxels != shapes[n, m, 0]:
raise ValueError("Subject %i Session %i"
" does not have the same number of voxels as "
"Subject %i Session %i." % (n, m, 0, 0))
_check_shapes_atlas_compatibility(n_voxels, np.sum(n_timeframes_list),
n_components, atlas_shape)
def check_atlas(atlas, n_components=None):
""" Check input atlas
Parameters
----------
atlas : array, shape=[n_supervoxels, n_voxels] or array, shape=[n_voxels]
or str or None
Probabilistic or deterministic atlas on which to project the data
Deterministic atlas is an array of shape [n_voxels,] where values
range from 1 to n_supervoxels. Voxels labelled 0 will be ignored.
If atlas is a str the corresponding array is loaded with numpy.load
and expected shape is (n_voxels,) for a deterministic atlas and
(n_supervoxels, n_voxels) for a probabilistic atlas.
n_components : int
Number of timecourses of the shared coordinates
Returns
-------
shape : array or None
atlas shape
"""
if atlas is None:
return None
if not (isinstance(atlas, np.ndarray) or isinstance(atlas, str)
or isinstance(atlas, np.str_) or isinstance(atlas, np.str)):
raise ValueError("Atlas is stored using "
"type %s which is neither np.ndarray or str" %
type(atlas))
if isinstance(atlas, np.ndarray):
shape = atlas.shape
else:
shape = get_shape(atlas)
if len(shape) == 1:
# We have a deterministic atlas
atlas_array = safe_load(atlas)
n_voxels = atlas_array.shape[0]
n_supervoxels = len(np.unique(atlas_array)) - 1
shape = (n_supervoxels, n_voxels)
elif len(shape) != 2:
raise ValueError(
"Atlas has %i axes. It should have either 1 or 2 axes." %
len(shape))
n_supervoxels, n_voxels = shape
if n_supervoxels > n_voxels:
raise ValueError("Number of regions in the atlas is bigger than "
"the number of voxels (%i > %i)" %
(n_supervoxels, n_voxels))
if n_components is not None:
if n_supervoxels < n_components:
raise ValueError("Number of regions in the atlas is "
"lower than the number of components "
"(%i < %i)" % (n_supervoxels, n_components))
return shape
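# Minimal sketch (illustrative assumption, not part of the original module):
# build a toy deterministic atlas in the format check_atlas expects, i.e. a
# label vector of length n_voxels with values in {0, ..., n_supervoxels}
# where label 0 marks ignored voxels. The helper name is hypothetical.
def _toy_deterministic_atlas(n_voxels=200, n_supervoxels=10):
    # cycle through labels 0..n_supervoxels so that every region is non-empty
    atlas = np.arange(n_voxels) % (n_supervoxels + 1)
    # check_atlas infers the (n_supervoxels, n_voxels) shape from the labels
    assert check_atlas(atlas) == (n_supervoxels, n_voxels)
    return atlas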
def check_imgs(imgs,
n_components=None,
atlas_shape=None,
ignore_nsubjects=False):
"""
Check input images
Parameters
----------
imgs : array of str, shape=[n_subjects, n_sessions]
Element i, j of the array is a path to the data of subject i
collected during session j.
Data are loaded with numpy.load and expected
shape is [n_voxels, n_timeframes]
n_timeframes and n_voxels are assumed to be the same across
subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
imgs can also be a list of list of arrays where element i, j of
the array is a numpy array of shape [n_voxels, n_timeframes] that
contains the data of subject i collected during session j.
imgs can also be a list of arrays where element i of the array is
a numpy array of shape [n_voxels, n_timeframes] that contains the
data of subject i (number of sessions is implicitly 1)
Returns
-------
reshaped_input: bool
True if input had to be reshaped to match the
n_subjects, n_sessions input
new_imgs: list of list of array or np array
input imgs reshaped if it is a list of arrays so that it becomes a
list of list of arrays
shapes: array
Shape of input images
"""
reshaped_input = False
new_imgs = imgs
if isinstance(imgs, list):
_check_imgs_list(imgs)
if isinstance(imgs[0], list):
shapes = _check_imgs_list_list(imgs)
elif isinstance(imgs[0], np.ndarray):
new_imgs, shapes = _check_imgs_list_array(imgs)
reshaped_input = True
else:
raise ValueError(
"Since imgs is a list, it should be a list of list "
"of arrays or a list of arrays but imgs[0] has type %s" %
type(imgs[0]))
elif isinstance(imgs, np.ndarray):
shapes = _check_imgs_array(imgs)
else:
raise ValueError(
"Input imgs should either be a list or an array but has type %s" %
type(imgs))
_check_shapes(shapes, n_components, atlas_shape, ignore_nsubjects)
return reshaped_input, new_imgs, shapes
def check_indexes(indexes, name):
if not (indexes is None or isinstance(indexes, list)
or isinstance(indexes, np.ndarray)):
raise ValueError(
"%s should be either a list, an array or None but received type %s"
% (name, type(indexes)))
def _check_shared_response_list_of_list(shared_response, n_components,
input_shapes):
# Check that shared_response is indeed a list of list of arrays
n_subjects = len(shared_response)
n_sessions = None
for i in range(len(shared_response)):
if not isinstance(shared_response[i], list):
raise ValueError("shared_response[0] is a list but "
"shared_response[%i] is not a list "
"this is incompatible." % i)
assert_non_empty_list(shared_response[i], "shared_response[%i]" % i)
if n_sessions is None:
n_sessions = len(shared_response[i])
elif n_sessions != len(shared_response[i]):
raise ValueError(
"shared_response[%i] has len %i whereas "
"shared_response[0] has len %i. They should "
"have same length" %
(i, len(shared_response[i]), len(shared_response[0])))
for j in range(len(shared_response[i])):
assert_array_2axis(shared_response[i][j],
"shared_response[%i][%i]" % (i, j))
return _check_shared_response_list_sessions([
np.mean([shared_response[i][j] for i in range(n_subjects)], axis=0)
for j in range(n_sessions)
], n_components, input_shapes)
def _check_shared_response_list_sessions(shared_response, n_components,
input_shapes):
for j in range(len(shared_response)):
assert_array_2axis(shared_response[j], "shared_response[%i]" % j)
if input_shapes is not None:
if shared_response[j].shape[1] != input_shapes[0][j][1]:
raise ValueError(
"Number of timeframes in input images during "
"session %i does not match the number of "
"timeframes during session %i "
"of shared_response (%i != %i)" %
(j, j, shared_response[j].shape[1], input_shapes[0, j, 1]))
if n_components is not None:
if shared_response[j].shape[0] != n_components:
raise ValueError(
"Number of components in "
"shared_response during session %i is "
"different than "
"the number of components of the model (%i != %i)" %
(j, shared_response[j].shape[0], n_components))
return shared_response
def _check_shared_response_list_subjects(shared_response, n_components,
input_shapes):
for i in range(len(shared_response)):
assert_array_2axis(shared_response[i], "shared_response[%i]" % i)
return _check_shared_response_array(np.mean(shared_response, axis=0),
n_components, input_shapes)
def _check_shared_response_array(shared_response, n_components, input_shapes):
assert_array_2axis(shared_response, "shared_response")
if input_shapes is None:
new_input_shapes = None
else:
n_subjects, n_sessions, _ = input_shapes.shape
new_input_shapes = np.zeros((n_subjects, 1, 2))
new_input_shapes[:, 0, 0] = input_shapes[:, 0, 0]
new_input_shapes[:, 0, 1] = np.sum(input_shapes[:, :, 1], axis=1)
return _check_shared_response_list_sessions([shared_response],
n_components, new_input_shapes)
def check_shared_response(shared_response,
aggregate="mean",
n_components=None,
input_shapes=None):
"""
Check that shared response has valid input and turn it into
a session-wise shared response
Returns
-------
added_session: bool
True if an artificial sessions was added to match the list of
session input type for shared_response
reshaped_shared_response: list of arrays
shared response (reshaped to match the list of session input)
"""
# Depending on aggregate and shape of input we infer what to do
if isinstance(shared_response, list):
assert_non_empty_list(shared_response, "shared_response")
if isinstance(shared_response[0], list):
if aggregate == "mean":
raise ValueError("self.aggregate has value 'mean' but "
"shared response is a list of list. This is "
"incompatible")
return False, _check_shared_response_list_of_list(
shared_response, n_components, input_shapes)
elif isinstance(shared_response[0], np.ndarray):
if aggregate == "mean":
return False, _check_shared_response_list_sessions(
shared_response, n_components, input_shapes)
else:
return True, _check_shared_response_list_subjects(
shared_response, n_components, input_shapes)
else:
raise ValueError("shared_response is a list but "
"shared_response[0] is neither a list "
"or an array. This is invalid.")
elif isinstance(shared_response, np.ndarray):
return True, _check_shared_response_array(shared_response,
n_components, input_shapes)
else:
raise ValueError("shared_response should be either "
"a list or an array but is of type %s" %
type(shared_response))
def create_temp_dir(temp_dir):
"""
    This checks whether temp_dir exists and creates the directory otherwise
"""
if temp_dir is None:
return None
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
else:
raise ValueError("Path %s already exists. "
"When a model is used, filesystem should be cleaned "
"by using the .clean() method" % temp_dir)
def reduce_data_single(subject_index,
session_index,
img,
atlas=None,
inv_atlas=None,
low_ram=False,
temp_dir=None):
"""Reduce data using given atlas
Parameters
----------
subject_index : int
session_index : int
img : str or array
path to data.
Data are loaded with numpy.load and expected shape is
(n_voxels, n_timeframes)
n_timeframes and n_voxels are assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
img can also be an array of shape (n_voxels, n_timeframes)
atlas : array, shape=[n_supervoxels, n_voxels] or [n_voxels] or None
Probabilistic or deterministic atlas on which to project the data
Deterministic atlas is an array of shape [n_voxels,] where values
range from 1 to n_supervoxels. Voxels labelled 0 will be ignored.
inv_atlas : array, shape=[n_voxels, n_supervoxels] or None
Pseudo inverse of the atlas (only for probabilistic atlases)
temp_dir : str or None
path to dir where temporary results are stored
        if None temporary results will be stored in memory. This
        can result in memory errors when the number of subjects
        and / or sessions is large
    low_ram : bool
        if True and temp_dir is not None, reduced_data will be saved on disk
        this increases the number of IO but reduces memory complexity when
        the number of subjects and / or sessions is large
Returns
-------
reduced_data : array, shape=[n_timeframes, n_supervoxels]
reduced data
"""
# Here we return to the conventions of the paper
data = safe_load(img).T
n_timeframes, n_voxels = data.shape
# Here we check that input is normalized
    if (np.max(np.abs(np.mean(data, axis=0))) > 1e-6
            or np.max(np.abs(np.var(data, axis=0) - 1)) > 1e-6):
        raise ValueError("Data in imgs[%i, %i] does not have 0 mean and unit "
                         "variance. If you are using NiftiMasker to mask your "
                         "data (nilearn) please use standardize=True." %
                         (subject_index, session_index))
if inv_atlas is None and atlas is not None:
atlas_values = np.unique(atlas)
if 0 in atlas_values:
atlas_values = atlas_values[1:]
reduced_data = np.array(
[np.mean(data[:, atlas == c], axis=1) for c in atlas_values]).T
elif inv_atlas is not None and atlas is None:
# this means that it is a probabilistic atlas
reduced_data = data.dot(inv_atlas)
else:
reduced_data = data
if low_ram:
name = safe_encode(img)
path = os.path.join(temp_dir, "reduced_data_" + name)
np.save(path, reduced_data)
return path + ".npy"
else:
return reduced_data
def reduce_data(imgs, atlas, n_jobs=1, low_ram=False, temp_dir=None):
"""Reduce data using given atlas.
Work done in parallel across subjects.
Parameters
----------
imgs : array of str, shape=[n_subjects, n_sessions]
Element i, j of the array is a path to the data of subject i
collected during session j.
Data are loaded with numpy.load and expected shape is
[n_timeframes, n_voxels]
n_timeframes and n_voxels are assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
imgs can also be a list of list of arrays where element i, j of
the array is a numpy array of shape [n_voxels, n_timeframes] that
contains the data of subject i collected during session j.
imgs can also be a list of arrays where element i of the array is
a numpy array of shape [n_voxels, n_timeframes] that contains the
data of subject i (number of sessions is implicitly 1)
atlas : array, shape=[n_supervoxels, n_voxels] or array, shape=[n_voxels]
or None
Probabilistic or deterministic atlas on which to project the data
Deterministic atlas is an array of shape [n_voxels,] where values
range from 1 to n_supervoxels. Voxels labelled 0 will be ignored.
n_jobs : integer, optional, default=1
The number of CPUs to use to do the computation.
-1 means all CPUs, -2 all CPUs but one, and so on.
temp_dir : str or None
path to dir where temporary results are stored
if None temporary results will be stored in memory. This
        can result in memory errors when the number of subjects
and / or sessions is large
low_ram : bool
if True and temp_dir is not None, reduced_data will be saved on disk
this increases the number of IO but reduces memory complexity when
        the number of subjects and/or sessions is large
Returns
-------
reduced_data_list : array of str, shape=[n_subjects, n_sessions]
or array, shape=[n_subjects, n_sessions, n_timeframes, n_supervoxels]
Element i, j of the array is a path to the data of subject i collected
during session j.
Data are loaded with numpy.load and expected shape is
[n_timeframes, n_supervoxels]
or Element i, j of the array is the data in array of
shape=[n_timeframes, n_supervoxels]
n_timeframes and n_supervoxels
are assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
"""
if atlas is None:
A = None
A_inv = None
else:
loaded_atlas = safe_load(atlas)
if len(loaded_atlas.shape) == 2:
A = None
A_inv = loaded_atlas.T.dot(
np.linalg.inv(loaded_atlas.dot(loaded_atlas.T)))
else:
A = loaded_atlas
A_inv = None
n_subjects = len(imgs)
n_sessions = len(imgs[0])
reduced_data_list = Parallel(n_jobs=n_jobs)(
delayed(reduce_data_single)(i,
j,
imgs[i][j],
atlas=A,
inv_atlas=A_inv,
low_ram=low_ram,
temp_dir=temp_dir)
for i in range(n_subjects) for j in range(n_sessions))
if low_ram:
reduced_data_list = np.reshape(reduced_data_list,
(n_subjects, n_sessions))
else:
if len(np.array(reduced_data_list).shape) == 1:
reduced_data_list = np.reshape(reduced_data_list,
(n_subjects, n_sessions))
else:
n_timeframes, n_supervoxels = np.array(reduced_data_list).shape[1:]
reduced_data_list = np.reshape(
reduced_data_list,
(n_subjects, n_sessions, n_timeframes, n_supervoxels))
return reduced_data_list
def _reduced_space_compute_shared_response(reduced_data_list,
reduced_basis_list,
n_components=50):
"""Compute shared response with basis fixed in reduced space
Parameters
----------
reduced_data_list : array of str, shape=[n_subjects, n_sessions]
or array, shape=[n_subjects, n_sessions, n_timeframes, n_supervoxels]
Element i, j of the array is a path to the data of subject i
collected during session j.
Data are loaded with numpy.load and expected shape is
[n_timeframes, n_supervoxels]
or Element i, j of the array is the data in array of
shape=[n_timeframes, n_supervoxels]
n_timeframes and n_supervoxels are
assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
reduced_basis_list : None or list of array, element i has
shape=[n_components, n_supervoxels]
each subject's reduced basis
if None the basis will be generated on the fly
n_components : int or None
number of components
Returns
-------
shared_response_list : list of array, element i has
shape=[n_timeframes, n_components]
shared response, element i is the shared response during session i
"""
n_subjects, n_sessions = reduced_data_list.shape[:2]
s = [None] * n_sessions
# This is just to check that all subjects have same number of
# timeframes in a given session
for n in range(n_subjects):
for m in range(n_sessions):
data_nm = safe_load(reduced_data_list[n][m])
n_timeframes, n_supervoxels = data_nm.shape
if reduced_basis_list is None:
reduced_basis_list = []
for subject in range(n_subjects):
q = np.eye(n_components, n_supervoxels)
reduced_basis_list.append(q)
basis_n = reduced_basis_list[n]
if s[m] is None:
s[m] = data_nm.dot(basis_n.T)
else:
s[m] = s[m] + data_nm.dot(basis_n.T)
for m in range(n_sessions):
s[m] = s[m] / float(n_subjects)
return s
def _compute_and_save_corr_mat(img, shared_response, temp_dir):
"""computes correlation matrix and stores it
Parameters
----------
img : str
path to data.
Data are loaded with numpy.load and expected shape is
[n_timeframes, n_voxels]
n_timeframes and n_voxels are assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
shared_response : array, shape=[n_timeframes, n_components]
shared response
"""
data = safe_load(img).T
name = safe_encode(img)
path = os.path.join(temp_dir, "corr_mat_" + name)
np.save(path, shared_response.T.dot(data))
def _compute_and_save_subject_basis(subject_number, sessions, temp_dir):
"""computes correlation matrix for all sessions
Parameters
----------
subject_number: int
Number that identifies the subject. Basis will be stored in
[temp_dir]/basis_[subject_number].npy
sessions : array of str
Element i of the array is a path to the data collected during
session i.
Data are loaded with numpy.load and expected shape is
[n_timeframes, n_voxels]
n_timeframes and n_voxels are assumed to be the same across subjects
n_timeframes can vary across sessions
        Each voxel's timecourse is assumed to have mean 0 and variance 1
temp_dir : str or None
path to dir where temporary results are stored
if None temporary results will be stored in memory. This
        can result in memory errors when the number of subjects
and / or sessions is large
Returns
-------
basis: array, shape=[n_component, n_voxels] or str
basis of subject [subject_number] or path to this basis
"""
corr_mat = None
for session in sessions:
name = safe_encode(session)
path = os.path.join(temp_dir, "corr_mat_" + name + ".npy")
if corr_mat is None:
corr_mat = np.load(path)
else:
corr_mat += np.load(path)
os.remove(path)
basis_i = _compute_subject_basis(corr_mat)
path = os.path.join(temp_dir, "basis_%i" % subject_number)
np.save(path, basis_i)
return path + ".npy"
def _compute_subject_basis(corr_mat):
"""From correlation matrix between shared response and subject data,
Finds subject's basis
Parameters
----------
corr_mat: array, shape=[n_component, n_voxels]
or shape=[n_components, n_supervoxels]
correlation matrix between shared response and subject data or
subject reduced data
element k, v is given by S.T.dot(X_i) where S is the shared response
and X_i the data of subject i.
Returns
-------
basis: array, shape=[n_components, n_voxels]
or shape=[n_components, n_supervoxels]
basis of subject or reduced_basis of subject
"""
# The perturbation is only here to be
# consistent with current implementation
# of DetSRM.
perturbation = np.zeros(corr_mat.shape)
np.fill_diagonal(perturbation, 0.001)
U, _, V = scipy.linalg.svd(corr_mat + perturbation, full_matrices=False)
return U.dot(V)
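# Minimal check sketch (illustrative, not part of the original module): the
# basis W = U.dot(V) returned above is an orthogonal Procrustes factor, so
# its rows are orthonormal whenever n_components <= n_voxels. The helper
# name is hypothetical.
def _basis_has_orthonormal_rows(corr_mat):
    basis = _compute_subject_basis(corr_mat)
    return np.allclose(basis.dot(basis.T), np.eye(basis.shape[0]))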
def fast_srm(reduced_data_list,
n_iter=10,
n_components=None,
low_ram=False,
seed=0):
"""Computes shared response and basis in reduced space
Parameters
----------
reduced_data_list : array, shape=[n_subjects, n_sessions]
or array, shape=[n_subjects, n_sessions, n_timeframes, n_supervoxels]
Element i, j of the array is a path to the data of subject i
collected during session j.
Data are loaded with numpy.load and expected
shape is [n_timeframes, n_supervoxels]
or Element i, j of the array is the data in array of
shape=[n_timeframes, n_supervoxels]
n_timeframes and n_supervoxels are
assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
n_iter : int
Number of iterations performed
n_components : int or None
number of components
Returns
-------
shared_response_list : list of array, element i has
shape=[n_timeframes, n_components]
shared response, element i is the shared response during session i
"""
if low_ram:
return lowram_srm(reduced_data_list, n_iter, n_components)
else:
# We need to switch data to DetSRM format
# Indeed in DetSRM all sessions are concatenated.
# Whereas in FastSRM multiple sessions are supported.
n_subjects, n_sessions = reduced_data_list.shape[:2]
# We store the correspondence between timeframes and session
timeframes_slices = []
current_j = 0
for j in range(n_sessions):
timeframes_slices.append(
slice(current_j, current_j + len(reduced_data_list[0, j])))
current_j += len(reduced_data_list[0][j])
# Now we can concatenate everything
X = [
np.concatenate(reduced_data_list[i], axis=0).T
for i in range(n_subjects)
]
srm = DetSRM(n_iter=n_iter, features=n_components, rand_seed=seed)
srm.fit(X)
# SRM gives a list of data projected in shared space
# we get the shared response by averaging those
concatenated_s = np.mean(srm.transform(X), axis=0).T
# Let us return the shared response sliced by sessions
return [concatenated_s[i] for i in timeframes_slices]
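# Minimal usage sketch (illustrative, not part of the public brainiak API):
# chain reduce_data and fast_srm on toy in-memory data. Shapes follow the
# docstrings above; the random data and the helper name are assumptions made
# for demonstration only.
def _toy_fast_srm_example(n_subjects=3, n_voxels=200, n_timeframes=50,
                          n_components=5, seed=0):
    rng = np.random.RandomState(seed)
    imgs = []
    for _ in range(n_subjects):
        x = rng.randn(n_voxels, n_timeframes)
        # each voxel's timecourse must have mean 0 and unit variance
        x = (x - x.mean(axis=1, keepdims=True)) / x.std(axis=1, keepdims=True)
        imgs.append([x])  # one session per subject
    reduced = reduce_data(imgs, atlas=None)
    shared = fast_srm(reduced, n_iter=5, n_components=n_components)
    # shared is a list with one array of shape (n_timeframes, n_components)
    return shared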
def lowram_srm(reduced_data_list, n_iter=10, n_components=None):
"""Computes shared response and basis in reduced space
Parameters
----------
reduced_data_list : array of str, shape=[n_subjects, n_sessions]
Element i, j of the array is a path to the data of subject i
collected during session j.
Data are loaded with numpy.load and expected
shape is [n_timeframes, n_supervoxels]
n_timeframes and n_supervoxels are
assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
n_iter : int
Number of iterations performed
n_components : int or None
number of components
Returns
-------
shared_response_list : list of array, element i has
shape=[n_timeframes, n_components]
shared response, element i is the shared response during session i
"""
n_subjects, n_sessions = reduced_data_list.shape[:2]
shared_response = _reduced_space_compute_shared_response(
reduced_data_list, None, n_components)
reduced_basis = [None] * n_subjects
for _ in range(n_iter):
for n in range(n_subjects):
cov = None
for m in range(n_sessions):
data_nm = np.load(reduced_data_list[n, m])
if cov is None:
cov = shared_response[m].T.dot(data_nm)
else:
cov += shared_response[m].T.dot(data_nm)
reduced_basis[n] = _compute_subject_basis(cov)
shared_response = _reduced_space_compute_shared_response(
reduced_data_list, reduced_basis, n_components)
return shared_response
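# Hedged sketch (comments only; the file names below are made up): lowram_srm
# expects an array of .npy paths, one per (subject, session), each file
# holding an array of shape [n_timeframes, n_supervoxels], e.g.
#
#     reduced_data_list = np.array([
#         ["sub0_sess0.npy", "sub0_sess1.npy"],
#         ["sub1_sess0.npy", "sub1_sess1.npy"],
#     ])
#     shared = lowram_srm(reduced_data_list, n_iter=10, n_components=5)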
def _compute_basis_subject_online(sessions, shared_response_list):
"""Computes subject's basis with shared response fixed
Parameters
----------
sessions : array of str
Element i of the array is a path to the data
collected during session i.
Data are loaded with numpy.load and expected shape is
[n_timeframes, n_voxels]
n_timeframes and n_voxels are assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
shared_response_list : list of array, element i has
shape=[n_timeframes, n_components]
shared response, element i is the shared response during session i
Returns
-------
basis: array, shape=[n_components, n_voxels]
basis
"""
basis_i = None
i = 0
for session in sessions:
data = safe_load(session).T
if basis_i is None:
basis_i = shared_response_list[i].T.dot(data)
else:
basis_i += shared_response_list[i].T.dot(data)
i += 1
del data
return _compute_subject_basis(basis_i)
def _compute_shared_response_online_single(subjects, basis_list, temp_dir,
subjects_indexes, aggregate):
"""Computes shared response during one session with basis fixed
Parameters
----------
subjects : array of str
Element i of the array is a path to the data of subject i.
Data are loaded with numpy.load and expected shape is
[n_timeframes, n_voxels]
n_timeframes and n_voxels are assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
basis_list : None or list of array, element i has
shape=[n_components, n_voxels]
basis of all subjects, element i is the basis of subject i
temp_dir : None or str
path to basis folder where file basis_%i.npy contains the basis of
subject i
subjects_indexes : list of int or None
list of indexes corresponding to the subjects to use to compute
shared response
aggregate: str or None, default="mean"
if "mean": returns the mean shared response S from all subjects
if None: returns the subject-specific response in shared space S_i
Returns
-------
shared_response : array, shape=[n_timeframes, n_components] or list
shared response
"""
n = 0
if aggregate == "mean":
shared_response = None
if aggregate is None:
shared_response = []
for k, i in enumerate(subjects_indexes):
subject = subjects[k]
# Transpose to be consistent with paper
data = safe_load(subject).T
if temp_dir is None:
basis_i = basis_list[i]
else:
basis_i = np.load(os.path.join(temp_dir, "basis_%i.npy" % i))
if aggregate == "mean":
if shared_response is None:
shared_response = data.dot(basis_i.T)
else:
shared_response += data.dot(basis_i.T)
n += 1
if aggregate is None:
shared_response.append(data.dot(basis_i.T))
if aggregate is None:
return shared_response
if aggregate == "mean":
return shared_response / float(n)
def _compute_shared_response_online(imgs, basis_list, temp_dir, n_jobs,
subjects_indexes, aggregate):
"""Computes shared response with basis fixed
Parameters
----------
imgs : array of str, shape=[n_subjects, n_sessions]
Element i, j of the array is a path to the data of subject i
collected during session j.
Data are loaded with numpy.load and expected shape is
[n_timeframes, n_voxels]
n_timeframes and n_voxels are assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
imgs can also be a list of list of arrays where element i, j of
the array is a numpy array of shape [n_voxels, n_timeframes] that
contains the data of subject i collected during session j.
basis_list : None or list of array, element i has
shape=[n_components, n_voxels]
basis of all subjects, element i is the basis of subject i
temp_dir : None or str
path to basis folder where file basis_%i.npy contains the basis of
subject i
n_jobs : integer, optional, default=1
The number of CPUs to use to do the computation.
-1 means all CPUs, -2 all CPUs but one, and so on.
subjects_indexes : list or None
list of indexes corresponding to the subjects to use to compute
shared response
aggregate: str or None, default="mean"
if "mean": returns the mean shared response S from all subjects
if None: returns the subject-specific response in shared space S_i
Returns
-------
shared_response_list : list of array or list of list of array
shared response, element i is the shared response during session i
or element i, j is the shared response of subject i during session j
"""
n_subjects = len(subjects_indexes)
n_sessions = len(imgs[0])
shared_response_list = Parallel(n_jobs=n_jobs)(
delayed(_compute_shared_response_online_single)
([imgs[i][j] for i in range(n_subjects)], basis_list, temp_dir,
subjects_indexes, aggregate) for j in range(n_sessions))
if aggregate is None:
shared_response_list = [[
shared_response_list[j][i].T for j in range(n_sessions)
] for i in range(n_subjects)]
if aggregate == "mean":
shared_response_list = [
shared_response_list[j].T for j in range(n_sessions)
]
return shared_response_list
class FastSRM(BaseEstimator, TransformerMixin):
"""SRM decomposition using a very low amount of memory and \
computational power thanks to the use of an atlas \
as described in [Richard2019]_.
Given multi-subject data, factorize it as a shared response S \
among all subjects and an orthogonal transform (basis) W per subject:
.. math:: X_i \\approx W_i S, \\forall i=1 \\dots N
Parameters
----------
atlas : array, shape=[n_supervoxels, n_voxels] or array,\
shape=[n_voxels] or str or None, default=None
Probabilistic or deterministic atlas on which to project the data. \
Deterministic atlas is an array of shape [n_voxels,] \
where values range from 1 \
to n_supervoxels. Voxels labelled 0 will be ignored. If atlas is a str the \
corresponding array is loaded with numpy.load and expected shape \
is (n_voxels,) for a deterministic atlas and \
(n_supervoxels, n_voxels) for a probabilistic atlas.
n_components : int
Number of timecourses of the shared coordinates
n_iter : int
Number of iterations to perform
temp_dir : str or None
Path to dir where temporary results are stored. If None \
    temporary results will be stored in memory. This can result in memory \
errors when the number of subjects and/or sessions is large
low_ram : bool
If True and temp_dir is not None, reduced_data will be saved on \
    disk. This increases the number of IO operations but reduces memory complexity when \
    the number of subjects and/or sessions is large
seed : int
Seed used for random sampling.
n_jobs : int, optional, default=1
The number of CPUs to use to do the computation. \
-1 means all CPUs, -2 all CPUs but one, and so on.
verbose : bool or "warn"
If True, logs are enabled. If False, logs are disabled. \
If "warn" only warnings are printed.
aggregate: str or None, default="mean"
If "mean", shared_response is the mean shared response \
from all subjects. If None, shared_response contains all \
subject-specific responses in shared space
Attributes
----------
`basis_list`: list of array, element i has \
shape=[n_components, n_voxels] or list of str
- if basis is a list of array, element i is the basis of subject i
- if basis is a list of str, element i is the path to the basis \
of subject i that is loaded with np.load yielding an array of \
shape [n_components, n_voxels].
Note that any call to the clean method erases this attribute
Note
-----
**References:**
H. Richard, L. Martin, A. Pinho, J. Pillow, B. Thirion, 2019: \
Fast shared response model for fMRI data (https://arxiv.org/pdf/1909.12537.pdf)
"""
def __init__(self,
atlas=None,
n_components=20,
n_iter=100,
temp_dir=None,
low_ram=False,
seed=None,
n_jobs=1,
verbose="warn",
aggregate="mean"):
self.seed = seed
self.n_jobs = n_jobs
self.verbose = verbose
self.n_components = n_components
self.n_iter = n_iter
self.atlas = atlas
if aggregate is not None and aggregate != "mean":
raise ValueError("aggregate can have only value mean or None")
self.aggregate = aggregate
self.basis_list = None
if temp_dir is None:
if self.verbose == "warn" or self.verbose is True:
                logger.warning("temp_dir has value None. "
                               "All bases (spatial maps) and reconstructed "
                               "data will therefore be kept in memory. "
"This can lead to memory errors when the "
"number of subjects "
"and/or sessions is large.")
self.temp_dir = None
self.low_ram = False
if temp_dir is not None:
self.temp_dir = os.path.join(temp_dir,
"fastsrm" + str(uuid.uuid4()))
self.low_ram = low_ram
    def clean(self):
        """This erases temporary files and the basis_list attribute to \
        free memory. This method should be called when the fitted model \
is not needed anymore.
"""
if self.temp_dir is not None:
if os.path.exists(self.temp_dir):
for root, dirs, files in os.walk(self.temp_dir, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
os.rmdir(self.temp_dir)
if self.basis_list is not None:
            self.basis_list = None
def fit(self, imgs):
"""Computes basis across subjects from input imgs
Parameters
----------
imgs : array of str, shape=[n_subjects, n_sessions] or \
list of list of arrays or list of arrays
Element i, j of the array is a path to the data of subject i \
collected during session j. Data are loaded with numpy.load and expected \
shape is [n_voxels, n_timeframes] n_timeframes and n_voxels are assumed \
to be the same across subjects n_timeframes can vary across sessions. \
Each voxel's timecourse is assumed to have mean 0 and variance 1
imgs can also be a list of list of arrays where element i, j \
of the array is a numpy array of shape [n_voxels, n_timeframes] \
that contains the data of subject i collected during session j.
imgs can also be a list of arrays where element i \
of the array is a numpy array of shape [n_voxels, n_timeframes] \
that contains the data of subject i (number of sessions is implicitly 1)
Returns
-------
self : object
Returns the instance itself. Contains attributes listed \
at the object level.
"""
atlas_shape = check_atlas(self.atlas, self.n_components)
reshaped_input, imgs, shapes = check_imgs(
imgs, n_components=self.n_components, atlas_shape=atlas_shape)
self.clean()
create_temp_dir(self.temp_dir)
if self.verbose is True:
logger.info("[FastSRM.fit] Reducing data")
reduced_data = reduce_data(imgs,
atlas=self.atlas,
n_jobs=self.n_jobs,
low_ram=self.low_ram,
temp_dir=self.temp_dir)
if self.verbose is True:
logger.info("[FastSRM.fit] Finds shared "
"response using reduced data")
shared_response_list = fast_srm(reduced_data,
n_iter=self.n_iter,
n_components=self.n_components,
low_ram=self.low_ram,
seed=self.seed)
if self.verbose is True:
logger.info("[FastSRM.fit] Finds basis using "
"full data and shared response")
if self.n_jobs == 1:
basis = []
for i, sessions in enumerate(imgs):
basis_i = _compute_basis_subject_online(
sessions, shared_response_list)
if self.temp_dir is None:
basis.append(basis_i)
else:
path = os.path.join(self.temp_dir, "basis_%i" % i)
np.save(path, basis_i)
basis.append(path + ".npy")
del basis_i
else:
if self.temp_dir is None:
basis = Parallel(n_jobs=self.n_jobs)(
delayed(_compute_basis_subject_online)(
sessions, shared_response_list) for sessions in imgs)
else:
Parallel(n_jobs=self.n_jobs)(
delayed(_compute_and_save_corr_mat)(
imgs[i][j], shared_response_list[j], self.temp_dir)
for j in range(len(imgs[0])) for i in range(len(imgs)))
basis = Parallel(n_jobs=self.n_jobs)(
delayed(_compute_and_save_subject_basis)(i, sessions,
self.temp_dir)
for i, sessions in enumerate(imgs))
self.basis_list = basis
return self
def fit_transform(self, imgs, subjects_indexes=None):
"""Computes basis across subjects and shared response from input imgs
return shared response.
Parameters
----------
imgs : array of str, shape=[n_subjects, n_sessions] or \
list of list of arrays or list of arrays
Element i, j of the array is a path to the data of subject i \
collected during session j. Data are loaded with numpy.load and expected \
shape is [n_voxels, n_timeframes] n_timeframes and n_voxels are assumed \
to be the same across subjects n_timeframes can vary across sessions. \
Each voxel's timecourse is assumed to have mean 0 and variance 1
imgs can also be a list of list of arrays where element i, j \
of the array is a numpy array of shape [n_voxels, n_timeframes] \
that contains the data of subject i collected during session j.
imgs can also be a list of arrays where element i \
of the array is a numpy array of shape [n_voxels, n_timeframes] \
that contains the data of subject i (number of sessions is implicitly 1)
subjects_indexes : list or None:
if None imgs[i] will be transformed using basis_list[i]. \
Otherwise imgs[i] will be transformed using basis_list[subjects_index[i]]
Returns
--------
shared_response : list of arrays, list of list of arrays or array
- if imgs is a list of array and self.aggregate="mean": shared \
response is an array of shape (n_components, n_timeframes)
- if imgs is a list of array and self.aggregate=None: shared \
response is a list of array, element i is the projection of data of \
subject i in shared space.
- if imgs is an array or a list of list of array and \
self.aggregate="mean": shared response is a list of array, \
element j is the shared response during session j
- if imgs is an array or a list of list of array and \
self.aggregate=None: shared response is a list of list of array, \
element i, j is the projection of data of subject i collected \
during session j in shared space.
"""
self.fit(imgs)
return self.transform(imgs, subjects_indexes=subjects_indexes)
def transform(self, imgs, subjects_indexes=None):
"""From data in imgs and basis from training data,
computes shared response.
Parameters
----------
imgs : array of str, shape=[n_subjects, n_sessions] or \
list of list of arrays or list of arrays
Element i, j of the array is a path to the data of subject i \
collected during session j. Data are loaded with numpy.load and expected \
shape is [n_voxels, n_timeframes] n_timeframes and n_voxels are assumed \
to be the same across subjects n_timeframes can vary across sessions. \
Each voxel's timecourse is assumed to have mean 0 and variance 1
imgs can also be a list of list of arrays where element i, j \
of the array is a numpy array of shape [n_voxels, n_timeframes] \
that contains the data of subject i collected during session j.
imgs can also be a list of arrays where element i \
of the array is a numpy array of shape [n_voxels, n_timeframes] \
that contains the data of subject i (number of sessions is implicitly 1)
subjects_indexes : list or None:
if None imgs[i] will be transformed using basis_list[i]. \
Otherwise imgs[i] will be transformed using basis[subjects_index[i]]
Returns
--------
shared_response : list of arrays, list of list of arrays or array
- if imgs is a list of array and self.aggregate="mean": shared \
response is an array of shape (n_components, n_timeframes)
- if imgs is a list of array and self.aggregate=None: shared \
response is a list of array, element i is the projection of data of \
subject i in shared space.
- if imgs is an array or a list of list of array and \
self.aggregate="mean": shared response is a list of array, \
element j is the shared response during session j
- if imgs is an array or a list of list of array and \
self.aggregate=None: shared response is a list of list of array, \
element i, j is the projection of data of subject i collected \
during session j in shared space.
"""
aggregate = self.aggregate
if self.basis_list is None:
raise NotFittedError("The model fit has not been run yet.")
atlas_shape = check_atlas(self.atlas, self.n_components)
reshaped_input, imgs, shapes = check_imgs(
imgs,
n_components=self.n_components,
atlas_shape=atlas_shape,
ignore_nsubjects=True)
check_indexes(subjects_indexes, "subjects_indexes")
if subjects_indexes is None:
subjects_indexes = np.arange(len(imgs))
else:
subjects_indexes = np.array(subjects_indexes)
# Transform specific checks
        if len(subjects_indexes) != len(imgs):
raise ValueError("Input data imgs has len %i whereas "
"subject_indexes has len %i. "
"The number of basis used to compute "
"the shared response should be equal "
"to the number of subjects in imgs" %
(len(imgs), len(subjects_indexes)))
assert_valid_index(subjects_indexes, len(self.basis_list),
"subjects_indexes")
shared_response = _compute_shared_response_online(
imgs, self.basis_list, self.temp_dir, self.n_jobs,
subjects_indexes, aggregate)
# If shared response has only 1 session we need to reshape it
if reshaped_input:
if aggregate == "mean":
shared_response = shared_response[0]
if aggregate is None:
shared_response = [
shared_response[i][0] for i in range(len(subjects_indexes))
]
return shared_response
def inverse_transform(
self,
shared_response,
subjects_indexes=None,
sessions_indexes=None,
):
"""From shared response and basis from training data
reconstruct subject's data
Parameters
----------
shared_response : list of arrays, list of list of arrays or array
- if imgs is a list of array and self.aggregate="mean": shared \
response is an array of shape (n_components, n_timeframes)
- if imgs is a list of array and self.aggregate=None: shared \
response is a list of array, element i is the projection of data of \
subject i in shared space.
- if imgs is an array or a list of list of array and \
self.aggregate="mean": shared response is a list of array, \
element j is the shared response during session j
- if imgs is an array or a list of list of array and \
self.aggregate=None: shared response is a list of list of array, \
element i, j is the projection of data of subject i collected \
during session j in shared space.
subjects_indexes : list or None
if None reconstructs data of all subjects used during train. \
Otherwise reconstructs data of subjects specified by subjects_indexes.
sessions_indexes : list or None
if None reconstructs data of all sessions. \
            Otherwise reconstructs data of sessions specified by sessions_indexes.
Returns
-------
reconstructed_data: list of list of arrays or list of arrays
- if reconstructed_data is a list of list : element i, j is \
the reconstructed data for subject subjects_indexes[i] and \
session sessions_indexes[j] as an np array of shape n_voxels, \
n_timeframes
- if reconstructed_data is a list : element i is the \
reconstructed data for subject \
subject_indexes[i] as an np array of shape n_voxels, n_timeframes
"""
added_session, shared = check_shared_response(
shared_response, self.aggregate, n_components=self.n_components)
n_subjects = len(self.basis_list)
n_sessions = len(shared)
for j in range(n_sessions):
assert_array_2axis(shared[j], "shared_response[%i]" % j)
check_indexes(subjects_indexes, "subjects_indexes")
check_indexes(sessions_indexes, "sessions_indexes")
if subjects_indexes is None:
subjects_indexes = np.arange(n_subjects)
else:
subjects_indexes = np.array(subjects_indexes)
assert_valid_index(subjects_indexes, n_subjects, "subjects_indexes")
if sessions_indexes is None:
sessions_indexes = np.arange(len(shared))
else:
sessions_indexes = np.array(sessions_indexes)
assert_valid_index(sessions_indexes, n_sessions, "sessions_indexes")
data = []
for i in subjects_indexes:
data_ = []
basis_i = safe_load(self.basis_list[i])
if added_session:
data.append(basis_i.T.dot(shared[0]))
else:
for j in sessions_indexes:
data_.append(basis_i.T.dot(shared[j]))
data.append(data_)
return data
def add_subjects(self, imgs, shared_response):
""" Add subjects to the current fit. Each new basis will be \
        appended at the end of the list of bases (which can \
        be accessed using self.basis_list)
Parameters
----------
imgs : array of str, shape=[n_subjects, n_sessions] or \
list of list of arrays or list of arrays
Element i, j of the array is a path to the data of subject i \
collected during session j. Data are loaded with numpy.load and expected \
shape is [n_voxels, n_timeframes] n_timeframes and n_voxels are assumed \
to be the same across subjects n_timeframes can vary across sessions. \
Each voxel's timecourse is assumed to have mean 0 and variance 1
imgs can also be a list of list of arrays where element i, j \
of the array is a numpy array of shape [n_voxels, n_timeframes] \
that contains the data of subject i collected during session j.
imgs can also be a list of arrays where element i \
of the array is a numpy array of shape [n_voxels, n_timeframes] \
that contains the data of subject i (number of sessions is implicitly 1)
shared_response : list of arrays, list of list of arrays or array
- if imgs is a list of array and self.aggregate="mean": shared \
response is an array of shape (n_components, n_timeframes)
- if imgs is a list of array and self.aggregate=None: shared \
response is a list of array, element i is the projection of data of \
subject i in shared space.
- if imgs is an array or a list of list of array and \
self.aggregate="mean": shared response is a list of array, \
element j is the shared response during session j
- if imgs is an array or a list of list of array and \
self.aggregate=None: shared response is a list of list of array, \
element i, j is the projection of data of subject i collected \
during session j in shared space.
"""
atlas_shape = check_atlas(self.atlas, self.n_components)
reshaped_input, imgs, shapes = check_imgs(
imgs,
n_components=self.n_components,
atlas_shape=atlas_shape,
ignore_nsubjects=True)
_, shared_response_list = check_shared_response(
shared_response,
n_components=self.n_components,
aggregate=self.aggregate,
input_shapes=shapes)
# we need to transpose shared_response_list to be consistent with
# other functions
shared_response_list = [
shared_response_list[j].T for j in range(len(shared_response_list))
]
if self.n_jobs == 1:
basis = []
for i, sessions in enumerate(imgs):
basis_i = _compute_basis_subject_online(
sessions, shared_response_list)
if self.temp_dir is None:
basis.append(basis_i)
else:
path = os.path.join(
self.temp_dir, "basis_%i" % (len(self.basis_list) + i))
np.save(path, basis_i)
basis.append(path + ".npy")
del basis_i
else:
if self.temp_dir is None:
basis = Parallel(n_jobs=self.n_jobs)(
delayed(_compute_basis_subject_online)(
sessions, shared_response_list) for sessions in imgs)
else:
Parallel(n_jobs=self.n_jobs)(
delayed(_compute_and_save_corr_mat)(
imgs[i][j], shared_response_list[j], self.temp_dir)
for j in range(len(imgs[0])) for i in range(len(imgs)))
basis = Parallel(n_jobs=self.n_jobs)(
delayed(_compute_and_save_subject_basis)(
len(self.basis_list) + i, sessions, self.temp_dir)
for i, sessions in enumerate(imgs))
self.basis_list += basis
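# Hedged end-to-end sketch (demo only; the data, atlas and parameter values
# below are invented for illustration and are not part of the original
# module). It mirrors the documented workflow: fit, transform and
# inverse_transform with a deterministic atlas.
def _demo_fastsrm_workflow():
    rng = np.random.RandomState(0)
    n_subjects, n_sessions, n_voxels, n_timeframes = 3, 2, 200, 50
    # imgs as a list (subjects) of lists (sessions) of [n_voxels, n_timeframes]
    # arrays; each voxel timecourse is standardized as the docstrings require.
    imgs = []
    for _ in range(n_subjects):
        sessions = []
        for _ in range(n_sessions):
            data = rng.randn(n_voxels, n_timeframes)
            data -= data.mean(axis=1, keepdims=True)
            data /= data.std(axis=1, keepdims=True)
            sessions.append(data)
        imgs.append(sessions)
    # deterministic atlas: each voxel is assigned to one of 20 supervoxels
    atlas = rng.randint(1, 21, size=n_voxels)
    srm = FastSRM(atlas=atlas, n_components=5, n_iter=10, verbose=False)
    # with aggregate="mean" (the default), shared[j] is the shared response of
    # session j, of shape [n_components, n_timeframes]
    shared = srm.fit_transform(imgs)
    # reconstruct the data of the first subject from the shared response
    reconstructed = srm.inverse_transform(shared, subjects_indexes=[0])
    srm.clean()
    return shared, reconstructed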
| apache-2.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/linear_model/tests/test_omp.py | 76 | 7752 | # Author: Vlad Niculae
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
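# Hedged note (comments only): G and Xy above precompute the Gram matrix
# X.T.dot(X) and the product X.T.dot(y), so the Gram-based variants exercised
# below (orthogonal_mp_gram and precompute=True) can be compared against the
# plain orthogonal_mp(X, y, ...) calls.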
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| mit |
shopkeep/aws_etl_tools | tests/test_redshift_ingest_integration.py | 1 | 4561 | import os
import csv
import unittest
from importlib import reload
import pandas as pd
import boto3
from aws_etl_tools.mock_s3_connection import MockS3Connection
from aws_etl_tools import config
from aws_etl_tools.redshift_ingest import *
from tests import test_helper
class TestRedshiftIngestIntegration(unittest.TestCase):
TARGET_TABLE = 'public.testing_channels'
AUDIT_TABLE = config.REDSHIFT_INGEST_AUDIT_TABLE
EXPECTED_COUNT_OF_AUDIT_FIELDS = 5
S3_BUCKET_NAME = test_helper.S3_TEST_BUCKET_NAME
TARGET_DATABASE = test_helper.BasicRedshiftButActuallyPostgres()
DESTINATION = RedshiftTable(
database=TARGET_DATABASE,
target_table=TARGET_TABLE,
upsert_uniqueness_key=('id',)
)
def setUp(self):
self.TARGET_DATABASE.execute("""
CREATE TABLE %s (
id integer PRIMARY KEY,
value varchar(20)
)""" % self.TARGET_TABLE
)
test_helper.set_default_s3_base_path()
def tearDown(self):
self.TARGET_DATABASE.execute("""DROP TABLE %s""" % self.TARGET_TABLE)
self.TARGET_DATABASE.execute("""TRUNCATE TABLE %s""" % self.AUDIT_TABLE)
test_helper.clear_temp_directory()
def assert_data_in_target(self):
actual_target_data = self.TARGET_DATABASE.fetch("""select * from {0}""".format(self.TARGET_TABLE))
expected_target_data = [(5, 'funzies'), (7, 'sadzies')]
self.assertEqual(actual_target_data, expected_target_data)
def assert_audit_row_created(self):
audit_row = self.TARGET_DATABASE.fetch("""select * from {0} order by loaded_at desc""".format(self.AUDIT_TABLE))[0]
actual_count_of_audit_fields = len(audit_row)
self.assertEqual(actual_count_of_audit_fields, self.EXPECTED_COUNT_OF_AUDIT_FIELDS)
@MockS3Connection(bucket=S3_BUCKET_NAME)
def test_in_memory_data_to_redshift(self):
source_data = [[5, 'funzies'], [7, 'sadzies']]
from_in_memory(source_data, self.DESTINATION)
self.assert_data_in_target()
self.assert_audit_row_created()
@MockS3Connection(bucket=S3_BUCKET_NAME)
def test_dataframe_to_redshift(self):
source_dataframe = pd.DataFrame(
[(5, 'funzies'), (7, 'sadzies')],
columns=['one', 'two'],
index=['alpha', 'beta']
)
from_dataframe(source_dataframe, self.DESTINATION)
self.assert_data_in_target()
self.assert_audit_row_created()
@MockS3Connection(bucket=S3_BUCKET_NAME)
def test_postgres_query_to_redshift(self):
source_db = test_helper.BasicPostgres()
source_query = """
SELECT 5 AS number, 'funzies' AS category
UNION SELECT 7, 'sadzies'
"""
from_postgres_query(source_db, source_query, self.DESTINATION)
self.assert_data_in_target()
self.assert_audit_row_created()
@MockS3Connection(bucket=S3_BUCKET_NAME)
def test_local_file_to_redshift(self):
file_contents = '5,funzies\n7,sadzies\n'
file_path = os.path.join(config.LOCAL_TEMP_DIRECTORY, 'csv_data.csv')
with open(file_path, 'w') as file_writer:
file_writer.write(file_contents)
from_local_file(file_path, self.DESTINATION)
self.assert_data_in_target()
self.assert_audit_row_created()
@MockS3Connection(bucket=S3_BUCKET_NAME)
def test_s3_path_to_redshift(self):
file_contents = '5,funzies\n7,sadzies\n'
s3_bucket_name = test_helper.S3_TEST_BUCKET_NAME
s3_key_name = 'namespaced/file/here.csv'
s3 = boto3.resource('s3')
s3.Object(s3_bucket_name, s3_key_name).put(Body=file_contents)
full_s3_path = 's3://' + s3_bucket_name + '/' + s3_key_name
from_s3_path(full_s3_path, self.DESTINATION)
self.assert_data_in_target()
self.assert_audit_row_created()
@MockS3Connection(bucket=S3_BUCKET_NAME)
def test_manifest_to_redshift_raises_value_error(self):
'''This cannot be integration tested because Postgres cannot trivially
be made to handle manifest files.'''
expected_exception_args = ('Postgres cannot handle manifests like redshift. Sorry.',)
manifest_dict = { "entries": [{"url": 's3://this/doesnt/matter.manifest', "mandatory": True}] }
with self.assertRaises(ValueError) as exception_context_manager:
from_manifest(manifest_dict, self.DESTINATION)
self.assertEqual(exception_context_manager.exception.args, expected_exception_args)
| apache-2.0 |
chen0031/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_fltkagg.py | 69 | 20839 | """
A backend for FLTK
Copyright: Gregory Lielens, Free Field Technologies SA and
John D. Hunter 2004
This code is released under the matplotlib license
"""
from __future__ import division
import os, sys, math
import fltk as Fltk
from backend_agg import FigureCanvasAgg
import os.path
import matplotlib
from matplotlib import rcParams, verbose
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import \
RendererBase, GraphicsContextBase, FigureManagerBase, FigureCanvasBase,\
NavigationToolbar2, cursors
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import matplotlib.windowing as windowing
from matplotlib.widgets import SubplotTool
import thread,time
Fl_running=thread.allocate_lock()
def Fltk_run_interactive():
global Fl_running
if Fl_running.acquire(0):
while True:
Fltk.Fl.check()
time.sleep(0.005)
else:
print "fl loop already running"
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord= {
cursors.HAND: Fltk.FL_CURSOR_HAND,
cursors.POINTER: Fltk.FL_CURSOR_ARROW,
cursors.SELECT_REGION: Fltk.FL_CURSOR_CROSS,
cursors.MOVE: Fltk.FL_CURSOR_MOVE
}
special_key={
Fltk.FL_Shift_R:'shift',
Fltk.FL_Shift_L:'shift',
Fltk.FL_Control_R:'control',
Fltk.FL_Control_L:'control',
65515:'win',
65516:'win',
}
def error_msg_fltk(msg, parent=None):
Fltk.fl_message(msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def ishow():
"""
Show all the figures and enter the fltk mainloop in another thread
    This allows you to keep control of the interactive python session
    Warning: does not work under Windows
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
if show._needmain:
thread.start_new_thread(Fltk_run_interactive,())
show._needmain = False
def show():
"""
Show all the figures and enter the fltk mainloop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
    #mainloop; if an fltk program already exists there is no need to call that
    #threaded (and interactive) version
if show._needmain:
Fltk.Fl.run()
show._needmain = False
show._needmain = True
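# Hedged usage sketch (comments only; assumes pyFLTK is installed and that
# this backend is selected before pyplot/pylab is imported):
#
#     import matplotlib
#     matplotlib.use('FltkAgg')
#     from pylab import plot
#     plot([1, 2, 3])
#     show()    # enters the FLTK mainloop defined above
#     # in an interactive session, ishow() runs the mainloop in a thread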
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
window = Fltk.Fl_Double_Window(10,10,30,30)
canvas = FigureCanvasFltkAgg(figure)
window.end()
window.show()
window.make_current()
figManager = FigureManagerFltkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
class FltkCanvas(Fltk.Fl_Widget):
def __init__(self,x,y,w,h,l,source):
Fltk.Fl_Widget.__init__(self, 0, 0, w, h, "canvas")
self._source=source
self._oldsize=(None,None)
self._draw_overlay = False
self._button = None
self._key = None
def draw(self):
newsize=(self.w(),self.h())
if(self._oldsize !=newsize):
self._oldsize =newsize
self._source.resize(newsize)
self._source.draw()
t1,t2,w,h = self._source.figure.bbox.bounds
Fltk.fl_draw_image(self._source.buffer_rgba(0,0),0,0,int(w),int(h),4,0)
self.redraw()
def blit(self,bbox=None):
if bbox is None:
t1,t2,w,h = self._source.figure.bbox.bounds
else:
t1o,t2o,wo,ho = self._source.figure.bbox.bounds
t1,t2,w,h = bbox.bounds
x,y=int(t1),int(t2)
Fltk.fl_draw_image(self._source.buffer_rgba(x,y),x,y,int(w),int(h),4,int(wo)*4)
#self.redraw()
def handle(self, event):
x=Fltk.Fl.event_x()
y=Fltk.Fl.event_y()
yf=self._source.figure.bbox.height() - y
if event == Fltk.FL_FOCUS or event == Fltk.FL_UNFOCUS:
return 1
elif event == Fltk.FL_KEYDOWN:
ikey= Fltk.Fl.event_key()
if(ikey<=255):
self._key=chr(ikey)
else:
try:
self._key=special_key[ikey]
except:
self._key=None
FigureCanvasBase.key_press_event(self._source, self._key)
return 1
elif event == Fltk.FL_KEYUP:
FigureCanvasBase.key_release_event(self._source, self._key)
self._key=None
elif event == Fltk.FL_PUSH:
self.window().make_current()
if Fltk.Fl.event_button1():
self._button = 1
elif Fltk.Fl.event_button2():
self._button = 2
elif Fltk.Fl.event_button3():
self._button = 3
else:
self._button = None
if self._draw_overlay:
self._oldx=x
self._oldy=y
if Fltk.Fl.event_clicks():
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
else:
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
elif event == Fltk.FL_ENTER:
self.take_focus()
return 1
elif event == Fltk.FL_LEAVE:
return 1
elif event == Fltk.FL_MOVE:
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_DRAG:
self.window().make_current()
if self._draw_overlay:
self._dx=Fltk.Fl.event_x()-self._oldx
self._dy=Fltk.Fl.event_y()-self._oldy
Fltk.fl_overlay_rect(self._oldx,self._oldy,self._dx,self._dy)
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_RELEASE:
self.window().make_current()
if self._draw_overlay:
Fltk.fl_overlay_clear()
FigureCanvasBase.button_release_event(self._source, x, yf, self._button)
self._button = None
return 1
return 0
class FigureCanvasFltkAgg(FigureCanvasAgg):
def __init__(self, figure):
FigureCanvasAgg.__init__(self,figure)
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self.canvas=FltkCanvas(0, 0, w, h, "canvas",self)
#self.draw()
def resize(self,size):
w, h = size
# compute desired figure size in inches
dpival = self.figure.dpi.get()
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch,hinch)
def draw(self):
FigureCanvasAgg.draw(self)
self.canvas.redraw()
def blit(self,bbox):
self.canvas.blit(bbox)
show = draw
def widget(self):
return self.canvas
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
def destroy_figure(ptr,figman):
figman.window.hide()
Gcf.destroy(figman._num)
class FigureManagerFltkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The fltk.Toolbar
window : The fltk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
#Fltk container window
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self.window = window
self.window.size(w,h+30)
self.window_title="Figure %d" % num
self.window.label(self.window_title)
self.window.size_range(350,200)
self.window.callback(destroy_figure,self)
self.canvas = canvas
self._num = num
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbar( canvas, self )
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2FltkAgg( canvas, self )
else:
self.toolbar = None
self.window.add_resizable(canvas.widget())
if self.toolbar:
self.window.add(self.toolbar.widget())
self.toolbar.update()
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def resize(self, event):
width, height = event.width, event.height
self.toolbar.configure(width=width) # , height=height)
def show(self):
_focus = windowing.FocusManager()
self.canvas.draw()
self.window.redraw()
def set_window_title(self, title):
self.window_title=title
self.window.label(title)
class AxisMenu:
def __init__(self, toolbar):
self.toolbar=toolbar
self._naxes = toolbar.naxes
self._mbutton = Fltk.Fl_Menu_Button(0,0,50,10,"Axes")
self._mbutton.add("Select All",0,select_all,self,0)
self._mbutton.add("Invert All",0,invert_all,self,Fltk.FL_MENU_DIVIDER)
self._axis_txt=[]
self._axis_var=[]
for i in range(self._naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes, naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
self._mbutton.remove(i+2)
if(naxes):
self._axis_var=self._axis_var[:naxes-1]
self._axis_txt=self._axis_txt[:naxes-1]
else:
self._axis_var=[]
self._axis_txt=[]
self._naxes = naxes
set_active(0,self)
def widget(self):
return self._mbutton
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].value()]
return a
def set_active(ptr,amenu):
amenu.toolbar.set_active(amenu.get_indices())
def invert_all(ptr,amenu):
for a in amenu._axis_var:
if not a.value(): a.set()
set_active(ptr,amenu)
def select_all(ptr,amenu):
for a in amenu._axis_var:
a.set()
set_active(ptr,amenu)
class FLTKButton:
def __init__(self, text, file, command,argument,type="classic"):
file = os.path.join(rcParams['datapath'], 'images', file)
self.im = Fltk.Fl_PNM_Image(file)
size=26
if type=="repeat":
self.b = Fltk.Fl_Repeat_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="classic":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="light":
self.b = Fltk.Fl_Light_Button(0,0,size+20,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="pushed":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_UP_BOX)
self.b.down_box(Fltk.FL_DOWN_BOX)
self.b.type(Fltk.FL_TOGGLE_BUTTON)
self.tooltiptext=text+" "
self.b.tooltip(self.tooltiptext)
self.b.callback(command,argument)
self.b.image(self.im)
self.b.deimage(self.im)
self.type=type
def widget(self):
return self.b
class NavigationToolbar:
"""
    Public attributes
canvas - the FigureCanvas (FigureCanvasFltkAgg = customised fltk.Widget)
"""
def __init__(self, canvas, figman):
#xmin, xmax = canvas.figure.bbox.intervalx().get_bounds()
#height, width = 50, xmax-xmin
self.canvas = canvas
self.figman = figman
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bLeft = FLTKButton(
text="Left", file="stock_left.ppm",
command=pan,argument=(self,1,'x'),type="repeat")
self.bRight = FLTKButton(
text="Right", file="stock_right.ppm",
command=pan,argument=(self,-1,'x'),type="repeat")
self.bZoomInX = FLTKButton(
text="ZoomInX",file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'x'),type="repeat")
self.bZoomOutX = FLTKButton(
text="ZoomOutX", file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'x'),type="repeat")
self.bUp = FLTKButton(
text="Up", file="stock_up.ppm",
command=pan,argument=(self,1,'y'),type="repeat")
self.bDown = FLTKButton(
text="Down", file="stock_down.ppm",
command=pan,argument=(self,-1,'y'),type="repeat")
self.bZoomInY = FLTKButton(
text="ZoomInY", file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'y'),type="repeat")
self.bZoomOutY = FLTKButton(
text="ZoomOutY",file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'y'),type="repeat")
self.bSave = FLTKButton(
text="Save", file="stock_save_as.ppm",
command=save_figure, argument=self)
self._group.end()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
def pan(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.panx(direction)
else:
a.pany(direction)
base.figman.show()
def zoom(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.zoomx(direction)
else:
a.zoomy(direction)
base.figman.show()
def save_figure(ptr,base):
filetypes = base.canvas.get_supported_filetypes()
default_filetype = base.canvas.get_default_filetype()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
selected_filter = 0
filters = []
for i, (ext, name) in enumerate(sorted_filetypes):
filter = '%s (*.%s)' % (name, ext)
filters.append(filter)
if ext == default_filetype:
selected_filter = i
filters = '\t'.join(filters)
file_chooser=base._fc
file_chooser.filter(filters)
file_chooser.filter_value(selected_filter)
file_chooser.show()
while file_chooser.visible() :
Fltk.Fl.wait()
fname=None
if(file_chooser.count() and file_chooser.value(0) != None):
fname=""
(status,fname)=Fltk.fl_filename_absolute(fname, 1024, file_chooser.value(0))
if fname is None: # Cancel
return
#start from last directory
lastDir = os.path.dirname(fname)
file_chooser.directory(lastDir)
format = sorted_filetypes[file_chooser.filter_value()][0]
try:
base.canvas.print_figure(fname, format=format)
except IOError, msg:
err = '\n'.join(map(str, msg))
msg = 'Failed to save %s: Error msg was\n\n%s' % (
fname, err)
error_msg_fltk(msg)
class NavigationToolbar2FltkAgg(NavigationToolbar2):
"""
    Public attributes
canvas - the FigureCanvas
figman - the Figure manager
"""
def __init__(self, canvas, figman):
self.canvas = canvas
self.figman = figman
NavigationToolbar2.__init__(self, canvas)
self.pan_selected=False
self.zoom_selected=False
def set_cursor(self, cursor):
Fltk.fl_cursor(cursord[cursor],Fltk.FL_BLACK,Fltk.FL_WHITE)
def dynamic_update(self):
self.canvas.draw()
def pan(self,*args):
self.pan_selected=not self.pan_selected
self.zoom_selected = False
self.canvas.canvas._draw_overlay= False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.pan(self,args)
def zoom(self,*args):
self.zoom_selected=not self.zoom_selected
self.canvas.canvas._draw_overlay=self.zoom_selected
self.pan_selected = False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.zoom(self,args)
def configure_subplots(self,*args):
window = Fltk.Fl_Double_Window(100,100,480,240)
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasFltkAgg(toolfig)
window.end()
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
window.show()
canvas.show()
def _init_toolbar(self):
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = self.canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bHome = FLTKButton(
text="Home", file="home.ppm",
command=self.home,argument=self)
self.bBack = FLTKButton(
text="Back", file="back.ppm",
command=self.back,argument=self)
self.bForward = FLTKButton(
text="Forward", file="forward.ppm",
command=self.forward,argument=self)
self.bPan = FLTKButton(
text="Pan/Zoom",file="move.ppm",
command=self.pan,argument=self,type="pushed")
self.bZoom = FLTKButton(
text="Zoom to rectangle",file="zoom_to_rect.ppm",
command=self.zoom,argument=self,type="pushed")
self.bsubplot = FLTKButton( text="Configure Subplots", file="subplots.ppm",
command = self.configure_subplots,argument=self,type="pushed")
self.bSave = FLTKButton(
text="Save", file="filesave.ppm",
command=save_figure, argument=self)
self._group.end()
self.message = Fltk.Fl_Output(0,0,w,8)
self._group.add_resizable(self.message)
self.update()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def set_message(self, s):
self.message.value(s)
FigureManager = FigureManagerFltkAgg
| agpl-3.0 |
netortik/yandex-tank | setup.py | 1 | 3156 | from setuptools import setup, find_packages
setup(
name='yandextank',
version='1.11.2',
description='a performance measurement tool',
longer_description='''
Yandex.Tank is a performance measurement and load testing automatization tool.
It uses other load generators such as JMeter, ab or phantom inside of it for
load generation and provides a common configuration system for them and
analytic tools for the results they produce.
''',
maintainer='Alexey Lavrenuke (load testing)',
maintainer_email='[email protected]',
url='http://yandex.github.io/yandex-tank/',
namespace_packages=["yandextank", "yandextank.plugins"],
packages=find_packages(exclude=["tests", "tmp", "docs", "data"]),
install_requires=[
'cryptography>=2.2.1', 'pyopenssl==18.0.0',
'psutil>=1.2.1', 'requests>=2.5.1', 'paramiko>=1.16.0',
'pandas>=0.23.3', 'numpy>=1.13.3', 'future>=0.16.0',
'pip>=8.1.2',
'pyyaml>=3.12', 'cerberus==1.2', 'influxdb>=5.0.0', 'netort==0.2.8'
],
setup_requires=[
],
tests_require=[
'pytest', 'pytest-runner', 'flake8',
],
license='LGPLv2',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)',
'Operating System :: POSIX',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Testing :: Traffic Generation',
'Programming Language :: Python :: 2',
],
entry_points={
'console_scripts': [
'yandex-tank = yandextank.core.cli:main',
'yandex-tank-check-ssh = yandextank.common.util:check_ssh_connection',
'tank-postloader = yandextank.plugins.DataUploader.cli:post_loader',
'tank-docs-gen = yandextank.validator.docs_gen:main'
],
},
package_data={
'yandextank.api': ['config/*'],
'yandextank.core': ['config/*'],
'yandextank.aggregator': ['config/*'],
'yandextank.plugins.Android': ['binary/*', 'config/*'],
'yandextank.plugins.Autostop': ['config/*'],
'yandextank.plugins.Bfg': ['config/*'],
'yandextank.plugins.Console': ['config/*'],
'yandextank.plugins.DataUploader': ['config/*'],
'yandextank.plugins.Influx': ['config/*'],
'yandextank.plugins.JMeter': ['config/*'],
'yandextank.plugins.JsonReport': ['config/*'],
'yandextank.plugins.Pandora': ['config/*'],
'yandextank.plugins.Phantom': ['config/*'],
'yandextank.plugins.RCAssert': ['config/*'],
'yandextank.plugins.ResourceCheck': ['config/*'],
'yandextank.plugins.ShellExec': ['config/*'],
'yandextank.plugins.ShootExec': ['config/*'],
'yandextank.plugins.Telegraf': ['config/*']
},
use_2to3=False, )
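# Hedged note (comment only): installing this package exposes the console
# scripts declared in entry_points above, e.g. the `yandex-tank` and
# `yandex-tank-check-ssh` commands become available on PATH and dispatch to
# yandextank.core.cli:main and yandextank.common.util:check_ssh_connection.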
| lgpl-2.1 |
hsiaoyi0504/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there a no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
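# Illustrative sanity check (not required by the example): the rescaling above
# preserves inversion, since (D^-1 C D^-1) (D P D) = D^-1 (C P) D = identity,
# so ``prec`` is still the exact inverse of ``cov``.
assert np.allclose(cov.dot(prec), np.eye(n_features))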
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
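# A minimal sketch, assuming only the estimates computed above: report how far
# each precision estimate is from the ground-truth precision matrix, measured
# with the Frobenius norm.
for est_name, est_prec in [('Empirical', linalg.inv(emp_cov)),
                           ('Ledoit-Wolf', lw_prec_),
                           ('GraphLasso', prec_)]:
    frob_error = np.linalg.norm(est_prec - prec, ord='fro')
    print("%-11s precision error (Frobenius norm): %.3f" % (est_name, frob_error))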
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
0asa/scikit-learn | sklearn/neighbors/base.py | 4 | 25065 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
with np.errstate(divide='ignore'):
dist = 1. / dist
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if (self.n_neighbors is None
or self.n_neighbors < self._fit_X.shape[0] // 2):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
return self
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns distance
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
            Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
X = check_array(X, accept_sparse='csr')
if n_neighbors is None:
n_neighbors = self.n_neighbors
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
j = np.arange(neigh_ind.shape[0])[:, None]
neigh_ind = neigh_ind[j, np.argsort(dist[j, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
return np.sqrt(dist[j, neigh_ind]), neigh_ind
else:
return dist[j, neigh_ind], neigh_ind
else:
return neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = self._tree.query(X, n_neighbors,
return_distance=return_distance)
return result
else:
raise ValueError("internal: _fit_method not recognized")
def kneighbors_graph(self, X, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if n_neighbors is None:
n_neighbors = self.n_neighbors
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
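        # Every row of the k-NN graph stores exactly n_neighbors entries, so
        # the CSR row pointer below is a simple arithmetic progression.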
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones((n_samples1, n_neighbors))
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
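            # Query one extra neighbor and drop the first column: when X is the
            # training data itself (as in the docstring example), each sample's
            # nearest neighbor is typically the sample itself at distance 0.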
data, ind = self.kneighbors(X, n_neighbors + 1,
return_distance=True)
A_data, A_ind = data[:, 1:], ind[:, 1:]
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
return csr_matrix((A_data.ravel(), A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point or points
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the euclidean distances to each point,
only present if return_distance=True.
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.radius_neighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 1.5, 0.5]]...), array([[1, 2]]...)
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
X = check_array(X, accept_sparse='csr')
if radius is None:
radius = self.radius
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = [np.where(d < radius)[0] for d in dist]
# if there are the same number of neighbors for each point,
# we can do a normal array. Otherwise, we return an object
# array with elements that are numpy arrays
try:
neigh_ind = np.asarray(neigh_ind, dtype=int)
dtype_F = float
except ValueError:
neigh_ind = np.asarray(neigh_ind, dtype='object')
dtype_F = object
if return_distance:
if self.effective_metric_ == 'euclidean':
dist = np.array([np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)],
dtype=dtype_F)
else:
dist = np.array([d[neigh_ind[i]]
for i, d in enumerate(dist)],
dtype=dtype_F)
return dist, neigh_ind
else:
return neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
ind, dist = results
return dist, ind
else:
return results
else:
raise ValueError("internal: _fit_method not recognized")
def radius_neighbors_graph(self, X, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if radius is None:
radius = self.radius
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
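        # Rows have varying numbers of neighbors, so the CSR row pointer is
        # built from the cumulative per-row neighbor counts.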
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
"""
return self._fit(X)
| bsd-3-clause |
godrayz/trading-with-python | cookbook/workingWithDatesAndTime.py | 77 | 1551 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 16 17:45:02 2011
@author: jev
"""
import time
import datetime as dt
from pandas import *
from pandas.core import datetools
# basic functions
print 'Epoch start: %s' % time.asctime(time.gmtime(0))
print 'Seconds from epoch: %.2f' % time.time()
today = dt.date.today()
print type(today)
print 'Today is %s' % today.strftime('%Y.%m.%d')
# parse datetime
d = dt.datetime.strptime('20120803 21:59:59',"%Y%m%d %H:%M:%S")
# time deltas
someDate = dt.date(2011,8,1)
delta = today - someDate
print 'Delta :', delta
# calculate difference in dates
delta = dt.timedelta(days=20)
print 'Today-delta=', today-delta
t = dt.datetime(*time.strptime('3/30/2004',"%m/%d/%Y")[0:5])
# the '*' operator unpacks the tuple, producing the argument list.
print t
# print every 3rd Wednesday of the month
for month in xrange(1,13):
t = dt.date(2013,month,1)+datetools.relativedelta(months=1)
offset = datetools.Week(weekday=4)
if t.weekday()<>4:
t_new = t+3*offset
else:
t_new = t+2*offset
t_new = t_new-datetools.relativedelta(days=30)
print t_new.strftime("%B, %d %Y (%A)")
#rng = DateRange(t, t+datetools.YearEnd())
#print rng
# create a range of times
start = dt.datetime(2012,8,1)+datetools.relativedelta(hours=9,minutes=30)
end = dt.datetime(2012,8,1)+datetools.relativedelta(hours=22)
rng = date_range(start,end,freq='30min')
for r in rng: print r.strftime("%Y%m%d %H:%M:%S") | bsd-3-clause |
wjfwzzc/Kaggle_Script | word2vec_nlp_tutorial/process/word_vectors.py | 1 | 2123 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import generators
from __future__ import nested_scopes
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import gensim
import keras.preprocessing.text
import nltk
import numpy
import pandas
import data
import process
word_vec_dim = 300
def build_word2vec():
sentences = []
for row in data.train_df['review'].append(data.unlabeled_df['review']):
sentences_df = pandas.DataFrame(nltk.sent_tokenize(row.decode('utf-8').strip()), columns=['sentence'])
sentences_df = process.raw_to_words(sentences_df, 'sentence')
sentences += sentences_df['sentence'].tolist()
model = gensim.models.Word2Vec(sentences, size=word_vec_dim, window=10, min_count=1, workers=1, seed=process.seed)
return model
# word2vec = build_word2vec()
word2vec = gensim.models.Word2Vec.load_word2vec_format('./process/300features_10contexts.bin', binary=True)
word2vec.init_sims(replace=True)
del data.unlabeled_df
train_df = process.raw_to_texts(data.train_df, 'review', dictionary=word2vec.vocab)
del data.train_df
test_df = process.raw_to_texts(data.test_df, 'review', dictionary=word2vec.vocab)
del data.test_df
sequence_tokenizer = keras.preprocessing.text.Tokenizer()
sequence_tokenizer.fit_on_texts(line.encode('utf-8') for line in train_df['review'].values)
max_features = len(sequence_tokenizer.word_index)
train = process.texts_to_sequences(train_df, 'review', sequence_tokenizer, maxlen=2500)
del train_df
test = process.texts_to_sequences(test_df, 'review', sequence_tokenizer, maxlen=2500)
del test_df
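# Build the embedding weight matrix: row i holds the word2vec vector for the
# token that the tokenizer mapped to index i; tokens missing from the word2vec
# vocabulary fall back to small random vectors.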
weights = numpy.zeros((max_features + 1, word_vec_dim))
for word, index in sequence_tokenizer.word_index.items():
# if index <= max_features and word in word2vec.vocab:
# weights[index, :] = word2vec[word]
if word in word2vec.vocab:
weights[index, :] = word2vec[word]
else:
weights[index, :] = numpy.random.uniform(-0.25, 0.25, word_vec_dim)
del word2vec
del sequence_tokenizer
| mit |
TheSumitGogia/insight | scripts/backend/draw/structs.py | 1 | 18954 | import numpy as np
from sklearn.cluster import AgglomerativeClustering as hcluster
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from collections import deque
import argparse
from scipy.spatial.distance import pdist
def draw(datas, names):
plt.ion()
figure, axes = plt.subplots(1, len(datas), sharey=True)
if len(datas) == 1: axes = [axes]
def draw_ellipse(axes, max_level, cluster):
max_level = max_level
x = 0.5 * (cluster.data[0] + cluster.data[1])
rng = cluster.data[1] - cluster.data[0]
if max_level > 1:
y = .25 + (cluster.level - 1) * 1.5 / (max_level - 1)
height = 1.5 / (max_level - 1)
else:
y = 1
height = .75
ell = Ellipse(xy=[x,y], width=rng, height=height, angle=0)
axes.add_artist(ell)
ell.set_clip_box(axes.bbox)
ell.set_alpha(0.5)
ell.set_facecolor([0, 0, 1.0])
return ell
for i in range(len(datas)):
tree = ClusterTree(datas[i])
clusters = tree.clusters
max_level = tree.max_level
ax = axes[i]
ax.set_title(names[i])
offset, length = [1], [2.0]
rng = np.max(datas[i]) - np.min(datas[i])
ax.hlines(0.1, 0, rng)
ax.set_xlim([0, rng])
ax.set_ylim([0, 2.0])
ax.eventplot(datas[i].copy(), lineoffsets=offset, linelengths=length, orientation='horizontal', colors='b')
ax.get_yaxis().set_visible(False)
for cid in clusters:
cluster = clusters[cid]
ellipse = draw_ellipse(ax, max_level, cluster)
plt.draw()
def draw_tree(tree, prop):
plt.ion()
figure, axes = plt.subplots()
axes = [axes]
trees = [tree]
def draw_ellipse(axes, max_level, cluster):
max_level = max_level
x = 0.5 * (cluster.data[0] + cluster.data[1])
rng = cluster.data[1] - cluster.data[0]
if max_level > 1:
y = .25 + (cluster.level - 1) * 1.5 / (max_level - 1)
height = 1.5 / (max_level - 1)
else:
y = 1
height = .75
ell = Ellipse(xy=[x,y], width=rng, height=height, angle=0)
axes.add_artist(ell)
ell.set_clip_box(axes.bbox)
ell.set_alpha(0.5)
ell.set_facecolor([0, 0, 1.0])
return ell
for i in range(len(trees)):
tree = trees[i]
data = tree.get_data()
clusters = tree.clusters
max_level = tree.max_level
ax = axes[i]
ax.set_title(prop)
offset, length = [1], [2.0]
rng = np.max(data) - np.min(data)
ax.hlines(0.1, 0, rng)
ax.set_xlim([0, rng])
ax.set_ylim([0, 2.0])
ax.eventplot(data.copy(), lineoffsets=offset, linelengths=length, orientation='horizontal', colors='b')
ax.get_yaxis().set_visible(False)
for cid in clusters:
cluster = clusters[cid]
ellipse = draw_ellipse(ax, max_level, cluster)
plt.draw()
class Cluster:
def __init__(self, id, left, right, parent=None, data=None, range=None, count=None):
self.id = id
self.left = left
self.right = right
self.data = data
self.range = range
self.count = count
self.parent = parent
self.items = None
class ClusterTree:
def __init__(self, data):
# NOTE: assumes data is 1-d numpy array
pos = data.argsort()
data = data[pos]
data = data.reshape(data.shape[0], 1)
drange = data[-1,0] - data[0,0]
dcount = data.shape[0]
# run clustering
# clusterer = hcluster(compute_full_tree=True)
clusterer = hcluster(compute_full_tree=True, linkage='complete')
clusterer.fit(data)
hc_children = clusterer.children_
# setup leaf clusters
clusters = {i: Cluster(i, None, None, data=(data[i, 0], data[i, 0]), count=1, range=0) for i in range(data.shape[0])}
leaves = {}
# setup tree
if drange != 0:
minrange = 1
for idx in range(hc_children.shape[0]):
children = hc_children[idx]
lc, rc = clusters[children[0]], clusters[children[1]]
check = 0 if lc.data[-1] < rc.data[0] else 1
left_child, right_child = clusters[children[check]], clusters[children[1-check]]
id = idx + data.shape[0]
cluster = Cluster(id, left_child, right_child)
cluster.data = (min(left_child.data), max(right_child.data))
cluster.range = (cluster.data[-1] - cluster.data[0]) / (drange)
if cluster.range < minrange: minrange = cluster.range
cluster.count = left_child.count + right_child.count
left_child.parent = cluster
right_child.parent = cluster
clusters[id] = cluster
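            # Collapse duplicate values: promote each node past zero-range
            # ancestors so identical points share a single cluster, and record
            # in cluster.items the leaf indices that cluster absorbs.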
for id in clusters:
cluster = clusters[id]
leaf = (cluster.count == 1)
while (cluster.parent is not None and cluster.parent.range == 0):
cluster = cluster.parent
clusters[id] = cluster
if cluster.range == 0 and cluster.parent is not None and cluster.parent.range != 0:
if cluster.items is None:
cluster.items = set()
if leaf:
leaves[id] = cluster
cluster.items.add(id)
cids = clusters.keys()
for id in cids:
cluster = clusters[id]
if cluster.id != id:
del clusters[id]
elif cluster.range == 0:
cluster.left, cluster.right = None, None
for id in clusters:
cluster = clusters[id]
if cluster.range == 0:
if minrange < 0.1:
cluster.range = minrange
else:
cluster.range = 1e-4
cluster.count = cluster.count * 1.0 / dcount
else:
big_cluster = Cluster(data.shape[0], None, None, data=(data[0,0], data[0,0]), count=data.shape[0], range=1e-4)
big_cluster.items = set(range(data.shape[0]))
for i in range(data.shape[0]):
leaves[i] = big_cluster
clusters = {data.shape[0]: big_cluster}
# setup leaf levels
for lid in leaves:
lcluster = leaves[lid]
lcluster.level = 1
# compute all tree levels
computed = set([leaves[lid].id for lid in leaves.keys()])
for lid in leaves:
cluster = leaves[lid]
while (cluster is not None and \
((cluster.data[0] == cluster.data[1]) or \
(cluster.right.id in computed and cluster.left.id in computed))):
if (cluster.id not in computed):
cluster.level = max(cluster.right.level, cluster.left.level) + 1
computed.add(cluster.id)
cluster = cluster.parent
del computed
# set tree state variables
biggest, max_level = 0, 0
for id in clusters:
if id > biggest: biggest = id
if clusters[id].level > max_level: max_level = clusters[id].level
self.data = data
self.root = clusters[biggest]
self.max_level = max_level
self.clusters = clusters
self.leaves = leaves
self.__dindex = pos
self.__findex = np.argsort(pos)
self.__drange = drange
self.__dcount = dcount
def translate(self, id):
return self.__findex[id]
def get_leaf(self, id):
return self.clusters[id]
def get_data(self):
return self.data.copy()
def get_top(self, numLevels):
clusters = []
toAdd = deque([self.root])
while len(toAdd) > 0:
            cluster = toAdd.popleft()
if cluster.level > self.max_level - numLevels:
clusters.append(cluster)
                if cluster.left is not None:
                    toAdd.append(cluster.left)
                if cluster.right is not None:
                    toAdd.append(cluster.right)
return reversed(clusters)
def get_children(self, cluster):
""" O(n) operation for getting items in cluster """
queue = deque([cluster])
children = set()
while len(queue) > 0:
cluster = queue.popleft()
if cluster.left is None and cluster.right is None:
children.update(cluster.items)
continue
if cluster.left is not None:
queue.append(cluster.left)
if cluster.right is not None:
queue.append(cluster.right)
interim = np.array(list(children))
children = set(self.__dindex[interim].tolist())
return children
def trace(self, ancestor, child):
cluster = child
trace = []
while cluster is not None:
trace.append(cluster)
if ancestor == cluster:
return trace
cluster = cluster.parent
return False
def contains(self, container, cluster):
current = cluster
while current is not None:
if container == current:
return True
current = current.parent
return False
class NCluster:
def __init__(self, id, left, right, data=None, parent=None, range=None, count=None):
self.id = id
self.left = left
self.right = right
self.data = data # data indices, NOTE: a bit memory intensive
self.range = range
self.count = count
self.parent = parent
class NClusterTree:
def __diameter(self, data):
distances = pdist(data, 'euclidean')
if distances.shape[0] == 0:
return 0
else:
diameter = np.max(distances)
return diameter
def __compute_distances(self, data):
distances = pdist(data, 'euclidean')
self.__distances = distances
def __init__(self, data):
drange = self.__diameter(data)
dcount = data.shape[0]
# run clustering
clusterer = hcluster(compute_full_tree=True)
clusterer.fit(data.copy())
hc_children = clusterer.children_
# setup leaf clusters
clusters = {i: Cluster(i, None, None, data=np.array([data[i, :]]), count=1, range=0) for i in range(data.shape[0])}
leaves = {}
# setup tree
if drange != 0:
minrange = 1
for idx in range(hc_children.shape[0]):
children = hc_children[idx]
left_child, right_child = clusters[children[0]], clusters[children[1]]
id = idx + data.shape[0]
cluster = Cluster(id, left_child, right_child)
cluster.data = np.vstack((left_child.data, right_child.data))
cluster.range = self.__diameter(cluster.data) / (drange)
if cluster.range < minrange: minrange = cluster.range
cluster.count = left_child.count + right_child.count
left_child.parent = cluster
right_child.parent = cluster
clusters[id] = cluster
for id in clusters:
cluster = clusters[id]
leaf = (cluster.count == 1)
while (cluster.parent is not None and cluster.parent.range == 0):
cluster = cluster.parent
clusters[id] = cluster
if cluster.range == 0 and cluster.parent is not None and cluster.parent.range != 0:
if cluster.items is None:
cluster.items = set()
if leaf:
leaves[id] = cluster
cluster.items.add(id)
# clear "clusters" from original leaves
# set true leaves children to empty
cids = clusters.keys()
for id in cids:
cluster = clusters[id]
if cluster.id != id:
del clusters[id]
elif cluster.range == 0:
cluster.left, cluster.right = None, None
for id in clusters:
cluster = clusters[id]
if cluster.range == 0:
if minrange < 0.1:
cluster.range = minrange
else:
cluster.range = 1e-4
cluster.count = cluster.count * 1.0 / dcount
else:
big_cluster = Cluster(data.shape[0], None, None, data=np.array(data[0, :]), count=data.shape[0], range=1e-4)
big_cluster.items = set(range(data.shape[0]))
for i in range(data.shape[0]):
leaves[i] = big_cluster
clusters = {data.shape[0]: big_cluster}
# setup leaf levels
for lid in leaves:
lcluster = leaves[lid]
lcluster.level = 1
# compute all tree levels
computed = set([leaves[lid].id for lid in leaves.keys()])
for lid in leaves:
cluster = leaves[lid]
while (cluster is not None and (cluster.right is None or cluster.right.id in computed) and (cluster.left is None or cluster.left.id in computed)):
if (cluster.id not in computed):
cluster.level = max(cluster.right.level, cluster.left.level) + 1
computed.add(cluster.id)
cluster = cluster.parent
del computed
# set tree state variables
biggest, max_level = 0, 0
test_cl = leaves[0]
while (test_cl.parent is not None):
test_cl = test_cl.parent
self.root = test_cl
self.max_level = test_cl.level
self.data = data
self.clusters = clusters
self.leaves = leaves
self.__drange = drange
self.__dcount = dcount
def translate(self, id):
return id
def get_top(self, numLevels):
clusters = []
toAdd = deque([self.root])
while len(toAdd) > 0:
cluster = toAdd.popleft()
if cluster.level > self.max_level - numLevels:
clusters.append(cluster)
if cluster.left is not None:
toAdd.append(cluster.left)
if cluster.right is not None:
toAdd.append(cluster.right)
clusters.reverse()
return clusters
def get_leaf(self, id):
return self.leaves[id]
def get_data(self):
return self.data.copy()
def get_children(self, cluster):
""" O(n) operation for getting items in cluster """
queue = deque([cluster])
children = set()
while len(queue) > 0:
cluster = queue.popleft()
if cluster.left is None and cluster.right is None:
children.update(cluster.items)
continue
if cluster.left is not None:
queue.append(cluster.left)
if cluster.right is not None:
queue.append(cluster.right)
return children
def trace(self, ancestor, child):
cluster = child
trace = []
while cluster is not None:
trace.append(cluster)
if ancestor == cluster:
return trace
cluster = cluster.parent
return False
def contains(self, container, cluster):
current = cluster
while current is not None:
if container == current:
return True
current = current.parent
return False
def ctree_test(clust_str, var_str, space_str, rng, num_samples):
from data import UniformMixture as um
def gen_data(clust_str, var_str, space_str, rng, num_samples):
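        # Spec strings use one letter per group (s/m/l -> 1/3/5), with '-'
        # separating clusters: `clust_str` sets relative sample counts,
        # `var_str` the cluster widths and `space_str` the gaps between them.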
size_map = {'s': 1, 'm': 3, 'l': 5}
var_map = {'s': 1, 'm': 3, 'l': 5}
space_map = {'s': 1, 'm': 3, 'l': 5}
sizes = [[size_map[c] for c in clust] for clust in clust_str.split('-')]
sizes = [sum(csizes) for csizes in sizes]
spaces = [[space_map[c] for c in clust] for clust in space_str.split('-')]
spaces = [sum(cspaces) for cspaces in spaces]
variances = [[var_map[c] for c in clust] for clust in var_str.split('-')]
variances = [sum(cvars) for cvars in variances]
total_space = sum(spaces) + sum(variances)
total_size = sum(sizes)
ranges, probs = [], []
tspace = 0
for i in range(len(variances)):
space, var, size = spaces[i], variances[i], sizes[i]
tspace += space
start = (tspace * 1.0 / total_space) * rng
end = start + var * 1.0 / total_space * rng
slot = (start, end)
tspace += var
prob = size * 1.0 / total_size
ranges.append(slot); probs.append(prob)
dist = um(ranges, probs)
data = dist.sample(num_samples)
return data
def draw_ellipse(axes, cluster):
x = 0.5 * (cluster.data[0] + cluster.data[1])
rng = cluster.data[1] - cluster.data[0]
y = .25 + (cluster.level - 1) * 1.5 / (max_level - 1)
height = 1.5 / (max_level - 1)
ell = Ellipse(xy=[x,y], width=rng, height=height, angle=0)
axes.add_artist(ell)
ell.set_clip_box(axes.bbox)
ell.set_alpha(0.5)
ell.set_facecolor([0, 0, 1.0])
data = gen_data(clust_str, var_str, space_str, rng, num_samples)
ct = ClusterTree(data)
max_level, clusters = ct.max_level, ct.clusters
fig, ax = plt.subplots()
offset, length = [1], [2.0]
plt.hlines(0.1, 0, rng)
plt.xlim([0, rng])
plt.ylim([0, 2.0])
ev = plt.eventplot(data, lineoffsets=offset, linelengths=length, orientation='horizontal', colors='b')
ax.get_yaxis().set_visible(False)
for cluster in clusters:
draw_ellipse(ax, clusters[cluster])
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Test Cluster Tree")
parser.add_argument('-n', '--samples', nargs=1,required=True,type=int,help='number of samples')
parser.add_argument('-r', '--range', nargs=1,required=True,type=int,help='max data value')
parser.add_argument('-s', '--sizes', nargs=1,required=True,type=str,help='cluster size string')
parser.add_argument('-p', '--spaces', nargs=1,required=True,type=str,help='spaces between clusters')
parser.add_argument('-v', '--variances', nargs=1,required=True,type=str,help='spaces for clusters')
args = vars(parser.parse_args())
samples = args['samples'][0]
rng = args['range'][0]
sizes = args['sizes'][0]
spaces = args['spaces'][0]
variances = args['variances'][0]
ctree_test(sizes, variances, spaces, rng, samples)
| mit |
ARM-software/astc-encoder | Test/astc_test_result_plot.py | 1 | 12791 | #!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# -----------------------------------------------------------------------------
# Copyright 2020-2021 Arm Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -----------------------------------------------------------------------------
"""
The ``astc_test_result_plot.py`` script consolidates all current sets of
reference results into a single graphical plot.
"""
import re
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import testlib.resultset as trs
from collections import defaultdict as ddict
def find_reference_results():
"""
Scrape the Test/Images directory for result CSV files and return an
mapping of the result sets.
Returns:
Returns a three deep tree of dictionaries, with the final dict
pointing at a `ResultSet` object. The hierarchy is:
imageSet => quality => encoder => result
"""
scriptDir = os.path.dirname(__file__)
imageDir = os.path.join(scriptDir, "Images")
# Pattern for extracting useful data from the CSV file name
filePat = re.compile(r"astc_reference-(.+)_(.+)_results\.csv")
# Build a three level dictionary we can write into
results = ddict(lambda: ddict(lambda: ddict()))
    # Find all CSVs, load them and store them in the dict tree
for root, dirs, files in os.walk(imageDir):
for name in files:
match = filePat.match(name)
if match:
fullPath = os.path.join(root, name)
encoder = match.group(1)
quality = match.group(2)
imageSet = os.path.basename(root)
if imageSet != "Kodak":
continue
testRef = trs.ResultSet(imageSet)
testRef.load_from_file(fullPath)
results[imageSet][quality]["ref-%s" % encoder] = testRef
return results
def get_series(results, tgtEncoder, tgtQuality, resFilter=lambda x: True):
psnrData = []
mtsData = []
marker = []
records = []
for imageSet, iResults in results.items():
for quality, qResults in iResults.items():
if quality != tgtQuality:
continue
for encoder, eResults in qResults.items():
if encoder != tgtEncoder:
continue
for record in eResults.records:
if resFilter(record):
records.append(record)
psnrData.append(record.psnr)
mtsData.append(record.cRate)
if "ldr-xy" in record.name:
marker.append('$N$')
elif "ldr-l" in record.name:
marker.append('$G$')
elif "ldr" in record.name:
marker.append('$L$')
elif "hdr" in record.name:
marker.append('$H$')
else:
marker.append('$?$')
return mtsData, psnrData, marker, records
def get_series_rel(results, refEncoder, refQuality, tgtEncoder, tgtQuality, resFilter=lambda x: True):
mts1, psnr1, marker1, rec1 = get_series(results, tgtEncoder, tgtQuality, resFilter)
if refEncoder is None:
refEncoder = tgtEncoder
if refQuality is None:
refQuality = tgtQuality
mts2, psnr2, marker2, rec2 = get_series(results, refEncoder, refQuality, resFilter)
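    # Relative series: throughput as a ratio of target over reference, and
    # image quality as a PSNR delta (dB) of target minus reference.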
mtsm = [x/mts2[i] for i, x in enumerate(mts1)]
psnrm = [x - psnr2[i] for i, x in enumerate(psnr1)]
return mtsm, psnrm, marker1, rec1
def get_human_eq_name(encoder, quality):
parts = encoder.split("-")
if len(parts) == 2:
return "astcenc %s -%s" % (parts[1], quality)
else:
return "astcenc-%s %s -%s" % (parts[2], parts[1], quality)
def get_human_e_name(encoder):
parts = encoder.split("-")
if len(parts) == 2:
return "astcenc %s" % parts[1]
else:
return "astcenc-%s %s" % (parts[2], parts[1])
def get_human_q_name(quality):
return "-%s" % quality
def plot(results, chartRows, chartCols, blockSizes,
relative, pivotEncoder, pivotQuality, fileName, limits):
fig, axs = plt.subplots(nrows=len(chartRows), ncols=len(chartCols),
sharex=True, sharey=True, figsize=(15, 8.43))
for a in fig.axes:
a.tick_params(
axis="x", which="both",
bottom=True, top=False, labelbottom=True)
a.tick_params(
axis="y", which="both",
left=True, right=False, labelleft=True)
for i, row in enumerate(chartRows):
for j, col in enumerate(chartCols):
if row == "fastest" and (("1.7" in col) or ("2.0" in col)):
if len(chartCols) == 1:
fig.delaxes(axs[i])
else:
fig.delaxes(axs[i][j])
continue
if len(chartRows) == 1 and len(chartCols) == 1:
ax = axs
elif len(chartCols) == 1:
ax = axs[i]
else:
ax = axs[i, j]
title = get_human_eq_name(col, row)
if not relative:
ax.set_title(title, y=0.97, backgroundcolor="white")
ax.set_xlabel('Coding performance (MTex/s)')
ax.set_ylabel('PSNR (dB)')
else:
if pivotEncoder and pivotQuality:
tag = get_human_eq_name(pivotEncoder, pivotQuality)
elif pivotEncoder:
tag = get_human_e_name(pivotEncoder)
else:
assert(pivotQuality)
tag = get_human_q_name(pivotQuality)
ax.set_title("%s vs. %s" % (title, tag), y=0.97, backgroundcolor="white")
ax.set_xlabel('Performance scaling')
ax.set_ylabel('PSNR delta (dB)')
for k, series in enumerate(blockSizes):
fn = lambda x: x.blkSz == series
if not relative:
x, y, m, r = get_series(results, col, row, fn)
else:
x, y, m, r = get_series_rel(results, pivotEncoder, pivotQuality,
col, row, fn)
color = None
label = "%s blocks" % series
for xp, yp, mp in zip(x, y, m):
ax.scatter([xp],[yp], s=16, marker=mp,
color="C%u" % k, label=label)
label = None
if i == 0 and j == 0:
ax.legend(loc="lower right")
for i, row in enumerate(chartRows):
for j, col in enumerate(chartCols):
if len(chartRows) == 1 and len(chartCols) == 1:
ax = axs
elif len(chartCols) == 1:
ax = axs[i]
else:
ax = axs[i, j]
ax.grid(ls=':')
if not relative:
ax.set_xlim(left=0, right=limits[0])
else:
ax.set_xlim(left=1, right=limits[0])
fig.tight_layout()
fig.savefig(fileName)
def main():
"""
The main function.
Returns:
int: The process return code.
"""
absoluteXLimit = 60
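    # Each entry in `charts` is the positional argument list for plot():
    # [qualities (rows), encoders (columns), block sizes, relative?, pivot
    # encoder, pivot quality, output file name, axis limits]. A trailing
    # extra element force-selects just that chart (see the logic below).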
charts = [
# --------------------------------------------------------
# Latest in stable series charts
[
# Relative scores
["thorough", "medium", "fast"],
["ref-2.5-avx2", "ref-3.0-avx2"],
["4x4", "6x6", "8x8"],
True,
"ref-1.7",
None,
"relative-stable-series.png",
(None, None)
], [
# Absolute scores
["thorough", "medium", "fast"],
["ref-1.7", "ref-2.5-avx2", "ref-3.0-avx2"],
["4x4", "6x6", "8x8"],
False,
None,
None,
"absolute-stable-series.png",
(absoluteXLimit, None)
],
# --------------------------------------------------------
# Latest 2.x vs 1.7 release charts
[
# Relative scores
["thorough", "medium", "fast", "fastest"],
["ref-2.5-avx2"],
["4x4", "6x6", "8x8"],
True,
"ref-1.7",
None,
"relative-2.x-vs-1.x.png",
(None, None)
], [
# Absolute scores
["thorough", "medium", "fast", "fastest"],
["ref-1.7", "ref-2.5-avx2"],
["4x4", "6x6", "8x8"],
False,
None,
None,
"absolute-2.x-vs-1.x.png",
(absoluteXLimit, None)
], [
# Relative ISAs of latest
["thorough", "medium", "fast", "fastest"],
["ref-2.5-sse4.1", "ref-2.5-avx2"],
["4x4", "6x6", "8x8"],
True,
"ref-2.5-sse2",
None,
"relative-2.x-isa.png",
(None, None)
], [
# Relative quality of latest
["medium", "fast", "fastest"],
["ref-2.5-avx2"],
["4x4", "6x6", "8x8"],
True,
None,
"thorough",
"relative-2.x-quality.png",
(None, None)
],
# --------------------------------------------------------
# Latest 3.x vs 2.5 release charts
[
# Relative scores
["thorough", "medium", "fast", "fastest"],
["ref-3.0-avx2"],
["4x4", "6x6", "8x8"],
True,
"ref-2.5-avx2",
None,
"relative-3.x-vs-2.x.png",
(None, None)
], [
# Absolute scores
["thorough", "medium", "fast", "fastest"],
["ref-2.5-avx2", "ref-3.0-avx2"],
["4x4", "6x6", "8x8"],
False,
None,
None,
"absolute-3.x-vs-2.x.png",
(absoluteXLimit, None)
], [
# Relative ISAs of latest
["thorough", "medium", "fast", "fastest"],
["ref-3.0-sse4.1", "ref-3.0-avx2"],
["4x4", "6x6", "8x8"],
True,
"ref-3.0-sse2",
None,
"relative-3.x-isa.png",
(None, None)
], [
# Relative quality of latest
["medium", "fast", "fastest"],
["ref-3.0-avx2"],
["4x4", "6x6", "8x8"],
True,
None,
"thorough",
"relative-3.x-quality.png",
(None, None)
],
# --------------------------------------------------------
# Latest 3.x vs 2.5 release charts
[
# Relative scores
["thorough", "medium", "fast", "fastest"],
["ref-main-avx2"],
["4x4", "6x6", "8x8"],
True,
"ref-3.0-avx2",
None,
"relative-main-vs-3.x.png",
(None, None),
1
],
[
# Relative scores
["thorough", "medium", "fast"],
["ref-main-avx2"],
["4x4", "6x6", "8x8"],
True,
"ref-1.7",
None,
"relative-main-vs-1.x.png",
(None, None),
1
],
]
results = find_reference_results()
# Force select is triggered by adding a trailing entry to the argument list
# of the charts that you want rendered; designed for debugging use cases
maxIndex = 0
expectedLength = 8
for chart in charts:
maxIndex = max(maxIndex, len(chart))
for chart in charts:
# If force select is enabled then only keep the forced ones
if len(chart) != maxIndex:
print("Skipping %s" % chart[6])
continue
else:
print("Generating %s" % chart[6])
# If force select is enabled then strip the dummy force option
if maxIndex != expectedLength:
chart = chart[:expectedLength]
plot(results, *chart)
return 0
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 |
mjgrav2001/scikit-learn | sklearn/metrics/regression.py | 175 | 16953 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
multioutput : array-like or string in ['raw_values', uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
    type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights to np.average results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
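# Illustrative sketch, not part of the original module: unlike ``r2_score``
# below, explained_variance_score ignores a constant bias in the predictions,
# because it subtracts the mean residual ``y_diff_avg``.  For y_true = [1, 2, 3]
# and y_pred = [2, 3, 4] (every prediction off by +1) the residuals have zero
# variance, so:
#
#     explained_variance_score([1, 2, 3], [2, 3, 4])  # 1.0
#     r2_score([1, 2, 3], [2, 3, 4])                  # -0.5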
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average',
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
        Default value corresponds to 'variance_weighted', but
        will be changed to 'uniform_average' in future versions.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
    # arbitrarily set to zero to avoid -inf scores; having a constant
    # y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
# @FIXME change in 0.18
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value, it will be changed "
"to 'uniform_average' in 0.18.",
DeprecationWarning)
multioutput = 'variance_weighted'
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights to np.average results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
        # avoid failing on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
| bsd-3-clause |
alexlee-gk/visual_dynamics | visual_dynamics/utils/rl_util.py | 1 | 11060 | from __future__ import division, print_function
import time
import matplotlib.animation as manimation
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
from visual_dynamics.envs.env_spec import EnvSpec
from visual_dynamics.utils import iter_util
from visual_dynamics.utils.container import ImageDataContainer
from visual_dynamics.gui.grid_image_visualizer import GridImageVisualizer
def split_observations(observations):
curr_observations = [observations_[:-1] for observations_ in observations]
next_observations = [observations_[1:] for observations_ in observations]
return curr_observations, next_observations
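# Tiny illustrative check, not part of the original module: each current
# observation is paired with the observation that follows it.
#
#     obs = [['o0', 'o1', 'o2']]
#     assert split_observations(obs) == ([['o0', 'o1']], [['o1', 'o2']])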
def discount_return(rewards, gamma):
return np.dot(rewards, gamma ** np.arange(len(rewards)))
def discount_returns(rewards, gamma):
return [discount_return(rewards_, gamma) for rewards_ in rewards]
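# Illustrative sketch, not part of the original module: ``discount_return``
# weights the reward at step t by gamma ** t, e.g. rewards [1, 1, 1] with
# gamma = 0.5 give 1 + 0.5 + 0.25 = 1.75; ``discount_returns`` applies this per
# trajectory.
#
#     assert np.isclose(discount_return([1.0, 1.0, 1.0], 0.5), 1.75)
#     assert discount_returns([[1.0, 1.0, 1.0], [2.0]], 0.5) == [1.75, 2.0]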
class FeaturePredictorServoingImageVisualizer(object):
def __init__(self, predictor, visualize=1, window_title=None):
self.predictor = predictor
self.visualize = visualize
if visualize:
rows, cols = 1, 3
labels = [predictor.input_names[0], predictor.input_names[0] + ' next', predictor.input_names[0] + ' target']
if visualize > 1:
feature_names = iter_util.flatten_tree(predictor.feature_name)
next_feature_names = iter_util.flatten_tree(predictor.next_feature_name)
assert len(feature_names) == len(next_feature_names)
rows += len(feature_names)
cols += 1
labels.insert(2, '')
for feature_name, next_feature_name in zip(feature_names, next_feature_names):
labels += [feature_name, feature_name + ' next', next_feature_name, feature_name + ' target']
fig = plt.figure(figsize=(4 * cols, 4 * rows), frameon=False, tight_layout=True)
if window_title is None:
try:
window_title = predictor.solvers[-1].snapshot_prefix
except IndexError:
window_title = predictor.name
fig.canvas.set_window_title(window_title)
gs = gridspec.GridSpec(1, 1)
self.image_visualizer = GridImageVisualizer(fig, gs[0], rows * cols, rows=rows, cols=cols, labels=labels)
plt.show(block=False)
else:
self.image_visualizer = None
def update(self, image, next_image, target_image, action):
if self.visualize:
vis_images = [image, next_image, target_image]
vis_images = list(*self.predictor.preprocess([np.array(vis_images)]))
if self.visualize == 1:
vis_features = vis_images
else:
feature = self.predictor.feature([image])
feature_next = self.predictor.feature([next_image])
feature_next_pred = self.predictor.next_feature([image, action])
feature_target = self.predictor.feature([target_image])
# put all features into a flattened list
vis_features = [feature, feature_next, feature_next_pred, feature_target]
vis_features = [vis_features[icol][irow]
for irow in range(self.image_visualizer.rows - 1)
for icol in range(self.image_visualizer.cols)]
vis_images.insert(2, None)
vis_features = vis_images + vis_features
self.image_visualizer.update(vis_features)
def do_rollouts(env, pol, num_trajs, num_steps, target_distance=0,
output_dir=None, image_visualizer=None, record_file=None,
verbose=False, gamma=0.9, seeds=None, reset_states=None,
cv2_record_file=None, image_transformer=None, ret_rewards_only=False, close_env=False):
"""
image_transformer is for the returned observations and for cv2's video writer
"""
random_state = np.random.get_state()
if reset_states is None:
reset_states = [None] * num_trajs
else:
num_trajs = min(num_trajs, len(reset_states))
if output_dir:
container = ImageDataContainer(output_dir, 'x')
container.reserve(list(env.observation_space.spaces.keys()) + ['state'], (num_trajs, num_steps + 1))
container.reserve(['action', 'reward'], (num_trajs, num_steps))
container.add_info(environment_config=env.get_config())
container.add_info(env_spec_config=EnvSpec(env.action_space, env.observation_space).get_config())
container.add_info(policy_config=pol.get_config())
else:
container = None
if record_file:
if image_visualizer is None:
raise ValueError('image_visualizer cannot be None for recording')
FFMpegWriter = manimation.writers['ffmpeg']
writer = FFMpegWriter(fps=1.0 / env.dt)
fig = plt.gcf()
writer.setup(fig, record_file, fig.dpi)
if cv2_record_file:
import cv2
fourcc = cv2.VideoWriter_fourcc(*'X264')
image_shape = env.observation_space.spaces['image'].shape[:2]
if image_transformer:
image_shape = image_transformer.preprocess_shape(image_shape)
video_writer = cv2.VideoWriter(cv2_record_file, fourcc, 1.0 / env.dt, image_shape[:2][::-1])
def preprocess_image(obs):
if image_transformer:
if isinstance(obs, dict):
obs = dict(obs)
for name, maybe_image in obs.items():
if name.endswith('image'):
obs[name] = image_transformer.preprocess(maybe_image)
else:
obs = image_transformer.preprocess(obs)
return obs
start_time = time.time()
if verbose:
errors_header_format = '{:>30}{:>15}'
errors_row_format = '{:>30}{:>15.4f}'
print(errors_header_format.format('(traj_iter, step_iter)', 'reward'))
if ret_rewards_only:
rewards = []
else:
states, observations, actions, rewards = [], [], [], []
frame_iter = 0
done = False
for traj_iter, state in enumerate(reset_states):
if verbose:
print('=' * 45)
if seeds is not None and len(seeds) > traj_iter:
np.random.seed(seed=seeds[traj_iter])
if ret_rewards_only:
rewards_ = []
else:
states_, observations_, actions_, rewards_ = [], [], [], []
obs = env.reset(state)
frame_iter += 1
if state is None:
state = env.get_state()
if target_distance:
raise NotImplementedError
for step_iter in range(num_steps):
try:
if not ret_rewards_only:
observations_.append(preprocess_image(obs))
states_.append(state)
if container:
container.add_datum(traj_iter, step_iter, state=state, **obs)
if cv2_record_file:
vis_image = obs['image'].copy()
vis_image = cv2.cvtColor(vis_image, cv2.COLOR_RGB2BGR)
vis_image = image_transformer.preprocess(vis_image)
video_writer.write(vis_image)
action = pol.act(obs)
prev_obs = obs
obs, reward, episode_done, _ = env.step(action) # action is updated in-place if needed
frame_iter += 1
if verbose:
print(errors_row_format.format(str((traj_iter, step_iter)), reward))
prev_state, state = state, env.get_state()
if not ret_rewards_only:
actions_.append(action)
rewards_.append(reward)
if step_iter == (num_steps - 1) or episode_done:
if not ret_rewards_only:
observations_.append(preprocess_image(obs))
states_.append(state)
if container:
container.add_datum(traj_iter, step_iter, action=action, reward=reward)
if step_iter == (num_steps - 1) or episode_done:
container.add_datum(traj_iter, step_iter + 1, state=state, **obs)
if step_iter == (num_steps - 1) or episode_done:
if cv2_record_file:
vis_image = obs['image'].copy()
vis_image = cv2.cvtColor(vis_image, cv2.COLOR_RGB2BGR)
vis_image = image_transformer.preprocess(vis_image)
video_writer.write(vis_image)
if image_visualizer:
env.render()
image = prev_obs['image']
next_image = obs['image']
target_image = obs['target_image']
try:
import tkinter
except ImportError:
import Tkinter as tkinter
try:
image_visualizer.update(image, next_image, target_image, action)
if record_file:
writer.grab_frame()
except tkinter.TclError:
done = True
if done or episode_done:
break
except KeyboardInterrupt:
break
if verbose:
print('-' * 45)
print(errors_row_format.format('discounted return', discount_return(rewards_, gamma)))
print(errors_row_format.format('return', discount_return(rewards_, 1.0)))
if not ret_rewards_only:
states.append(states_)
observations.append(observations_)
actions.append(actions_)
rewards.append(rewards_)
if done:
break
if cv2_record_file:
video_writer.release()
if verbose:
print('=' * 45)
print(errors_row_format.format('mean discounted return', np.mean(discount_returns(rewards, gamma))))
print(errors_row_format.format('mean return', np.mean(discount_returns(rewards, 1.0))))
else:
discounted_returns = discount_returns(rewards, gamma)
print('mean discounted return: %.4f (%.4f)' % (np.mean(discounted_returns),
np.std(discounted_returns) / np.sqrt(len(discounted_returns))))
returns = discount_returns(rewards, 1.0)
print('mean return: %.4f (%.4f)' % (np.mean(returns),
np.std(returns) / np.sqrt(len(returns))))
if close_env:
env.close()
if record_file:
writer.finish()
if container:
container.close()
end_time = time.time()
if verbose:
print("average FPS: {}".format(frame_iter / (end_time - start_time)))
np.random.set_state(random_state)
if ret_rewards_only:
return rewards
else:
return states, observations, actions, rewards
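# Hypothetical usage sketch (``env`` and ``pol`` are stand-ins, not defined in
# this module): collect 5 trajectories of up to 100 steps, keep only the
# rewards, and report the mean discounted return.
#
#     rewards = do_rollouts(env, pol, num_trajs=5, num_steps=100,
#                           gamma=0.9, ret_rewards_only=True)
#     print(np.mean(discount_returns(rewards, 0.9)))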
| mit |
akionakamura/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
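# Minimal shape check, illustrative and not part of the original example: the
# augmented dataset stacks the original images with four 1-pixel shifts, so it
# is exactly 5 times larger.
#
#     X_aug, Y_aug = nudge_dataset(np.zeros((10, 64), dtype='float32'),
#                                  np.zeros(10))
#     assert X_aug.shape == (50, 64) and Y_aug.shape == (50,)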
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
hlin117/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 84 | 7866 | # Important note for the deprecation cleaning of 0.20 :
# All the function and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_gmm.py'
import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.utils.testing import assert_warns_message, ignore_warnings
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.mixture.dpgmm import digamma, gammaln
from sklearn.mixture.dpgmm import wishart_log_det, wishart_logz
np.seterr(all='warn')
@ignore_warnings(category=DeprecationWarning)
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
@ignore_warnings(category=DeprecationWarning)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_digamma():
assert_warns_message(DeprecationWarning, "The function digamma is"
" deprecated in 0.18 and will be removed in 0.20. "
"Use scipy.special.digamma instead.", digamma, 3)
@ignore_warnings(category=DeprecationWarning)
def test_gammaln():
assert_warns_message(DeprecationWarning, "The function gammaln"
" is deprecated in 0.18 and will be removed"
" in 0.20. Use scipy.special.gammaln instead.",
gammaln, 3)
@ignore_warnings(category=DeprecationWarning)
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
result = assert_warns_message(DeprecationWarning, "The function "
"log_normalize is deprecated in 0.18 and"
" will be removed in 0.20.",
log_normalize, a)
assert np.allclose(v, result, rtol=0.01)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_log_det():
a = np.array([0.1, 0.8, 0.01, 0.09])
b = np.array([0.2, 0.7, 0.05, 0.1])
assert_warns_message(DeprecationWarning, "The function "
"wishart_log_det is deprecated in 0.18 and"
" will be removed in 0.20.",
wishart_log_det, a, b, 2, 4)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_logz():
assert_warns_message(DeprecationWarning, "The function "
"wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.", wishart_logz,
3, np.identity(3), 1, 3)
@ignore_warnings(category=DeprecationWarning)
def test_DPGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `DPGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type='dirichlet_process'` "
"instead. DPGMM is deprecated in 0.18 and will be removed in 0.20.",
DPGMM)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_VBGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `VBGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type="
"'dirichlet_distribution'` instead. VBGMM is deprecated "
"in 0.18 and will be removed in 0.20.", VBGMM)
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_vbgmm_no_modify_alpha():
alpha = 2.
n_components = 3
X, y = make_blobs(random_state=1)
vbgmm = VBGMM(n_components=n_components, alpha=alpha, n_iter=1)
assert_equal(vbgmm.alpha, alpha)
assert_equal(vbgmm.fit(X).alpha_, float(alpha) / n_components)
| bsd-3-clause |
fzalkow/scikit-learn | sklearn/ensemble/forest.py | 176 | 62555 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score fuction."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
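# Illustrative sketch, not part of the original module: for a fixed random
# state the out-of-bag indices are exactly the samples that never appear in the
# bootstrap sample, so the two index sets partition range(n_samples).
#
#     sampled = _generate_sample_indices(0, 100)
#     oob = _generate_unsampled_indices(0, 100)
#     assert set(sampled) | set(oob) == set(range(100))
#     assert set(sampled) & set(oob) == set()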
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity;
            # [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
            valid_presets = ('auto', 'balanced', 'balanced_subsample', 'subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated and will be removed in 0.18."
" It was replaced by class_weight='balanced_subsample' "
"using the balanced strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
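# Illustrative note, not part of the original module: for a fitted
# single-output ForestClassifier ``clf`` and input ``X`` (both hypothetical
# here), the probabilities computed above are simply the per-tree probabilities
# averaged with equal weight:
#
#     per_tree = [t.predict_proba(X) for t in clf.estimators_]
#     assert np.allclose(clf.predict_proba(X), np.mean(per_tree, axis=0))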
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and use averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer
        than ``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
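# Hedged usage sketch, not part of the original module (assumes
# sklearn.datasets.make_blobs, which is used elsewhere in this repository): fit
# a small forest with OOB scoring enabled and inspect the OOB estimate of the
# generalization accuracy.
#
#     from sklearn.datasets import make_blobs
#     X, y = make_blobs(n_samples=300, centers=3, random_state=0)
#     clf = RandomForestClassifier(n_estimators=50, oob_score=True,
#                                  random_state=0).fit(X, y)
#     print(clf.oob_score_, clf.feature_importances_)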
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
decision trees on various sub-samples of the dataset and use averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer
        than ``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
        Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and use averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
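# --- Editorial sketch (not part of the original module): the ``class_weight``
# option documented above on an imbalanced toy problem. Dataset shape and
# parameter values are assumptions for illustration only.
def _example_extra_trees_class_weight():
    from sklearn.datasets import make_classification
    from sklearn.ensemble import ExtraTreesClassifier
    X, y = make_classification(n_samples=300, n_features=10,
                               weights=[0.9, 0.1], random_state=0)
    # "balanced" reweights classes by n_samples / (n_classes * np.bincount(y)).
    clf = ExtraTreesClassifier(n_estimators=100, class_weight="balanced",
                               random_state=0)
    clf.fit(X, y)
    return clf.feature_importances_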
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
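# --- Editorial sketch (not part of the original module): for the regressor,
# ``max_features="auto"`` means all features are considered at each split,
# as documented above. The toy data below is an assumption for illustration.
def _example_extra_trees_regressor():
    import numpy as np
    from sklearn.ensemble import ExtraTreesRegressor
    rng = np.random.RandomState(0)
    X = rng.rand(200, 8)
    y = np.sin(2 * np.pi * X[:, 0]) + 0.05 * rng.randn(200)
    est = ExtraTreesRegressor(n_estimators=100, max_features="auto",
                              random_state=0)
    est.fit(X, y)
    return est.predict(X[:5])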
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
        # ensure_2d=False because there are actually unit tests checking that
        # we fail for 1d.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
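# --- Editorial sketch (not part of the original module): the transformed
# output is a sparse one-hot coding with exactly ``n_estimators`` ones per
# row and at most ``n_estimators * max_leaf_nodes`` columns, as described in
# the RandomTreesEmbedding docstring. The input data is an assumption.
def _example_random_trees_embedding():
    import numpy as np
    from sklearn.ensemble import RandomTreesEmbedding
    rng = np.random.RandomState(0)
    X = rng.rand(100, 4)
    hasher = RandomTreesEmbedding(n_estimators=10, max_depth=3,
                                  random_state=0)
    X_transformed = hasher.fit_transform(X)
    # One active leaf per tree, hence each row sums to n_estimators.
    assert int(X_transformed.sum(axis=1).max()) == 10
    return X_transformed.shape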
| bsd-3-clause |
cxhernandez/msmbuilder | msmbuilder/project_templates/tica/tica-sample-coordinate-plot.py | 9 | 1174 | """Plot the result of sampling a tICA coordinate
{{header}}
"""
# ? include "plot_header.template"
# ? from "plot_macros.template" import xdg_open with context
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from msmbuilder.io import load_trajs, load_generic
sns.set_style('ticks')
colors = sns.color_palette()
## Load
meta, ttrajs = load_trajs('ttrajs')
txx = np.concatenate(list(ttrajs.values()))
inds = load_generic("tica-dimension-0-inds.pickl")
straj = []
for traj_i, frame_i in inds:
straj += [ttrajs[traj_i][frame_i, :]]
straj = np.asarray(straj)
## Overlay sampled trajectory on histogram
def plot_sampled_traj(ax):
ax.hexbin(txx[:, 0], txx[:, 1],
cmap='magma_r',
mincnt=1,
bins='log',
alpha=0.8,
)
ax.plot(straj[:, 0], straj[:, 1], 'o-', label='Sampled')
ax.set_xlabel("tIC 1", fontsize=16)
ax.set_ylabel("tIC 2", fontsize=16)
ax.legend(loc='best')
## Plot
fig, ax = plt.subplots(figsize=(7, 5))
plot_sampled_traj(ax)
fig.tight_layout()
fig.savefig('tica-dimension-0-heatmap.pdf')
# {{xdg_open('tica-dimension-0-heatmap.pdf')}}
| lgpl-2.1 |
lpouillo/execo-g5k-tools | tutorial/draw_mpi_bench.py | 1 | 2005 | #!/usr/bin/env python
import optparse, os, re, fileinput
def draw_results(result_dir, plotfile):
import matplotlib
if plotfile:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
    data = {} # dict: key = cluster, value = dict: key = problem size, value = list of (n_core, time) tuples
filename_re = re.compile("^cluster-(\w+)-n_core-(\d+)-size-(\w+)\.out$")
time_re = re.compile(" Time in seconds =\s*(\S+)")
for fname in os.listdir(result_dir):
mo = filename_re.match(fname)
if mo:
cluster = mo.group(1)
n_core = int(mo.group(2))
size = mo.group(3)
t = None
for line in fileinput.input(result_dir + "/" + fname):
mo2 = time_re.match(line)
if mo2:
t = float(mo2.group(1))
if cluster not in data: data[cluster] = {}
if size not in data[cluster]: data[cluster][size] = []
data[cluster][size].append((n_core, t))
for i, cluster in enumerate(data):
plt.figure()
plt.title(cluster)
plt.xlabel('num cores')
plt.ylabel('completion time')
for size in data[cluster]:
            data[cluster][size].sort(key = lambda e: e[0])
plt.plot([ d[0] for d in data[cluster][size] ],
[ d[1] for d in data[cluster][size] ],
label = "size %s" % (size,))
plt.legend()
if plotfile:
plt.savefig(re.sub('(\.\w+$)', '_%i\\1' % i, plotfile))
if not plotfile:
plt.show()
if __name__ == "__main__":
parser = optparse.OptionParser(usage = "%prog [options] <dir>")
parser.add_option("-f", dest="plotfile", default = None,
help="write plot to image file")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
result_dir = args[0]
draw_results(result_dir, options.plotfile)
| gpl-3.0 |
assad2012/ggplot | ggplot/tests/test_stat_calculate_methods.py | 12 | 2240 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from nose.tools import (assert_equal, assert_is, assert_is_not,
assert_raises)
import pandas as pd
from ggplot import *
from ggplot.utils.exceptions import GgplotError
from . import cleanup
@cleanup
def test_stat_bin():
# stat_bin needs the 'x' aesthetic to be numeric or a categorical
# and should complain if given anything else
class unknown(object):
pass
x = [unknown()] * 3
y = [1, 2, 3]
df = pd.DataFrame({'x': x, 'y': y})
gg = ggplot(aes(x='x', y='y'), df)
with assert_raises(GgplotError):
print(gg + stat_bin())
@cleanup
def test_stat_abline():
# slope and intercept function should return values
# of the same length
def fn_xy(x, y):
return [1, 2]
def fn_xy2(x, y):
return [1, 2, 3]
gg = ggplot(aes(x='wt', y='mpg'), mtcars)
# same length, no problem
print(gg + stat_abline(slope=fn_xy, intercept=fn_xy))
with assert_raises(GgplotError):
print(gg + stat_abline(slope=fn_xy, intercept=fn_xy2))
@cleanup
def test_stat_vhabline_functions():
def fn_x(x):
return 1
def fn_y(y):
return 1
def fn_xy(x, y):
return 1
gg = ggplot(aes(x='wt'), mtcars)
# needs y aesthetic
with assert_raises(GgplotError):
print(gg + stat_abline(slope=fn_xy))
# needs y aesthetic
with assert_raises(GgplotError):
print(gg + stat_abline(intercept=fn_xy))
gg = ggplot(aes(x='wt', y='mpg'), mtcars)
# Functions with 2 args, no problem
print(gg + stat_abline(slope=fn_xy, intercept=fn_xy))
# slope function should take 2 args
with assert_raises(GgplotError):
print(gg + stat_abline(slope=fn_x, intercept=fn_xy))
# intercept function should take 2 args
with assert_raises(GgplotError):
print(gg + stat_abline(slope=fn_xy, intercept=fn_y))
# intercept function should take 1 arg
with assert_raises(GgplotError):
print(gg + stat_vline(xintercept=fn_xy))
# intercept function should take 1 arg
with assert_raises(GgplotError):
print(gg + stat_hline(yintercept=fn_xy))
| bsd-2-clause |
RPGOne/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features/2:]] = 0 # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples / 2], y[:n_samples / 2]
X_test, y_test = X[n_samples / 2:], y[n_samples / 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
smsolivier/VEF | tex/paper/perm_dl.py | 1 | 1821 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('../../code')
import ld as LD
from exactDiff import exactDiff
from hidespines import *
''' compare the permutations of linear representation in the diffusion limit '''
if (len(sys.argv) > 1):
outfile = sys.argv[1:]
else:
outfile = None
Nruns = 15
eps = np.logspace(-3, 0, Nruns)
tol = 1e-10
def getIt(eps, opt, gauss):
N = 50
N *= 15
xb = 10
x0 = np.linspace(0, xb, N+1)
Sigmat = lambda x: 1
Sigmaa = lambda x: .1
q = lambda x: 1
n = 8
it = np.zeros(len(eps))
diff = np.zeros(len(eps))
for i in range(len(eps)):
sol = LD.Eddington(x0, n, lambda x: eps[i], lambda x: 1/eps[i],
lambda x, mu: eps[i], OPT=opt, GAUSS=gauss)
x, phi, it[i] = sol.sourceIteration(tol, maxIter=200)
phi_ex = exactDiff(eps[i], 1/eps[i], eps[i], xb)
diff[i] = np.linalg.norm(phi - phi_ex(x), 2)/np.linalg.norm(phi_ex(x), 2)
return diff, it
diff0, it0 = getIt(eps, 3, 1)
diff1, it1 = getIt(eps, 2, 1)
# diff01, it01 = getIt(eps, 0, 1)
# diff11, it11 = getIt(eps, 1, 1)
# diff20, it20 = getIt(eps, 2, 0)
# diff21, it21 = getIt(eps, 2, 1)
plt.figure()
plt.loglog(eps, it0, '-o', clip_on=False, label='Flat')
plt.loglog(eps, it1, '-*', clip_on=False, label='van Leer')
plt.xlabel(r'$\epsilon$', fontsize=18)
plt.ylabel('Number of Iterations', fontsize=18)
plt.legend(loc='best', frameon=False)
hidespines(plt.gca())
if (outfile != None):
plt.savefig(outfile[0])
plt.figure()
plt.loglog(eps, diff0, '-o', clip_on=False, label='Flat')
plt.loglog(eps, diff1, '-*', clip_on=False, label='van Leer')
plt.xlabel(r'$\epsilon$', fontsize=18)
plt.ylabel('Error', fontsize=18)
plt.legend(loc='best', frameon=False)
hidespines(plt.gca())
if (outfile != None):
plt.savefig(outfile[1])
else:
plt.show() | mit |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/mpl_toolkits/axisartist/grid_helper_curvelinear.py | 1 | 25266 | """
Experimental support for curvilinear grids.
"""
from itertools import chain
from grid_finder import GridFinder
from axislines import \
AxisArtistHelper, GridHelperBase
from axis_artist import AxisArtist
from matplotlib.transforms import Affine2D, IdentityTransform
import numpy as np
from matplotlib.path import Path
class FixedAxisArtistHelper(AxisArtistHelper.Fixed):
"""
Helper class for a fixed axis.
"""
def __init__(self, grid_helper, side, nth_coord_ticks=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FixedAxisArtistHelper, self).__init__( \
loc=side,
)
self.grid_helper = grid_helper
if nth_coord_ticks is None:
nth_coord_ticks = self.nth_coord
self.nth_coord_ticks = nth_coord_ticks
self.side = side
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
def change_tick_coord(self, coord_number=None):
if coord_number is None:
self.nth_coord_ticks = 1 - self.nth_coord_ticks
elif coord_number in [0, 1]:
self.nth_coord_ticks = coord_number
else:
raise Exception("wrong coord number")
def get_tick_transform(self, axes):
return axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
g = self.grid_helper
ti1 = g.get_tick_iterator(self.nth_coord_ticks, self.side)
ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, self.side, minor=True)
#ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, self.side, minor=True)
return chain(ti1, ti2), iter([])
class FloatingAxisArtistHelper(AxisArtistHelper.Floating):
def __init__(self, grid_helper, nth_coord, value, axis_direction=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FloatingAxisArtistHelper, self).__init__(nth_coord,
value,
)
self.value = value
self.grid_helper = grid_helper
self._extremes = None, None
self._get_line_path = None # a method that returns a Path.
self._line_num_points = 100 # number of points to create a line
def set_extremes(self, e1, e2):
self._extremes = e1, e2
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
x1, x2 = axes.get_xlim()
y1, y2 = axes.get_ylim()
grid_finder = self.grid_helper.grid_finder
extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy,
x1, y1, x2, y2)
extremes = list(extremes)
e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
if e1 is not None:
extremes[2] = max(e1, extremes[2])
if e2 is not None:
extremes[3] = min(e2, extremes[3])
elif self.nth_coord == 1:
if e1 is not None:
extremes[0] = max(e1, extremes[0])
if e2 is not None:
extremes[1] = min(e2, extremes[1])
grid_info = dict()
lon_min, lon_max, lat_min, lat_max = extremes
lon_levs, lon_n, lon_factor = \
grid_finder.grid_locator1(lon_min, lon_max)
lat_levs, lat_n, lat_factor = \
grid_finder.grid_locator2(lat_min, lat_max)
grid_info["extremes"] = extremes
grid_info["lon_info"] = lon_levs, lon_n, lon_factor
grid_info["lat_info"] = lat_levs, lat_n, lat_factor
grid_info["lon_labels"] = grid_finder.tick_formatter1("bottom",
lon_factor,
lon_levs)
grid_info["lat_labels"] = grid_finder.tick_formatter2("bottom",
lat_factor,
lat_levs)
grid_finder = self.grid_helper.grid_finder
#e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
xx0 = np.linspace(self.value, self.value, self._line_num_points)
yy0 = np.linspace(extremes[2], extremes[3], self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
elif self.nth_coord == 1:
xx0 = np.linspace(extremes[0], extremes[1], self._line_num_points)
yy0 = np.linspace(self.value, self.value, self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
grid_info["line_xy"] = xx, yy
self.grid_info = grid_info
def get_axislabel_transform(self, axes):
return Affine2D() #axes.transData
def get_axislabel_pos_angle(self, axes):
extremes = self.grid_info["extremes"]
if self.nth_coord == 0:
xx0 = self.value
yy0 = (extremes[2]+extremes[3])/2.
dxx, dyy = 0., abs(extremes[2]-extremes[3])/1000.
elif self.nth_coord == 1:
xx0 = (extremes[0]+extremes[1])/2.
yy0 = self.value
dxx, dyy = abs(extremes[0]-extremes[1])/1000., 0.
grid_finder = self.grid_helper.grid_finder
xx1, yy1 = grid_finder.transform_xy([xx0], [yy0])
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([xx1[0], yy1[0]])
if (0. <= p[0] <= 1.) and (0. <= p[1] <= 1.):
xx1c, yy1c = axes.transData.transform_point([xx1[0], yy1[0]])
xx2, yy2 = grid_finder.transform_xy([xx0+dxx], [yy0+dyy])
xx2c, yy2c = axes.transData.transform_point([xx2[0], yy2[0]])
return (xx1c, yy1c), np.arctan2(yy2c-yy1c, xx2c-xx1c)/np.pi*180.
else:
return None, None
def get_tick_transform(self, axes):
return IdentityTransform() #axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label, (optionally) tick_label"""
grid_finder = self.grid_helper.grid_finder
lat_levs, lat_n, lat_factor = self.grid_info["lat_info"]
lat_levs = np.asarray(lat_levs)
if lat_factor is not None:
yy0 = lat_levs / lat_factor
dy = 0.01 / lat_factor
else:
yy0 = lat_levs
dy = 0.01
lon_levs, lon_n, lon_factor = self.grid_info["lon_info"]
lon_levs = np.asarray(lon_levs)
if lon_factor is not None:
xx0 = lon_levs / lon_factor
dx = 0.01 / lon_factor
else:
xx0 = lon_levs
dx = 0.01
e0, e1 = sorted(self._extremes)
if e0 is None:
e0 = -np.inf
if e1 is None:
e1 = np.inf
if self.nth_coord == 0:
mask = (e0 <= yy0) & (yy0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
yy0 = yy0[mask]
elif self.nth_coord == 1:
mask = (e0 <= xx0) & (xx0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
xx0 = xx0[mask]
def transform_xy(x, y):
x1, y1 = grid_finder.transform_xy(x, y)
x2y2 = axes.transData.transform(np.array([x1, y1]).transpose())
x2, y2 = x2y2.transpose()
return x2, y2
# find angles
if self.nth_coord == 0:
xx0 = np.empty_like(yy0)
xx0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx1a, yy1a = transform_xy(xx00, yy0)
xx1b, yy1b = transform_xy(xx00+dx, yy0)
xx2a, yy2a = transform_xy(xx0, yy0)
xx2b, yy2b = transform_xy(xx0, yy0+dy)
labels = self.grid_info["lat_labels"]
labels = [l for l, m in zip(labels, mask) if m]
elif self.nth_coord == 1:
yy0 = np.empty_like(xx0)
yy0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx1a, yy1a = transform_xy(xx0, yy0)
xx1b, yy1b = transform_xy(xx0, yy0+dy)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx2a, yy2a = transform_xy(xx00, yy0)
xx2b, yy2b = transform_xy(xx00+dx, yy0)
labels = self.grid_info["lon_labels"]
labels = [l for l, m in zip(labels, mask) if m]
def f1():
dd = np.arctan2(yy1b-yy1a, xx1b-xx1a) # angle normal
dd2 = np.arctan2(yy2b-yy2a, xx2b-xx2a) # angle tangent
mm = ((yy1b-yy1a)==0.) & ((xx1b-xx1a)==0.) # mask where dd1 is not defined
dd[mm] = dd2[mm]+3.14159/2.
#dd = np.arctan2(yy2-yy1, xx2-xx1) # angle normal
#dd2 = np.arctan2(yy3-yy1, xx3-xx1) # angle tangent
#mm = ((yy2-yy1)==0.) & ((xx2-xx1)==0.) # mask where dd1 is not defined
#dd[mm] = dd2[mm]+3.14159/2.
#dd += 3.14159
#dd = np.arctan2(xx2-xx1, angle_tangent-yy1)
trans_tick = self.get_tick_transform(axes)
tr2ax = trans_tick + axes.transAxes.inverted()
for x, y, d, d2, lab in zip(xx1, yy1, dd, dd2, labels):
c2 = tr2ax.transform_point((x, y))
delta=0.00001
if (0. -delta<= c2[0] <= 1.+delta) and \
(0. -delta<= c2[1] <= 1.+delta):
d1 = d/3.14159*180.
d2 = d2/3.14159*180.
yield [x, y], d1, d2, lab
return f1(), iter([])
def get_line_transform(self, axes):
return axes.transData
def get_line(self, axes):
self.update_lim(axes)
x, y = self.grid_info["line_xy"]
if self._get_line_path is None:
return Path(zip(x, y))
else:
return self._get_line_path(axes, x, y)
class GridHelperCurveLinear(GridHelperBase):
def __init__(self, aux_trans,
extreme_finder=None,
grid_locator1=None,
grid_locator2=None,
tick_formatter1=None,
tick_formatter2=None):
"""
        aux_trans : a transform from the source (curved) coordinate to the
        target (rectilinear) coordinate. Either an instance of MPL's Transform
        (whose inverse transform must be defined) or a tuple of two callable
        objects that define the transform and its inverse. The callables must
        take two arguments (arrays of source coordinates) and return the two
        arrays of target coordinates:
        e.g. x2, y2 = trans(x1, y1)
"""
super(GridHelperCurveLinear, self).__init__()
self.grid_info = None
self._old_values = None
#self._grid_params = dict()
self._aux_trans = aux_trans
self.grid_finder = GridFinder(aux_trans,
extreme_finder,
grid_locator1,
grid_locator2,
tick_formatter1,
tick_formatter2)
def update_grid_finder(self, aux_trans=None, **kw):
if aux_trans is not None:
self.grid_finder.update_transform(aux_trans)
self.grid_finder.update(**kw)
self.invalidate()
def _update(self, x1, x2, y1, y2):
"bbox in 0-based image coordinates"
# update wcsgrid
if self.valid() and self._old_values == (x1, x2, y1, y2):
return
self._update_grid(x1, y1, x2, y2)
self._old_values = (x1, x2, y1, y2)
self._force_update = False
def new_fixed_axis(self, loc,
nth_coord=None,
axis_direction=None,
offset=None,
axes=None):
if axes is None:
axes = self.axes
if axis_direction is None:
axis_direction = loc
_helper = FixedAxisArtistHelper(self, loc,
#nth_coord,
nth_coord_ticks=nth_coord,
)
axisline = AxisArtist(axes, _helper, axis_direction=axis_direction)
return axisline
def new_floating_axis(self, nth_coord,
value,
axes=None,
axis_direction="bottom"
):
if axes is None:
axes = self.axes
_helper = FloatingAxisArtistHelper( \
self, nth_coord, value, axis_direction)
axisline = AxisArtist(axes, _helper)
#_helper = FloatingAxisArtistHelper(self, nth_coord,
# value,
# label_direction=label_direction,
# )
#axisline = AxisArtistFloating(axes, _helper,
# axis_direction=axis_direction)
axisline.line.set_clip_on(True)
axisline.line.set_clip_box(axisline.axes.bbox)
#axisline.major_ticklabels.set_visible(True)
#axisline.minor_ticklabels.set_visible(False)
#axisline.major_ticklabels.set_rotate_along_line(True)
#axisline.set_rotate_label_along_line(True)
return axisline
def _update_grid(self, x1, y1, x2, y2):
self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)
def get_gridlines(self):
grid_lines = []
for gl in self.grid_info["lat"]["lines"]:
grid_lines.extend(gl)
for gl in self.grid_info["lon"]["lines"]:
grid_lines.extend(gl)
return grid_lines
def get_tick_iterator(self, nth_coord, axis_side, minor=False):
#axisnr = dict(left=0, bottom=1, right=2, top=3)[axis_side]
angle_tangent = dict(left=90, right=90, bottom=0, top=0)[axis_side]
#angle = [0, 90, 180, 270][axisnr]
lon_or_lat = ["lon", "lat"][nth_coord]
if not minor: # major ticks
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, l
else:
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, ""
#for xy, a, l in self.grid_info[lon_or_lat]["ticks"][axis_side]:
# yield xy, a, ""
return f()
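# --- Editorial sketch (not part of the original module): as noted in the
# GridHelperCurveLinear docstring, ``aux_trans`` may also be a pair of
# vectorized callables (forward, inverse) instead of a Transform instance.
# The shear mapping below is an arbitrary assumption for illustration.
def _example_grid_helper_from_callables():
    import numpy as np

    def tr(x, y):  # source -> target coordinates
        x, y = np.asarray(x), np.asarray(y)
        return x, y - x

    def inv_tr(x, y):  # target -> source coordinates
        x, y = np.asarray(x), np.asarray(y)
        return x, y + x

    return GridHelperCurveLinear((tr, inv_tr))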
def test3():
import numpy as np
from matplotlib.transforms import Transform
from matplotlib.path import Path
class MyTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
            Create a new transform. Resolution is the number of steps
            to interpolate between each input line segment to approximate its
            path in the curved target space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y-x), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MyTransformInv(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class MyTransformInv(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y+x), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MyTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
tr = MyTransform(1)
grid_helper = GridHelperCurveLinear(tr)
    from mpl_toolkits.axes_grid1.parasite_axes import \
        host_subplot_class_factory, ParasiteAxesAuxTrans
from axislines import Axes
SubplotHost = host_subplot_class_factory(Axes)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
fig.add_subplot(ax1)
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
ax1.parasites.append(ax2)
ax2.plot([3, 6], [5.0, 10.])
ax1.set_aspect(1.)
ax1.set_xlim(0, 10)
ax1.set_ylim(0, 10)
ax1.grid(True)
plt.draw()
def curvelinear_test2(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1
import numpy as np
import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost, \
ParasiteAxesAuxTrans
import matplotlib.cbook as cbook
# PolarAxes.PolarTransform takes radian. However, we want our coordinate
# system in degree
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# polar projection, which involves cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(5)
    # Find grid values appropriate for the coordinate (degree,
# minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
    # And also use an appropriate formatter.  Note that the acceptable
    # Locator and Formatter classes are a bit different from mpl's, and you
    # cannot directly use mpl's Locator and Formatter here (but that may be
    # possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
# make ticklabels of right and top axis visible.
ax1.axis["right"].major_ticklabels.set_visible(True)
ax1.axis["top"].major_ticklabels.set_visible(True)
    # let the right axis show ticklabels for the 1st coordinate (angle)
ax1.axis["right"].get_helper().nth_coord_ticks=0
    # let the bottom axis show ticklabels for the 2nd coordinate (radius)
ax1.axis["bottom"].get_helper().nth_coord_ticks=1
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat"] = axis = grid_helper.new_floating_axis(0, 60, axes=ax1)
axis.label.set_text("Test")
axis.label.set_visible(True)
#axis._extremes = 2, 10
#axis.label.set_text("Test")
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.get_helper()._extremes=2, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 6, axes=ax1)
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.label.set_text("Test 2")
axis.get_helper()._extremes=-180, 90
# A parasite axes with given transform
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# note that ax2.transData == tr + ax1.transData
    # Anything you draw in ax2 will match the ticks and grids of ax1.
ax1.parasites.append(ax2)
intp = cbook.simple_linear_interpolation
ax2.plot(intp(np.array([0, 30]), 50),
intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
def curvelinear_test3(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1, axis
import numpy as np
import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost
# PolarAxes.PolarTransform takes radian. However, we want our coordinate
# system in degree
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# polar projection, which involves cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(12)
    # Find grid values appropriate for the coordinate (degree,
# minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
    # And also use an appropriate formatter.  Note that the acceptable
    # Locator and Formatter classes are a bit different from mpl's, and you
    # cannot directly use mpl's Locator and Formatter here (but that may be
    # possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
for axis in ax1.axis.itervalues():
axis.set_visible(False)
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat1"] = axis = grid_helper.new_floating_axis(0, 130,
axes=ax1,
axis_direction="left"
)
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
grid_helper = ax1.get_grid_helper()
ax1.axis["lat2"] = axis = grid_helper.new_floating_axis(0, 50, axes=ax1,
axis_direction="right")
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 10,
axes=ax1,
axis_direction="bottom")
axis.label.set_text("Test 2")
axis.get_helper()._extremes= 50, 130
axis.major_ticklabels.set_axis_direction("top")
axis.label.set_axis_direction("top")
grid_helper.grid_finder.grid_locator1.den = 5
grid_helper.grid_finder.grid_locator2._nbins = 5
# # A parasite axes with given transform
# ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# # note that ax2.transData == tr + ax1.transData
    # # Anything you draw in ax2 will match the ticks and grids of ax1.
# ax1.parasites.append(ax2)
# intp = cbook.simple_linear_interpolation
# ax2.plot(intp(np.array([0, 30]), 50),
# intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1, figsize=(5, 5))
fig.clf()
#test3()
#curvelinear_test2(fig)
curvelinear_test3(fig)
#plt.draw()
plt.show()
| gpl-2.0 |
hsiaoyi0504/scikit-learn | examples/cluster/plot_affinity_propagation.py | 349 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
jm-begon/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vector. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (ie "cityblock" distance) is much smaller than it's
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
so the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
he7d3r/revscoring | revscoring/scoring/models/__init__.py | 2 | 1442 | """
This module contains a collection of models that implement a simple function:
:func:`~revscoring.Model.score`. Currently, all models are
a subclass of :class:`revscoring.scoring.models.Learned`
which means that they also implement
:meth:`~revscoring.scoring.models.Learned.train` and
:meth:`~revscoring.scoring.models.Learned.cross_validate`.
Gradient Boosting
+++++++++++++++++
.. automodule:: revscoring.scoring.models.gradient_boosting
Naive Bayes
+++++++++++
.. automodule:: revscoring.scoring.models.naive_bayes
Linear Regression
+++++++++++++++++
.. automodule:: revscoring.scoring.models.linear
Support Vector
++++++++++++++
.. automodule:: revscoring.scoring.models.svc
Random Forest
+++++++++++++
.. automodule:: revscoring.scoring.models.random_forest
Abstract classes
++++++++++++++++
.. automodule:: revscoring.scoring.models.model
SciKit Learn-based models
+++++++++++++++++++++++++
.. automodule:: revscoring.scoring.models.sklearn
"""
from .gradient_boosting import GradientBoosting
from .linear import LogisticRegression
from .model import Classifier, Learned, open_file
from .naive_bayes import BernoulliNB, GaussianNB, MultinomialNB, NaiveBayes
from .random_forest import RandomForest
from .svc import RBFSVC, SVC, LinearSVC
__all__ = [
Learned, Classifier, open_file,
SVC, LinearSVC, RBFSVC, NaiveBayes, GaussianNB, MultinomialNB, BernoulliNB,
RandomForest, GradientBoosting, LogisticRegression
]
| mit |
sumspr/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a Voronoi tessellation.
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
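# Editorial aside (not part of the original example): the same mapping on a
# few hypothetical gradient values shows that a larger beta makes edge weights
# decay faster, i.e. the segmentation follows the image more closely.
_demo_gradients = np.array([0.0, 0.5, 1.0, 2.0])
_demo_weights = {b: np.exp(-b * _demo_gradients) for b in (1, 5, 10)}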
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/matplotlib/container.py | 11 | 3370 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.cbook as cbook
class Container(tuple):
"""
Base class for containers.
"""
def __repr__(self):
return "<Container object of %d artists>" % (len(self))
def __new__(cls, *kl, **kwargs):
return tuple.__new__(cls, kl[0])
def __init__(self, kl, label=None):
self.eventson = False # fire events only if eventson
self._oid = 0 # an observer id
self._propobservers = {} # a dict from oids to funcs
self._remove_method = None
self.set_label(label)
def set_remove_method(self, f):
self._remove_method = f
def remove(self):
for c in self:
c.remove()
if self._remove_method:
self._remove_method(self)
def __getstate__(self):
d = self.__dict__.copy()
# remove the unpicklable remove method, this will get re-added on load
# (by the axes) if the artist lives on an axes.
d['_remove_method'] = None
return d
def get_label(self):
"""
Get the label used for this artist in the legend.
"""
return self._label
def set_label(self, s):
"""
Set the label to *s* for auto legend.
ACCEPTS: string or anything printable with '%s' conversion.
"""
if s is not None:
self._label = '%s' % (s, )
else:
self._label = None
self.pchanged()
def add_callback(self, func):
"""
Adds a callback function that will be called whenever one of
the :class:`Artist`'s properties changes.
Returns an *id* that is useful for removing the callback with
:meth:`remove_callback` later.
"""
oid = self._oid
self._propobservers[oid] = func
self._oid += 1
return oid
def remove_callback(self, oid):
"""
Remove a callback based on its *id*.
.. seealso::
:meth:`add_callback`
For adding callbacks
"""
try:
del self._propobservers[oid]
except KeyError:
pass
def pchanged(self):
"""
Fire an event when property changed, calling all of the
registered callbacks.
"""
for oid, func in list(six.iteritems(self._propobservers)):
func(self)
def get_children(self):
return list(cbook.flatten(self))
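# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The docstrings above describe an observer pattern: add_callback() registers a
# listener and returns an id, pchanged() notifies every registered listener, and
# remove_callback() deregisters by id.  A minimal hand-rolled demonstration
# follows; the listener is hypothetical, since matplotlib normally wires these
# hooks up through the Axes/legend machinery rather than by hand.
def _demo_container_callbacks():
    def listener(cont):
        print("label changed to %r" % (cont.get_label(),))

    c = Container((), label='demo')    # empty container, only for the demo
    oid = c.add_callback(listener)     # id usable with remove_callback()
    c.set_label('updated')             # calls pchanged() -> listener(c)
    c.remove_callback(oid)             # listener is no longer notified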
class BarContainer(Container):
def __init__(self, patches, errorbar=None, **kwargs):
self.patches = patches
self.errorbar = errorbar
Container.__init__(self, patches, **kwargs)
class ErrorbarContainer(Container):
def __init__(self, lines, has_xerr=False, has_yerr=False, **kwargs):
self.lines = lines
self.has_xerr = has_xerr
self.has_yerr = has_yerr
Container.__init__(self, lines, **kwargs)
class StemContainer(Container):
def __init__(self, markerline_stemlines_baseline, **kwargs):
markerline, stemlines, baseline = markerline_stemlines_baseline
self.markerline = markerline
self.stemlines = stemlines
self.baseline = baseline
Container.__init__(self, markerline_stemlines_baseline, **kwargs)
| mit |
Ziqi-Li/bknqgis | pandas/pandas/tests/io/parser/test_read_fwf.py | 2 | 16032 | # -*- coding: utf-8 -*-
"""
Tests the 'read_fwf' function in parsers.py. This
test suite is independent of the others because the
engine is set to 'python-fwf' internally.
"""
from datetime import datetime
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame
from pandas import compat
from pandas.compat import StringIO, BytesIO
from pandas.io.parsers import read_csv, read_fwf, EmptyDataError
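# --- Illustrative sketch (editor's addition, not exercised by the test suite) ---
# read_fwf can be driven either by explicit (start, stop) column boundaries via
# `colspecs`, or by field widths via `widths`.  The two calls below parse the
# same made-up fixed-width data into identical frames.
def _example_read_fwf_usage():
    data = ("20110101  1.5  abc\n"
            "20110102 10.0  def\n")
    by_colspecs = read_fwf(StringIO(data), colspecs=[(0, 8), (8, 13), (13, 18)],
                           names=['date', 'value', 'tag'])
    by_widths = read_fwf(StringIO(data), widths=[8, 5, 5],
                         names=['date', 'value', 'tag'])
    return by_colspecs, by_widths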
class TestFwfParsing(object):
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = read_csv(StringIO(data_expected),
engine='python', header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From Thomas Kluyver: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assert_raises_regex(ValueError,
"must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assert_raises_regex(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_BytesIO_input(self):
if not compat.PY3:
pytest.skip(
"Bytes-related test - only needs to work on Python 3")
result = read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
def test_fwf_colspecs_is_list_or_tuple(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
with tm.assert_raises_regex(TypeError,
'column specifications must '
'be a list or tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(data),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
with tm.assert_raises_regex(TypeError,
'Each column specification '
'must be.+'):
read_fwf(StringIO(data), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
assert len(res)
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71""" # noqa
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn",
"dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
pytest.skip("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = np.array([[1, 2., 4],
[5, np.nan, 10.]])
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = np.array([[1, 2334., 5],
[10, 13, 10]])
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_bool_header_arg(self):
# see gh-6114
data = """\
MyColumn
a
b
a
b"""
for arg in [True, False]:
with pytest.raises(TypeError):
read_fwf(StringIO(data), header=arg)
def test_full_file(self):
# File with all values
test = """index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo"""
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = """index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34"""
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = """
Account Name Balance CreditLimit AccountCreated
101 Keanu Reeves 9315.45 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00 8/6/2003
868 Jennifer Love Hewitt 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65 5000.00 2/5/2007
""".strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
# File with spaces and missing values in columns
test = """
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00 8/6/2003
868 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65
""".strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_messed_up_data(self):
# Completely messed up file
test = """
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65
""".strip('\r\n')
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_multiple_delimiters(self):
test = r"""
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~Keanu Reeves
33+++122.33\\\bar.........Gerard Butler
++44~~~~12.01 baz~~Jennifer Love Hewitt
~~55 11+++foo++++Jada Pinkett-Smith
..66++++++.03~~~bar Bill Murray
""".strip('\r\n')
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs,
delimiter=' +~.\\')
tm.assert_frame_equal(expected, read_fwf(StringIO(test),
delimiter=' +~.\\'))
def test_variable_width_unicode(self):
if not compat.PY3:
pytest.skip(
'Bytes-related test - only needs to work on Python 3')
test = """
שלום שלום
ום שלל
של ום
""".strip('\r\n')
expected = read_fwf(BytesIO(test.encode('utf8')),
colspecs=[(0, 4), (5, 9)],
header=None, encoding='utf8')
tm.assert_frame_equal(expected, read_fwf(
BytesIO(test.encode('utf8')), header=None, encoding='utf8'))
def test_dtype(self):
data = """ a b c
1 2 3.2
3 4 5.2
"""
colspecs = [(0, 5), (5, 10), (10, None)]
result = pd.read_fwf(StringIO(data), colspecs=colspecs)
expected = pd.DataFrame({
'a': [1, 3],
'b': [2, 4],
'c': [3.2, 5.2]}, columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
expected['a'] = expected['a'].astype('float64')
expected['b'] = expected['b'].astype(str)
expected['c'] = expected['c'].astype('int32')
result = pd.read_fwf(StringIO(data), colspecs=colspecs,
dtype={'a': 'float64', 'b': str, 'c': 'int32'})
tm.assert_frame_equal(result, expected)
def test_skiprows_inference(self):
# GH11256
test = """
Text contained in the file header
DataCol1 DataCol2
0.0 1.0
101.6 956.1
""".strip()
expected = read_csv(StringIO(test), skiprows=2,
delim_whitespace=True)
tm.assert_frame_equal(expected, read_fwf(
StringIO(test), skiprows=2))
def test_skiprows_by_index_inference(self):
test = """
To be skipped
Not To Be Skipped
Once more to be skipped
123 34 8 123
456 78 9 456
""".strip()
expected = read_csv(StringIO(test), skiprows=[0, 2],
delim_whitespace=True)
tm.assert_frame_equal(expected, read_fwf(
StringIO(test), skiprows=[0, 2]))
def test_skiprows_inference_empty(self):
test = """
AA BBB C
12 345 6
78 901 2
""".strip()
with pytest.raises(EmptyDataError):
read_fwf(StringIO(test), skiprows=3)
def test_whitespace_preservation(self):
# Addresses Issue #16772
data_expected = """
a ,bbb
cc,dd """
expected = read_csv(StringIO(data_expected), header=None)
test_data = """
a bbb
ccdd """
result = read_fwf(StringIO(test_data), widths=[3, 3],
header=None, skiprows=[0], delimiter="\n\t")
tm.assert_frame_equal(result, expected)
def test_default_delimiter(self):
data_expected = """
a,bbb
cc,dd"""
expected = read_csv(StringIO(data_expected), header=None)
test_data = """
a \tbbb
cc\tdd """
result = read_fwf(StringIO(test_data), widths=[3, 3],
header=None, skiprows=[0])
tm.assert_frame_equal(result, expected)
| gpl-2.0 |
NERC-CEH/ecomaps | ecomaps/lib/wmsvizMetadataImageBuilder.py | 1 | 5595 | """
Creates the metadata caption for figures in the style used by WMSViz.
Based on code originally in figure_builder.
@author rwilkinson
"""
import logging
import time
from cStringIO import StringIO
try:
from PIL import Image
except:
import Image
from matplotlib.figure import Figure
from matplotlib.backends.backend_cairo import FigureCanvasCairo as FigureCanvas
from matplotlib.backends.backend_cairo import RendererCairo as Renderer
from matplotlib.transforms import Bbox
from matplotlib.patches import Rectangle
from pylons import config
import ecomaps.lib.image_util as image_util
log = logging.getLogger(__name__)
metadataFont = {'weight':'normal',
'family':'sans-serif',
'size':'12'}
titleFont = {'weight':'normal',
'family':'sans-serif',
'size':'16'}
borderColor = 'grey'
class WmsvizMetadataImageBuilder(object):
def __init__(self, params):
pass
def getFigureSpacings(self):
"""Returns the vertical spaces between the components of a figure.
"""
return (30, 0, 0)
def buildMetadataImage(self, layerInfoList, width):
"""
Creates the metadata caption for figures in the style used by WMSViz.
"""
self.metadataItems = self._buildMetadataItems(layerInfoList)
self.width = width
width = self.width
height = 1600
dpi = 100
transparent = False
figsize=(width / float(dpi), height / float(dpi))
fig = Figure(figsize=figsize, dpi=dpi, facecolor='w', frameon=(not transparent))
axes = fig.add_axes([0.04, 0.04, 0.92, 0.92], frameon=True,xticks=[], yticks=[])
renderer = Renderer(fig.dpi)
title, titleHeight = self._drawTitleToAxes(axes, renderer)
txt, textHeight = self._drawMetadataTextToAxes(axes, renderer, self.metadataItems)
# fit the axis round the text
pos = axes.get_position()
newpos = Bbox( [[pos.x0, pos.y1 - (titleHeight + textHeight) / height], [pos.x1, pos.y1]] )
axes.set_position(newpos )
# position the text below the title
newAxisHeight = (newpos.y1 - newpos.y0) * height
txt.set_position( (0.02, 0.98 - (titleHeight/newAxisHeight) ))
for loc, spine in axes.spines.iteritems():
spine.set_edgecolor(borderColor)
# Draw heading box
headingBoxHeight = titleHeight - 1
axes.add_patch(Rectangle((0, 1.0 - (headingBoxHeight/newAxisHeight)), 1, (headingBoxHeight/newAxisHeight),
facecolor=borderColor,
fill = True,
linewidth=0))
# reduce the figure height
originalHeight = fig.get_figheight()
pos = axes.get_position()
topBound = 20 / float(dpi)
textHeight = (pos.y1 - pos.y0) * originalHeight
newHeight = topBound * 2 + textHeight
# work out the new proportions for the figure
border = topBound / float(newHeight)
newpos = Bbox( [[pos.x0, border], [pos.x1, 1 - border]] )
axes.set_position(newpos )
fig.set_figheight(newHeight)
return image_util.figureToImage(fig)
def _drawMetadataTextToAxes(self, axes, renderer, metadataItems):
'''
Draws the metadata text to the axes
@param axes: the axes to draw the text on
@type axes: matplotlib.axes.Axes
@param renderer: the matplotlib renderer to evaluate the text size
@param metadataItems: a list of metadata items to get the text from
@return: the text object, the total metadata text height in pixels
'''
lines = self.metadataItems
text = '\n'.join(lines)
txt = axes.text(0.02, 0.98 ,text,
fontdict=metadataFont,
horizontalalignment='left',
verticalalignment='top',)
extent = txt.get_window_extent(renderer)
textHeight = (extent.y1 - extent.y0 + 10)
return txt, textHeight
def _drawTitleToAxes(self, axes, renderer):
'''
Draws the metadata title text onto the axes
@return: the text object, the height of the title text in pixels
'''
titleText = self._getTitleText()
title = axes.text(0.02,0.98,titleText,
fontdict=titleFont,
horizontalalignment='left',
verticalalignment='top',)
extent = title.get_window_extent(renderer)
titleHeight = (extent.y1 - extent.y0 + 8)
return title, titleHeight
def _getTitleText(self):
titleText = "Plot Metadata:"
additionalText = config.get('additional_figure_text', '')
if additionalText != "":
if additionalText.find('<date>') > 0:
timeString = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
additionalText = additionalText.replace("<date>", timeString)
titleText += " %s" % (additionalText,)
return titleText
def _buildMetadataItems(self, layerInfoList):
items = []
for i in range(len(layerInfoList)):
li = layerInfoList[i]
j = i + 1
items.append("%s:endpoint = %s layerName = %s" % (j, li.endpoint, li.layerName))
items.append("%s:params = %s" % (j, li.params))
return items
| gpl-2.0 |
jelugbo/hebs_master | docs/en_us/developers/source/conf.py | 30 | 6955 | # -*- coding: utf-8 -*-
# pylint: disable=C0103
# pylint: disable=W0622
# pylint: disable=W0212
# pylint: disable=W0613
import sys, os
from path import path
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
sys.path.append('../../../../')
from docs.shared.conf import *
# Add any paths that contain templates here, relative to this directory.
templates_path.append('source/_templates')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path.append('source/_static')
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
root = path('../../../..').abspath()
sys.path.insert(0, root)
sys.path.append(root / "common/djangoapps")
sys.path.append(root / "common/lib")
sys.path.append(root / "common/lib/capa")
sys.path.append(root / "common/lib/chem")
sys.path.append(root / "common/lib/sandbox-packages")
sys.path.append(root / "common/lib/xmodule")
sys.path.append(root / "common/lib/opaque_keys")
sys.path.append(root / "lms/djangoapps")
sys.path.append(root / "lms/lib")
sys.path.append(root / "cms/djangoapps")
sys.path.append(root / "cms/lib")
sys.path.insert(0, os.path.abspath(os.path.normpath(os.path.dirname(__file__)
+ '/../../../')))
sys.path.append('.')
# django configuration - careful here
if on_rtd:
os.environ['DJANGO_SETTINGS_MODULE'] = 'lms'
else:
os.environ['DJANGO_SETTINGS_MODULE'] = 'lms.envs.test'
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath',
'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'sphinxcontrib.napoleon']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# Output file base name for HTML help builder.
htmlhelp_basename = 'edXDocs'
project = u'edX Platform Developer Documentation'
copyright = u'2014, edX'
# --- Mock modules ------------------------------------------------------------
# Mock all the modules that the readthedocs build can't import
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
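# For example, once ``sys.modules['numpy'] = Mock()`` is installed (see below),
# ``import numpy`` succeeds during the Read the Docs build: attribute lookups
# whose name starts with a capital letter (class-like names) return dynamically
# created dummy types, while everything else returns a further Mock instance.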
# The list of modules and submodules that we know give RTD trouble.
# Make sure you've tried including the relevant package in
# docs/share/requirements.txt before adding to this list.
MOCK_MODULES = [
'bson',
'bson.errors',
'bson.objectid',
'dateutil',
'dateutil.parser',
'fs',
'fs.errors',
'fs.osfs',
'lazy',
'mako',
'mako.template',
'matplotlib',
'matplotlib.pyplot',
'mock',
'numpy',
'oauthlib',
'oauthlib.oauth1',
'oauthlib.oauth1.rfc5849',
'PIL',
'pymongo',
'pyparsing',
'pysrt',
'requests',
'scipy.interpolate',
'scipy.constants',
'scipy.optimize',
'yaml',
'webob',
'webob.multidict',
]
if on_rtd:
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# -----------------------------------------------------------------------------
# from http://djangosnippets.org/snippets/2533/
# autogenerate models definitions
import inspect
import types
from HTMLParser import HTMLParser
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_unicode, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if strings_only and isinstance(s, (types.NoneType, int)):
return s
if not isinstance(s, basestring,):
if hasattr(s, '__unicode__'):
s = unicode(s)
else:
s = unicode(str(s), encoding, errors)
elif not isinstance(s, unicode):
s = unicode(s, encoding, errors)
return s
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
def process_docstring(app, what, name, obj, options, lines):
"""Autodoc django models"""
# This causes import errors if left outside the function
from django.db import models
# If you want extract docs from django forms:
# from django import forms
# from django.forms.models import BaseInlineFormSet
# Only look at objects that inherit from Django's base MODEL class
if inspect.isclass(obj) and issubclass(obj, models.Model):
# Grab the field list from the meta class
fields = obj._meta._fields()
for field in fields:
# Decode and strip any html out of the field's help text
help_text = strip_tags(force_unicode(field.help_text))
# Decode and capitalize the verbose name, for use if there isn't
# any help text
verbose_name = force_unicode(field.verbose_name).capitalize()
if help_text:
# Add the model field to the end of the docstring as a param
# using the help text as the description
lines.append(u':param %s: %s' % (field.attname, help_text))
else:
# Add the model field to the end of the docstring as a param
# using the verbose name as the description
lines.append(u':param %s: %s' % (field.attname, verbose_name))
# Add the field's type to the docstring
lines.append(u':type %s: %s' % (field.attname, type(field).__name__))
return lines
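# Illustrative result (hypothetical field): for
#     name = models.CharField(help_text='Name shown on the dashboard')
# the loop above appends the two lines
#     :param name: Name shown on the dashboard
#     :type name: CharField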
def setup(app):
"""Setup docsting processors"""
#Register the docstring processor with sphinx
app.connect('autodoc-process-docstring', process_docstring)
| agpl-3.0 |
Myasuka/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
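# For instance, the string "the " contributes the 1- to 3-character grams
# 't', 'h', 'e', ' ', 'th', 'he', 'e ', 'the', 'he '; the relative frequencies
# of such short grams form a reasonably language-specific fingerprint.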
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
nre-aachen/GeMpy | gempy/DataManagement.py | 1 | 66518 | from __future__ import division
import os
from os import path
import sys
# This is for sphenix to find the packages
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
import copy
import numpy as np
import pandas as pn
from gempy import theanograf
import theano
class InputData(object):
"""
-DOCS NOT UPDATED- Class to import the raw data of the model and set data classifications into formations and series
Args:
extent (list): [x_min, x_max, y_min, y_max, z_min, z_max]
resolution (Optional[list]): [nx, ny, nz]. Defaults to [50, 50, 50]
path_i: Path to the csv file with the interface points. Default None
path_f: Path to the csv file with the foliation data. Default None
Attributes:
extent(list): [x_min, x_max, y_min, y_max, z_min, z_max]
resolution ((Optional[list])): [nx, ny, nz]
foliations(pandas.core.frame.DataFrame): Pandas data frame with the foliations data
interfaces(pandas.core.frame.DataFrame): Pandas data frame with the interfaces data
formations(numpy.ndarray): Dictionary that contains the name of the formations
series(pandas.core.frame.DataFrame): Pandas data frame which contains every formation within each series
"""
# TODO: Data management using pandas, find an easy way to add values
# TODO: Probably at some point I will have to make an static and dynamic data classes
def __init__(self,
extent,
resolution=[50, 50, 50],
path_i=None, path_f=None,
**kwargs):
# Set extent and resolution
self.extent = np.array(extent)
self.resolution = np.array(resolution)
self.n_faults = 0
# TODO choose the default source of data. So far only csv
# Create the pandas dataframes
# if we dont read a csv we create an empty dataframe with the columns that have to be filled
self.foliations = pn.DataFrame(columns=['X', 'Y', 'Z', 'dip', 'azimuth', 'polarity',
'formation', 'series', 'X_std', 'Y_std', 'Z_std',
'dip_std', 'azimuth_std'])
self.interfaces = pn.DataFrame(columns=['X', 'Y', 'Z', 'formation', 'series',
'X_std', 'Y_std', 'Z_std'])
if path_f or path_i:
self.import_data(path_i=path_i, path_f=path_f)
# DEP-
self._set_formations()
# If not provided set default series
self.series = self.set_series()
# DEP- self.set_formation_number()
# Compute gradients given azimuth and dips to plot data
self.calculate_gradient()
# Create default grid object. TODO: (Is this necessary now?)
self.grid = self.set_grid(extent=None, resolution=None, grid_type="regular_3D", **kwargs)
def import_data(self, path_i, path_f, **kwargs):
"""
Args:
path_i:
path_f:
**kwargs:
Returns:
"""
if path_f:
self.foliations = self.load_data_csv(data_type="foliations", path=path_f, **kwargs)
assert set(['X', 'Y', 'Z', 'dip', 'azimuth', 'polarity', 'formation']).issubset(self.foliations.columns), \
"One or more columns do not match with the expected values " + str(self.foliations.columns)
if path_i:
self.interfaces = self.load_data_csv(data_type="interfaces", path=path_i, **kwargs)
assert set(['X', 'Y', 'Z', 'formation']).issubset(self.interfaces.columns), \
"One or more columns do not match with the expected values " + str(self.interfaces.columns)
def _set_formations(self):
"""
-DEPRECATED- Function to import the formations that will be used later on. By default all the formations in the tables are
chosen.
Returns:
pandas.core.frame.DataFrame: Data frame with the raw data
"""
try:
# foliations may or may not be in all formations so we need to use interfaces
self.formations = self.interfaces["formation"].unique()
# TODO: Trying to make this more elegant?
# for el in self.formations:
# for check in self.formations:
# assert (el not in check or el == check), "One of the formations name contains other" \
# " string. Please rename." + str(el) + " in " + str(
# check)
# TODO: Add the possibility to change the name in pandas directly
# (adding just a 1 in the contained string)
except AttributeError:
pass
def calculate_gradient(self):
"""
Calculate the gradient vector of modulus 1 from dip, azimuth and polarity so that the foliations can be plotted
Returns:
self.foliations: extra columns with the xyz components of the unit vector.
"""
self.foliations['G_x'] = np.sin(np.deg2rad(self.foliations["dip"].astype('float'))) * \
np.sin(np.deg2rad(self.foliations["azimuth"].astype('float'))) * \
self.foliations["polarity"].astype('float')
self.foliations['G_y'] = np.sin(np.deg2rad(self.foliations["dip"].astype('float'))) * \
np.cos(np.deg2rad(self.foliations["azimuth"].astype('float'))) *\
self.foliations["polarity"].astype('float')
self.foliations['G_z'] = np.cos(np.deg2rad(self.foliations["dip"].astype('float'))) *\
self.foliations["polarity"].astype('float')
# DEP?
def create_grid(self, extent=None, resolution=None, grid_type="regular_3D", **kwargs):
"""
Method to initialize the class grid. So far is really simple and only has the regular grid type
Args:
grid_type (str): regular_3D or regular_2D (I am not even sure if regular 2D still working)
**kwargs: Arbitrary keyword arguments.
Returns:
self.grid(GeMpy_core.grid): Object that contain different grids
"""
if not extent:
extent = self.extent
if not resolution:
resolution = self.resolution
return self.GridClass(extent, resolution, grid_type=grid_type, **kwargs)
def set_grid(self, new_grid=None, extent=None, resolution=None, grid_type="regular_3D", **kwargs):
"""
Method to initialize the class new_grid. So far is really simple and only has the regular new_grid type
Args:
grid_type (str): regular_3D or regular_2D (I am not even sure if regular 2D still working)
**kwargs: Arbitrary keyword arguments.
Returns:
self.new_grid(GeMpy_core.new_grid): Object that contain different grids
"""
if new_grid is not None:
assert new_grid.shape[1] == 3 and len(new_grid.shape) == 2, 'The shape of new grid must be (n, 3) where n is ' \
'the number of points of the grid'
self.grid.grid = new_grid
else:
if not extent:
extent = self.extent
if not resolution:
resolution = self.resolution
return self.GridClass(extent, resolution, grid_type=grid_type, **kwargs)
def data_to_pickle(self, path=False):
if not path:
path = './geo_data'
import pickle
with open(path+'.pickle', 'wb') as f:
# Pickle the 'data' dictionary using the highest protocol available.
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
def get_raw_data(self, itype='all'):
"""
Method that returns the interfaces and foliations pandas Dataframes. Can return both at the same time or only
one of the two
Args:
itype: input data type, either 'foliations', 'interfaces' or 'all' for both.
Returns:
pandas.core.frame.DataFrame: Data frame with the raw data
"""
import pandas as pn
if itype == 'foliations':
raw_data = self.foliations
elif itype == 'interfaces':
raw_data = self.interfaces
elif itype == 'all':
raw_data = pn.concat([self.interfaces, self.foliations], keys=['interfaces', 'foliations'])
return raw_data
def i_open_set_data(self, itype="foliations"):
"""
Method to have interactive pandas tables in jupyter notebooks. The idea is to use this method to interact with
the table and i_close_set_data to recompute the parameters that depend on the changes made. I did not find an
easier solution than calling two different methods.
Args:
itype: input data type, either 'foliations' or 'interfaces'
Returns:
pandas.core.frame.DataFrame: Data frame with the changed data on real time
"""
# if the data frame is empty the interactive table is bugged. Therefore I create a default raw when the method
# is called
if self.foliations.empty:
self.foliations = pn.DataFrame(
np.array([0., 0., 0., 0., 0., 1., 'Default Formation', 'Default series']).reshape(1, 8),
columns=['X', 'Y', 'Z', 'dip', 'azimuth', 'polarity', 'formation', 'series']).\
convert_objects(convert_numeric=True)
if self.interfaces.empty:
self.interfaces = pn.DataFrame(
np.array([0, 0, 0, 'Default Formation', 'Default series']).reshape(1, 5),
columns=['X', 'Y', 'Z', 'formation', 'series']).convert_objects(convert_numeric=True)
# TODO leave qgrid as a dependency since in the end I did not change the code of the package
import qgrid
# Setting some options
qgrid.nbinstall(overwrite=True)
qgrid.set_defaults(show_toolbar=True)
assert itype in ('foliations', 'interfaces'), 'itype must be either foliations or interfaces'
import warnings
warnings.warn('Remember to call i_close_set_data after the editing.')
# We kind of set the show grid to a variable so we can close it afterwards
self.pandas_frame = qgrid.show_grid(self.get_raw_data(itype=itype))
# TODO set
def i_close_set_data(self):
"""
Method to have interactive pandas tables in jupyter notebooks. The idea is to use this method to interact with
the table and i_close_set_data to recompute the parameters that depend on the changes made. I did not find an
easier solution than calling two different methods.
Args:
itype: input data type, either 'foliations' or 'interfaces'
Returns:
pandas.core.frame.DataFrame: Data frame with the changed data on real time
"""
# We close it to guarantee that after this method it is not possible further modifications
self.pandas_frame.close()
# -DEP- self._set_formations()
# -DEP- self.set_formation_number()
# Set parameters
self.series = self.set_series()
self.calculate_gradient()
@staticmethod
def load_data_csv(data_type, path=os.getcwd(), **kwargs):
"""
Method to load either interface or foliations data csv files. Normally this is in which GeoModeller exports it
Args:
data_type (str): 'interfaces' or 'foliations'
path (str): path to the files. Default os.getcwd()
**kwargs: Arbitrary keyword arguments.
Returns:
pandas.core.frame.DataFrame: Data frame with the raw data
"""
# TODO: in case that the columns have a different name specify in pandas which columns are interfaces /
# coordinates, dips and so on.
# TODO: use pandas to read any format file not only csv
if data_type == "foliations":
return pn.read_csv(path, **kwargs)
elif data_type == 'interfaces':
return pn.read_csv(path, **kwargs)
else:
raise NameError('Data type not understood. Try interfaces or foliations')
# TODO if we load different data the Interpolator parameters must be also updated. Prob call gradients and
# series
def set_interfaces(self, interf_Dataframe, append=False):
"""
Method to change or append a Dataframe to interfaces in place.
Args:
interf_Dataframe: pandas.core.frame.DataFrame with the data
append: Bool: if you want to append the new data frame or substitute it
"""
assert set(['X', 'Y', 'Z', 'formation']).issubset(interf_Dataframe.columns), \
"One or more columns do not match with the expected values " + str(interf_Dataframe.columns)
if append:
self.interfaces = self.interfaces.append(interf_Dataframe)
else:
self.interfaces = interf_Dataframe
self._set_formations()
self.set_series()
#self.set_formation_number()
self.interfaces.reset_index(drop=True, inplace=True)
def set_foliations(self, foliat_Dataframe, append=False):
"""
Method to change or append a Dataframe to foliations in place.
Args:
interf_Dataframe: pandas.core.frame.DataFrame with the data
append: Bool: if you want to append the new data frame or substitute it
"""
assert set(['X', 'Y', 'Z', 'dip', 'azimuth', 'polarity', 'formation']).issubset(
foliat_Dataframe.columns), "One or more columns do not match with the expected values " +\
str(foliat_Dataframe.columns)
if append:
self.foliations = self.foliations.append(foliat_Dataframe)
else:
self.foliations = foliat_Dataframe
self._set_formations()
self.set_series()
#self.set_formation_number()
self.calculate_gradient()
self.foliations.reset_index(drop=True, inplace=True)
def set_series(self, series_distribution=None, order=None):
"""
Method to define the different series of the project
Args:
series_distribution (dict): with the name of the series as key and the name of the formations as values.
order(Optional[list]): order of the series. By default it takes the dictionary keys, which until Python 3.6 are
unordered. This is important to set the erosion relations between the different series
Returns:
self.series: A pandas DataFrame with the series and formations relations
self.interfaces: one extra column with the given series
self.foliations: one extra column with the given series
"""
if series_distribution is None:
# set to default series
# TODO see if some of the formations have already a series and not overwrite
_series = {"Default serie": self.interfaces["formation"].unique()}
else:
assert type(series_distribution) is dict, "series_distribution must be a dictionary, " \
"see Docstring for more information"
# TODO if self.series exist already maybe we should append instead of overwrite
_series = series_distribution
# The order of the series is very important since it dictates which one is on top of the stratigraphic pile
# If it is not given we take the dictionaries keys. NOTICE that until python 3.6 these keys are pretty much
# random
if not order:
order = _series.keys()
# TODO assert len order is equal to len of the dictionary
# We create a dataframe with the links
_series = pn.DataFrame(data=_series, columns=order)
# Now we fill the column series in the interfaces and foliations tables with the corresponding series and
# assigned number to the series
self.interfaces["series"] = [(i == _series).sum().argmax() for i in self.interfaces["formation"]]
self.interfaces["order_series"] = [(i == _series).sum().as_matrix().argmax() + 1
for i in self.interfaces["formation"]]
self.foliations["series"] = [(i == _series).sum().argmax() for i in self.foliations["formation"]]
self.foliations["order_series"] = [(i == _series).sum().as_matrix().argmax() + 1
for i in self.foliations["formation"]]
# We sort the series, although it is only important for the computation (we will do it again just before computing)
self.interfaces.sort_values(by='order_series', inplace=True)
self.foliations.sort_values(by='order_series', inplace=True)
# Save the dataframe in a property
self.series = _series
# Set default faults
faults_series = []
for i in self.series.columns:
if ('fault' in i or 'Fault' in i) and 'Default' not in i:
faults_series.append(i)
self.set_faults(faults_series)
self.reset_indices()
return _series
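# Illustrative call (series and formation names are placeholders):
#     geo_data.set_series({"fault_series": ["MainFault"],
#                          "younger": ["Layer1", "Layer2"]},
#                         order=["fault_series", "younger"])
# Note that any series whose name contains 'fault'/'Fault' (and not 'Default')
# is flagged as a fault series by the set_faults call above.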
def set_faults(self, series_name):
"""
Args:
series_name(list or array_like):
Returns:
"""
if not len(series_name) == 0:
self.interfaces['isFault'] = self.interfaces['series'].isin(series_name)
self.foliations['isFault'] = self.foliations['series'].isin(series_name)
self.n_faults = len(series_name)
def set_formation_number(self, formation_order):
"""
Set a unique number to each formation. NOTE: this method is getting deprecated since the user does not need
to know it and also now the numbers must be set in the order of the series as well. Therefore this method
has been moved to the interpolator class as preprocessing
Returns: Column in the interfaces and foliations dataframes
"""
try:
ip_addresses = formation_order
ip_dict = dict(zip(ip_addresses, range(1, len(ip_addresses)+1)))
self.interfaces['formation number'] = self.interfaces['formation'].replace(ip_dict)
self.foliations['formation number'] = self.foliations['formation'].replace(ip_dict)
except ValueError:
pass
def reset_indices(self):
"""
Resets dataframe indices for foliations and interfaces.
Returns: Nothing
"""
self.interfaces.reset_index(inplace=True, drop=True)
self.foliations.reset_index(inplace=True, drop=True)
def interface_modify(self, index, **kwargs):
"""
Allows modification of the x,y and/or z-coordinates of an interface at specified dataframe index.
Args:
index: dataframe index of the foliation point
**kwargs: X, Y, Z (int or float)
Returns: Nothing
"""
for key in kwargs:
self.interfaces.ix[index, str(key)] = kwargs[key]
def interface_add(self, **kwargs):
"""
Adds interface to dataframe.
Args:
**kwargs: X, Y, Z, formation, labels, order_series, series
Returns: Nothing
"""
l = len(self.interfaces)
for key in kwargs:
self.interfaces.ix[l, str(key)] = kwargs[key]
def interface_drop(self, index):
"""
Drops interface from dataframe identified by index
Args:
index: dataframe index
Returns: Nothing
"""
self.interfaces.drop(index, inplace=True)
def foliation_modify(self, index, **kwargs):
"""
Allows modification of foliation data at specified dataframe index.
Args:
index: dataframe index of the foliation point
**kwargs: G_x, G_y, G_z, X, Y, Z, azimuth, dip, formation, labels, order_series, polarity
Returns: Nothing
"""
for key in kwargs:
self.foliations.ix[index, str(key)] = kwargs[key]
def foliation_add(self, **kwargs):
"""
Adds foliation to dataframe.
Args:
**kwargs: G_x, G_y, G_z, X, Y, Z, azimuth, dip, formation, labels, order_series, polarity, series
Returns: Nothing
"""
l = len(self.foliations)
for key in kwargs:
self.foliations.ix[l, str(key)] = kwargs[key]
def foliations_drop(self, index):
"""
Drops foliation from dataframe identified by index
Args:
index: dataframe index
Returns: Nothing
"""
self.foliations.drop(index, inplace=True)
def get_formation_number(self):
pn_series = self.interfaces.groupby('formation number').formation.unique()
ip_addresses = {}
for e, i in enumerate(pn_series):
ip_addresses[i[0]] = e + 1
ip_addresses['DefaultBasement'] = 0
return ip_addresses
# TODO think where this function should go
def read_vox(self, path):
"""
read vox from geomodeller and transform it to gempy format
Returns:
numpy.array: block model
"""
geo_res = pn.read_csv(path)
geo_res = geo_res.iloc[9:]
#ip_addresses = geo_res['nx 50'].unique() # geo_data.interfaces["formation"].unique()
ip_dict = self.get_formation_number()
geo_res_num = geo_res.iloc[:, 0].replace(ip_dict)
block_geomodeller = np.ravel(geo_res_num.as_matrix().reshape(
self.resolution[0], self.resolution[1], self.resolution[2], order='C').T)
return block_geomodeller
class GridClass(object):
"""
-DOCS NOT UPDATED- Class with a set of functions to generate grids
Args:
extent (list): [x_min, x_max, y_min, y_max, z_min, z_max]
resolution (list): [nx, ny, nz].
grid_type(str): Type of grid. So far only regular 3D is implemented
"""
def __init__(self, extent, resolution, grid_type="regular_3D"):
self._grid_ext = extent
self._grid_res = resolution
if grid_type == "regular_3D":
self.grid = self.create_regular_grid_3d()
elif grid_type == "regular_2D":
self.grid = self.create_regular_grid_2d()
else:
print("Wrong type")
def create_regular_grid_3d(self):
"""
Method to create the 3D regular grid on which the interpolation is carried out
Returns:
numpy.ndarray: Unraveled 3D numpy array where every row corresponds to the xyz coordinates of one regular-grid node
"""
g = np.meshgrid(
np.linspace(self._grid_ext[0], self._grid_ext[1], self._grid_res[0], dtype="float32"),
np.linspace(self._grid_ext[2], self._grid_ext[3], self._grid_res[1], dtype="float32"),
np.linspace(self._grid_ext[4], self._grid_ext[5], self._grid_res[2], dtype="float32"), indexing="ij"
)
# self.grid = np.vstack(map(np.ravel, g)).T.astype("float32")
return np.vstack(map(np.ravel, g)).T.astype("float32")
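# The returned array has shape (nx * ny * nz, 3): one row of (x, y, z)
# coordinates per grid node, ordered with 'ij' indexing so that x varies slowest.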
# DEP!
class InterpolatorClass(object):
"""
-DOCS NOT UPDATED- Class which contains all the methods needed to perform potential-field implicit modelling in theano
Args:
_data(GeMpy_core.DataManagement): All values of a DataManagement object
_grid(GeMpy_core.grid): A grid object
**kwargs: Arbitrary keyword arguments.
Keyword Args:
verbose(int): Level of verbosity during the execution of the functions (up to 5). Default 0
"""
def __init__(self, _data_scaled, _grid_scaled=None, *args, **kwargs):
# verbose is a list of strings. See theanograph
verbose = kwargs.get('verbose', [0])
# -DEP-rescaling_factor = kwargs.get('rescaling_factor', None)
# Here we can change the dtype for stability and GPU vs CPU
dtype = kwargs.get('dtype', 'float32')
self.dtype = dtype
range_var = kwargs.get('range_var', None)
# Drift grade
u_grade = kwargs.get('u_grade', [2, 2])
# We hide the scaled copy of DataManagement object from the user. The scaling happens in gempy, which is a
# bit weird. Maybe at some point I should bring the function to this module
self._data_scaled = _data_scaled
# In case someone wants to provide a grid otherwise we extract it from the DataManagement object.
if not _grid_scaled:
self._grid_scaled = _data_scaled.grid
else:
self._grid_scaled = _grid_scaled
# Importing the theano graph. The methods of this object generate different parts of graph.
# See theanograf doc
self.tg = theanograf.TheanoGraph_pro(dtype=dtype, verbose=verbose,)
# Sorting data in case the user provides it unordered
self.order_table()
# Setting theano parameters
self.set_theano_shared_parameteres(range_var=range_var)
# Extracting data from the pandas dataframe to numpy array in the required form for the theano function
self.data_prep(u_grade=u_grade)
# Avoid crashing my pc
import theano
if theano.config.optimizer != 'fast_run':
assert self.tg.grid_val_T.get_value().shape[0] * \
np.math.factorial(len(self.tg.len_series_i.get_value())) < 2e7, \
'The grid is too big for the number of potential fields. Reduce the grid or change the ' \
'optimization flag to fast run'
def set_formation_number(self):
"""
Set a unique number to each formation. NOTE: this method is getting deprecated since the user does not need
to know it and also now the numbers must be set in the order of the series as well. Therefore this method
has been moved to the interpolator class as preprocessing
Returns: Column in the interfaces and foliations dataframes
"""
try:
ip_addresses = self._data_scaled.interfaces["formation"].unique()
ip_dict = dict(zip(ip_addresses, range(1, len(ip_addresses) + 1)))
self._data_scaled.interfaces['formation number'] = self._data_scaled.interfaces['formation'].replace(ip_dict)
self._data_scaled.foliations['formation number'] = self._data_scaled.foliations['formation'].replace(ip_dict)
except ValueError:
pass
def order_table(self):
"""
First we sort the dataframes by the series age. Then we set a unique number for every formation and resort
the formations. All inplace
"""
# We order the pandas table by series
self._data_scaled.interfaces.sort_values(by=['order_series'], # , 'formation number'],
ascending=True, kind='mergesort',
inplace=True)
self._data_scaled.foliations.sort_values(by=['order_series'], # , 'formation number'],
ascending=True, kind='mergesort',
inplace=True)
# Give formation number
if 'formation number' not in self._data_scaled.interfaces.columns:
print('Formation numbers are missing; setting them now')
self.set_formation_number()
# We order the pandas table by formation (also by series in case something weird happened)
self._data_scaled.interfaces.sort_values(by=['order_series', 'formation number'],
ascending=True, kind='mergesort',
inplace=True)
self._data_scaled.foliations.sort_values(by=['order_series', 'formation number'],
ascending=True, kind='mergesort',
inplace=True)
# Pandas dataframe set an index to every row when the dataframe is created. Sorting the table does not reset
# the index. For some of the methods (pn.drop) we have to apply afterwards we need to reset these indeces
self._data_scaled.interfaces.reset_index(drop=True, inplace=True)
def data_prep(self, **kwargs):
"""
Ideally this method will extract the data from the pandas dataframes to individual numpy arrays to be input
of the theano function. However, since some of the shared parameters are functions of these arrays' shapes, I also
set them here
Returns:
idl (list): List of arrays which are the input for the theano function:
- numpy.array: dips_position
- numpy.array: dip_angles
- numpy.array: azimuth
- numpy.array: polarity
- numpy.array: ref_layer_points
- numpy.array: rest_layer_points
"""
u_grade = kwargs.get('u_grade', None)
# ==================
# Extracting lengths
# ==================
# Array containing the size of every formation. Interfaces
len_interfaces = np.asarray(
[np.sum(self._data_scaled.interfaces['formation number'] == i)
for i in self._data_scaled.interfaces['formation number'].unique()])
# Size of every layer in rests. SHARED (for theano)
len_rest_form = (len_interfaces - 1)
self.tg.number_of_points_per_formation_T.set_value(len_rest_form)
# Position of the first point of every layer
ref_position = np.insert(len_interfaces[:-1], 0, 0).cumsum()
# Drop the reference points using pandas indeces to get just the rest_layers array
pandas_rest_layer_points = self._data_scaled.interfaces.drop(ref_position)
self.pandas_rest_layer_points = pandas_rest_layer_points
# TODO: do I need this? PYTHON
# DEP- because per series the foliations do not belong to a formation but to the whole series
# len_foliations = np.asarray(
# [np.sum(self._data_scaled.foliations['formation number'] == i)
# for i in self._data_scaled.foliations['formation number'].unique()])
# -DEP- I think this was just a kind of print to know what was going on
#self.pandas_rest = pandas_rest_layer_points
# Array containing the size of every series. Interfaces.
len_series_i = np.asarray(
[np.sum(pandas_rest_layer_points['order_series'] == i)
for i in pandas_rest_layer_points['order_series'].unique()])
# Cumulative length of the series. We add the 0 at the beginning and set the shared value. SHARED
self.tg.len_series_i.set_value(np.insert(len_series_i, 0, 0).cumsum())
# Array containing the size of every series. Foliations.
len_series_f = np.asarray(
[np.sum(self._data_scaled.foliations['order_series'] == i)
for i in self._data_scaled.foliations['order_series'].unique()])
# Cumulative length of the series. We add the 0 at the beginning and set the shared value. SHARED
self.tg.len_series_f.set_value(np.insert(len_series_f, 0, 0).cumsum())
# =========================
# Choosing Universal drifts
# =========================
if u_grade is None:
u_grade = np.zeros_like(len_series_i)
u_grade[len_series_i > 12] = 9
u_grade[(len_series_i > 6) & (len_series_i < 12)] = 3
print(u_grade)
# it seems I have to pass list instead array_like that is weird
self.tg.u_grade_T.set_value(list(u_grade))
# ================
# Prepare Matrices
# ================
# Rest layers matrix # PYTHON VAR
rest_layer_points = pandas_rest_layer_points[['X', 'Y', 'Z']].as_matrix()
# TODO delete
# -DEP- Again i was just a check point
# self.rest_layer_points = rest_layer_points
# Ref layers matrix #VAR
# Calculation of the ref matrix and tile. Iloc works with the row number
# Here we extract the reference points
aux_1 = self._data_scaled.interfaces.iloc[ref_position][['X', 'Y', 'Z']].as_matrix()
# We initialize the matrix
ref_layer_points = np.zeros((0, 3))
# TODO I hate loop it has to be a better way
# Tiling very reference points as many times as rest of the points we have
for e, i in enumerate(len_interfaces):
ref_layer_points = np.vstack((ref_layer_points, np.tile(aux_1[e], (i - 1, 1))))
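# e.g. a formation sampled at points p0..p3 contributes ref = [p0, p0, p0] and
# rest = [p1, p2, p3]; each (ref, rest) pair expresses that both points lie on
# the same scalar-field isovalue in the co-kriging system.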
# -DEP- was just a check point
#self.ref_layer_points = ref_layer_points
# Check no reference points in rest points (at least in coor x)
assert not any(aux_1[:, 0]) in rest_layer_points[:, 0], \
'A reference point is in the rest list point. Check you do ' \
'not have duplicated values in your dataframes'
# Foliations, this ones I tile them inside theano. PYTHON VAR
dips_position = self._data_scaled.foliations[['X', 'Y', 'Z']].as_matrix()
dip_angles = self._data_scaled.foliations["dip"].as_matrix()
azimuth = self._data_scaled.foliations["azimuth"].as_matrix()
polarity = self._data_scaled.foliations["polarity"].as_matrix()
# Set all in a list casting them in the chosen dtype
idl = [np.cast[self.dtype](xs) for xs in (dips_position, dip_angles, azimuth, polarity,
ref_layer_points, rest_layer_points)]
return idl
def set_theano_shared_parameteres(self, **kwargs):
"""
Here we create most of the kriging parameters. The user can pass them as kwargs; otherwise we pick the
default values from the DataManagement info. The shared variables are set in place. All the parameters here
are independent of the input data, so this function only has to be called if you change the extent or grid or
if you want to change one of the kriging parameters.
Args:
_data_rescaled: DataManagement object
_grid_rescaled: Grid object
Keyword Args:
u_grade (int): Drift grade. Default to 2.
range_var (float): Range of the variogram. Default 3D diagonal of the extent
c_o (float): Covariance at lag 0. Default range_var ** 2 / 14 / 3. See my paper when I write it
nugget_effect (float): Nugget effect of foliations. Default to 0.01
"""
# Kwargs
u_grade = kwargs.get('u_grade', 2)
range_var = kwargs.get('range_var', None)
c_o = kwargs.get('c_o', None)
nugget_effect = kwargs.get('nugget_effect', 0.01)
# -DEP- Now I rescale the data so we do not need this
# rescaling_factor = kwargs.get('rescaling_factor', None)
# Default range
if not range_var:
range_var = np.sqrt((self._data_scaled.extent[0] - self._data_scaled.extent[1]) ** 2 +
(self._data_scaled.extent[2] - self._data_scaled.extent[3]) ** 2 +
(self._data_scaled.extent[4] - self._data_scaled.extent[5]) ** 2)
# Default covariance at 0
if not c_o:
c_o = range_var ** 2 / 14 / 3
# Asserting that the drift grade is in this range
# assert (0 <= all(u_grade) <= 2)
# Creating the drift matrix. TODO find the official name of this matrix?
_universal_matrix = np.vstack((self._grid_scaled.grid.T,
(self._grid_scaled.grid ** 2).T,
self._grid_scaled.grid[:, 0] * self._grid_scaled.grid[:, 1],
self._grid_scaled.grid[:, 0] * self._grid_scaled.grid[:, 2],
self._grid_scaled.grid[:, 1] * self._grid_scaled.grid[:, 2]))
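# Each row of _universal_matrix is one monomial of the second-order drift
# evaluated on the grid: x, y, z, x**2, y**2, z**2, x*y, x*z, y*z.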
# Setting shared variables
# Range
self.tg.a_T.set_value(np.cast[self.dtype](range_var))
# Covariance at 0
self.tg.c_o_T.set_value(np.cast[self.dtype](c_o))
# Foliations nugget effect
self.tg.nugget_effect_grad_T.set_value(np.cast[self.dtype](nugget_effect))
# TODO change the drift to the same style I have the faults so I do not need to do this
# # Drift grade
# if u_grade == 0:
# self.tg.u_grade_T.set_value(u_grade)
# else:
# self.tg.u_grade_T.set_value(u_grade)
# TODO: To be sure what is the mathematical meaning of this -> It seems that nothing
# TODO Deprecated
# self.tg.c_resc.set_value(1)
# Just grid. I add a small number to avoid problems with the origin point
self.tg.grid_val_T.set_value(np.cast[self.dtype](self._grid_scaled.grid + 10e-6))
# Universal grid
self.tg.universal_grid_matrix_T.set_value(np.cast[self.dtype](_universal_matrix + 1e-10))
# Initialization of the block model
self.tg.final_block.set_value(np.zeros((1, self._grid_scaled.grid.shape[0]), dtype='float32'))
# Initialization of the boolean array that represent the areas of the block model to be computed in the
# following series
#self.tg.yet_simulated.set_value(np.ones((_grid_rescaled.grid.shape[0]), dtype='int'))
# Unique number assigned to each lithology
#self.tg.n_formation.set_value(np.insert(_data_rescaled.interfaces['formation number'].unique(),
# 0, 0)[::-1])
self.tg.n_formation.set_value(self._data_scaled.interfaces['formation number'].unique())
# Number of formations per series. The function is not pretty but the result is quite clear
self.tg.n_formations_per_serie.set_value(
np.insert(self._data_scaled.interfaces.groupby('order_series').formation.nunique().values.cumsum(), 0, 0))
def get_kriging_parameters(self, verbose=0):
# range
print('range', self.tg.a_T.get_value(), self.tg.a_T.get_value() * self._data_scaled.rescaling_factor)
# Number of drift equations
print('Number of drift equations', self.tg.u_grade_T.get_value())
# Covariance at 0
print('Covariance at 0', self.tg.c_o_T.get_value())
# Foliations nugget effect
print('Foliations nugget effect', self.tg.nugget_effect_grad_T.get_value())
if verbose > 0:
# Input data shapes
# Length of the interfaces series
print('Length of the interfaces series', self.tg.len_series_i.get_value())
# Length of the foliations series
print('Length of the foliations series', self.tg.len_series_f.get_value())
# Number of formation
print('Number of formations', self.tg.n_formation.get_value())
# Number of formations per series
print('Number of formations per series', self.tg.n_formations_per_serie.get_value())
# Number of points per formation
print('Number of points per formation (rest)', self.tg.number_of_points_per_formation_T.get_value())
class InterpolatorInput:
def __init__(self, geo_data, compile_theano=True, compute_all=True, u_grade=None, rescaling_factor=None, **kwargs):
# TODO add all options before compilation in here. Basically this is n_faults, n_layers, verbose, dtype, and \
# only block or all
assert isinstance(geo_data, InputData), 'You need to pass an InputData object'
# Here we can change the dtype for stability and GPU vs CPU
self.dtype = kwargs.get('dtype', 'float32')
#self.in_data = self.rescale_data(geo_data, rescaling_factor=rescaling_factor)
# Set some parameters. TODO possibly this should go in kwargs
self.u_grade = u_grade
# These properties get set when calling rescale_data
self.rescaling_factor = None
self.centers = None
self.extent_rescaled = None
# Rescaling
self.data = self.rescale_data(geo_data, rescaling_factor=rescaling_factor)
# Creating interpolator class with all the precompilation options
self.interpolator = self.set_interpolator(**kwargs)
if compile_theano:
self.th_fn = self.compile_th_fn(compute_all=compute_all)
# DEP all options since it goes in set_interpolator
def compile_th_fn(self, compute_all=True, dtype=None, u_grade=None, **kwargs):
"""
Args:
geo_data:
**kwargs:
Returns:
"""
# Choosing float precision for the computation
if not dtype:
if theano.config.device == 'gpu':
dtype = 'float32'
else:
dtype = 'float64'
# We make a rescaled version of geo_data for stability reasons
#data_interp = self.set_interpolator(geo_data, dtype=dtype)
# These are the shared parameters and the compilation of the function. This will be hidden as well at some point
input_data_T = self.interpolator.tg.input_parameters_list()
# This prepares the user data to the theano function
# input_data_P = data_interp.interpolator.data_prep(u_grade=u_grade)
# Then we compile; we have to pass the number of formations that are faults!!
th_fn = theano.function(input_data_T, self.interpolator.tg.whole_block_model(self.data.n_faults,
compute_all=compute_all),
on_unused_input='ignore',
allow_input_downcast=False,
profile=False)
return th_fn
def rescale_data(self, geo_data, rescaling_factor=None):
"""
Rescale the data of a DataManagement object between 0 and 1 due to stability problems with float32.
Args:
geo_data: DataManagement object with the real scale data
rescaling_factor(float): factor of the rescaling. Defaults to twice the maximum extent along any axis
Returns:
"""
# TODO split this function in compute rescaling factor and rescale z
max_coord = pn.concat(
[geo_data.foliations, geo_data.interfaces]).max()[['X', 'Y', 'Z']]
min_coord = pn.concat(
[geo_data.foliations, geo_data.interfaces]).min()[['X', 'Y', 'Z']]
if not rescaling_factor:
rescaling_factor = 2 * np.max(max_coord - min_coord)
centers = (max_coord + min_coord) / 2
new_coord_interfaces = (geo_data.interfaces[['X', 'Y', 'Z']] -
centers) / rescaling_factor + 0.5001
new_coord_foliations = (geo_data.foliations[['X', 'Y', 'Z']] -
centers) / rescaling_factor + 0.5001
try:
geo_data.interfaces[['X_std', 'Y_std', 'Z_std']] = (geo_data.interfaces[
['X_std', 'Y_std', 'Z_std']]) / rescaling_factor
geo_data.foliations[['X_std', 'Y_std', 'Z_std']] = (geo_data.foliations[
['X_std', 'Y_std', 'Z_std']]) / rescaling_factor
except KeyError:
pass
new_coord_extent = (geo_data.extent - np.repeat(centers, 2)) / rescaling_factor + 0.5001
geo_data_rescaled = copy.deepcopy(geo_data)
geo_data_rescaled.interfaces[['X', 'Y', 'Z']] = new_coord_interfaces
geo_data_rescaled.foliations[['X', 'Y', 'Z']] = new_coord_foliations
geo_data_rescaled.extent = new_coord_extent.as_matrix()
geo_data_rescaled.grid.grid = (geo_data.grid.grid - centers.as_matrix()) / rescaling_factor + 0.5001
self.rescaling_factor = rescaling_factor
geo_data_rescaled.rescaling_factor = rescaling_factor
self.centers = centers
self.extent_rescaled = new_coord_extent
return geo_data_rescaled
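# Illustrative numbers (a sketch, not part of the class): with an extent of
# X in [0, 2000] m, Y in [0, 2000] m and Z in [-1000, 0] m, the largest axis
# span is 2000, so rescaling_factor = 2 * 2000 = 4000 and the X center is 1000.
# A point with X = 2000 therefore maps to (2000 - 1000) / 4000 + 0.5001 = 0.7501,
# i.e. all rescaled coordinates end up roughly within [0.25, 0.75] around 0.5.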
# DEP?
def set_airbore_plane(self, z, res_grav):
# Rescale z
z_res = (z-self.centers[2])/self.rescaling_factor + 0.5001
# Create xy meshgrid
xy = np.meshgrid(np.linspace(self.extent_rescaled.iloc[0],
self.extent_rescaled.iloc[1], res_grav[0]),
np.linspace(self.extent_rescaled.iloc[2],
self.extent_rescaled.iloc[3], res_grav[1]))
z = np.ones(res_grav[0]*res_grav[1])*z_res
# Transformation
xy_ravel = np.vstack(map(np.ravel, xy))
airborne_plane = np.vstack((xy_ravel, z)).T.astype(self.dtype)
return airborne_plane
def set_interpolator(self, geo_data = None, *args, **kwargs):
"""
Method to initialize the class interpolator. All the constant parameters for the interpolation can be passed
as args, otherwise they will take the default value (TODO: documentation of the default values)
Args:
*args: Variable length argument list
**kwargs: Arbitrary keyword arguments.
Keyword Args:
range_var: Range of the variogram. Default None
c_o: Covariance at 0. Default None
nugget_effect: Nugget effect of the gradients. Default 0.01
u_grade: Grade of the polynomial used in the universal part of the Kriging. Default 2
rescaling_factor: Magic factor that multiplies the covariances. Default 2
Returns:
self.Interpolator (GeMpy_core.Interpolator): Object to perform the potential field method
self.Plot(GeMpy_core.PlotData): Object to visualize data and results. It gets updated.
"""
if 'u_grade' in kwargs:
compile_theano = True
range_var = kwargs.get('range_var', None)
rescaling_factor = kwargs.get('rescaling_factor', None)
#DEP?
#if not getattr(geo_data, 'grid', None):
# set_grid(geo_data)
if geo_data:
geo_data_in = self.rescale_data(geo_data, rescaling_factor=rescaling_factor)
self.data = geo_data_in
else:
geo_data_in = self.data
# First creation
if not getattr(self, 'interpolator', None):
print('I am in the setting')
interpolator = self.InterpolatorClass(geo_data_in, geo_data_in.grid, *args, **kwargs)
# Update
else:
print('I am in update')
self.interpolator._data_scaled = geo_data_in
self.interpolator._grid_scaled = geo_data_in.grid
self.interpolator.order_table()
self.interpolator.set_theano_shared_parameteres(range_var=range_var)
interpolator = None
return interpolator
def update_interpolator(self, geo_data=None, *args, **kwargs):
"""
Update variables without compiling the theano function
Args:
geo_data:
*args:
**kwargs:
Returns:
"""
if 'u_grade' in kwargs:
compile_theano = True
range_var = kwargs.get('range_var', None)
rescaling_factor = kwargs.get('rescaling_factor', None)
if geo_data:
geo_data_in = self.rescale_data(geo_data, rescaling_factor=rescaling_factor)
self.data = geo_data_in
else:
geo_data_in = self.data
print('I am in update')
self.interpolator._data_scaled = geo_data_in
self.interpolator._grid_scaled = geo_data_in.grid
self.interpolator.order_table()
self.interpolator.set_theano_shared_parameteres(range_var=range_var)
def get_input_data(self, u_grade=None):
if not u_grade:
u_grade = self.u_grade
return self.interpolator.data_prep(u_grade=u_grade)
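# Rough usage sketch (assuming `geo_data` is an InputData object loaded
# elsewhere; the exact argument order of the compiled function is defined by
# tg.input_parameters_list()):
#
# interp_data = InterpolatorInput(geo_data, compile_theano=True, compute_all=True)
# inputs = interp_data.get_input_data()   # numpy arrays prepared by data_prep
# block = interp_data.th_fn(*inputs)      # evaluate the compiled theano function
#
# The rescaled copy of the data lives in interp_data.data and the rescaling
# factor in interp_data.rescaling_factor.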
class InterpolatorClass(object):
"""
-DOCS NOT UPDATED- Class which contains all needed methods to perform potential field implicit modelling in theano
Args:
_data(GeMpy_core.DataManagement): All values of a DataManagement object
_grid(GeMpy_core.grid): A grid object
**kwargs: Arbitrary keyword arguments.
Keyword Args:
verbose(int): Level of verbosity during the execution of the functions (up to 5). Default 0
"""
def __init__(self, _data_scaled, _grid_scaled=None, *args, **kwargs):
# verbose is a list of strings. See theanograph
verbose = kwargs.get('verbose', [0])
# -DEP-rescaling_factor = kwargs.get('rescaling_factor', None)
# Here we can change the dtype for stability and GPU vs CPU
dtype = kwargs.get('dtype', 'float32')
self.dtype = dtype
print(self.dtype)
range_var = kwargs.get('range_var', None)
# Drift grade
u_grade = kwargs.get('u_grade', [2, 2])
# We hide the scaled copy of the DataManagement object from the user. The scaling happens in gempy, which is a
# bit weird. Maybe at some point I should bring the function to this module
self._data_scaled = _data_scaled
# In case someone wants to provide a grid; otherwise we extract it from the DataManagement object.
if not _grid_scaled:
self._grid_scaled = _data_scaled.grid
else:
self._grid_scaled = _grid_scaled
# Importing the theano graph. The methods of this object generate different parts of graph.
# See theanograf doc
self.tg = theanograf.TheanoGraph_pro(dtype=dtype, verbose=verbose,)
# Sorting data in case the user provides it unordered
self.order_table()
# Setting theano parameters
self.set_theano_shared_parameteres(range_var=range_var)
# Extracting data from the pandas dataframe to numpy array in the required form for the theano function
self.data_prep(u_grade=u_grade)
# Avoid crashing my pc
import theano
if theano.config.optimizer != 'fast_run':
assert self.tg.grid_val_T.get_value().shape[0] * \
np.math.factorial(len(self.tg.len_series_i.get_value())) < 2e7, \
'The grid is too big for the number of potential fields. Reduce the grid or change the ' \
'optimization flag to fast run'
def set_formation_number(self):
"""
Set a unique number to each formation. NOTE: this method is getting deprecated since the user does not need
to know it and also now the numbers must be set in the order of the series as well. Therefore this method
has been moved to the interpolator class as preprocessing
Returns: Column in the interfaces and foliations dataframes
"""
try:
ip_addresses = self._data_scaled.interfaces["formation"].unique()
ip_dict = dict(zip(ip_addresses, range(1, len(ip_addresses) + 1)))
self._data_scaled.interfaces['formation number'] = self._data_scaled.interfaces['formation'].replace(ip_dict)
self._data_scaled.foliations['formation number'] = self._data_scaled.foliations['formation'].replace(ip_dict)
except ValueError:
pass
def order_table(self):
"""
First we sort the dataframes by the series age. Then we set a unique number for every formation and resort
the formations. All inplace
"""
# We order the pandas table by series
self._data_scaled.interfaces.sort_values(by=['order_series'], # , 'formation number'],
ascending=True, kind='mergesort',
inplace=True)
self._data_scaled.foliations.sort_values(by=['order_series'], # , 'formation number'],
ascending=True, kind='mergesort',
inplace=True)
# Give formation number
if not 'formation number' in self._data_scaled.interfaces.columns:
print('I am here')
self.set_formation_number()
# We order the pandas table by formation (also by series in case something weird happened)
self._data_scaled.interfaces.sort_values(by=['order_series', 'formation number'],
ascending=True, kind='mergesort',
inplace=True)
self._data_scaled.foliations.sort_values(by=['order_series', 'formation number'],
ascending=True, kind='mergesort',
inplace=True)
# Pandas dataframes set an index on every row when the dataframe is created. Sorting the table does not reset
# the index. For some of the methods we have to apply afterwards (pn.drop) we need to reset these indices
self._data_scaled.interfaces.reset_index(drop=True, inplace=True)
def data_prep(self, **kwargs):
"""
Ideally this method will extract the data from the pandas dataframes to individual numpy arrays to be the input
of the theano function. However, since some of the shared parameters are a function of these arrays' shapes, I also
set them here
Returns:
idl (list): List of arrays which are the input for the theano function:
- numpy.array: dips_position
- numpy.array: dip_angles
- numpy.array: azimuth
- numpy.array: polarity
- numpy.array: ref_layer_points
- numpy.array: rest_layer_points
"""
u_grade = kwargs.get('u_grade', None)
# ==================
# Extracting lengths
# ==================
# Array containing the size of every formation. Interfaces
len_interfaces = np.asarray(
[np.sum(self._data_scaled.interfaces['formation number'] == i)
for i in self._data_scaled.interfaces['formation number'].unique()])
# Size of every layer in rests. SHARED (for theano)
len_rest_form = (len_interfaces - 1)
self.tg.number_of_points_per_formation_T.set_value(len_rest_form)
# Position of the first point of every layer
ref_position = np.insert(len_interfaces[:-1], 0, 0).cumsum()
# Drop the reference points using pandas indices to get just the rest_layers array
pandas_rest_layer_points = self._data_scaled.interfaces.drop(ref_position)
self.pandas_rest_layer_points = pandas_rest_layer_points
# TODO: do I need this? PYTHON
# DEP- because per series the foliations do not belong to a formation but to the whole series
# len_foliations = np.asarray(
# [np.sum(self._data_scaled.foliations['formation number'] == i)
# for i in self._data_scaled.foliations['formation number'].unique()])
# -DEP- I think this was just a kind of print to know what was going on
#self.pandas_rest = pandas_rest_layer_points
# Array containing the size of every series. Interfaces.
len_series_i = np.asarray(
[np.sum(pandas_rest_layer_points['order_series'] == i)
for i in pandas_rest_layer_points['order_series'].unique()])
# Cumulative length of the series. We add the 0 at the beginning and set the shared value. SHARED
self.tg.len_series_i.set_value(np.insert(len_series_i, 0, 0).cumsum())
# Array containing the size of every series. Foliations.
len_series_f = np.asarray(
[np.sum(self._data_scaled.foliations['order_series'] == i)
for i in self._data_scaled.foliations['order_series'].unique()])
# Cumulative length of the series. We add the 0 at the beginning and set the shared value. SHARED
self.tg.len_series_f.set_value(np.insert(len_series_f, 0, 0).cumsum())
# =========================
# Choosing Universal drifts
# =========================
if u_grade is None:
u_grade = np.zeros_like(len_series_i)
u_grade[len_series_i > 12] = 9
u_grade[(len_series_i > 6) & (len_series_i < 12)] = 3
print(u_grade)
# it seems I have to pass a list instead of an array_like, which is weird
self.tg.u_grade_T.set_value(list(u_grade))
# ================
# Prepare Matrices
# ================
# Rest layers matrix # PYTHON VAR
rest_layer_points = pandas_rest_layer_points[['X', 'Y', 'Z']].as_matrix()
# TODO delete
# -DEP- Again i was just a check point
# self.rest_layer_points = rest_layer_points
# Ref layers matrix #VAR
# Calculation of the ref matrix and tile. Iloc works with the row number
# Here we extract the reference points
aux_1 = self._data_scaled.interfaces.iloc[ref_position][['X', 'Y', 'Z']].as_matrix()
# We initialize the matrix
ref_layer_points = np.zeros((0, 3))
# TODO I hate loops; there has to be a better way
# Tiling every reference point as many times as there are rest points in its layer
for e, i in enumerate(len_interfaces):
ref_layer_points = np.vstack((ref_layer_points, np.tile(aux_1[e], (i - 1, 1))))
# -DEP- was just a check point
self.ref_layer_points = ref_layer_points
# Check no reference points in rest points (at least in coor x)
assert not np.any(np.in1d(aux_1[:, 0], rest_layer_points[:, 0])), \
'A reference point is in the rest points list. Check that you do ' \
'not have duplicated values in your dataframes'
# Foliations: these ones I tile inside theano. PYTHON VAR
dips_position = self._data_scaled.foliations[['X', 'Y', 'Z']].as_matrix()
dip_angles = self._data_scaled.foliations["dip"].as_matrix()
azimuth = self._data_scaled.foliations["azimuth"].as_matrix()
polarity = self._data_scaled.foliations["polarity"].as_matrix()
# Set all in a list casting them in the chosen dtype
idl = [np.cast[self.dtype](xs) for xs in (dips_position, dip_angles, azimuth, polarity,
ref_layer_points, rest_layer_points)]
return idl
def set_theano_shared_parameteres(self, **kwargs):
"""
Here we create most of the kriging parameters. The user can pass them as kwargs; otherwise we pick the
default values from the DataManagement info. The shared variables are set in place. All the parameters here
are independent of the input data, so this function only has to be called if you change the extent or grid or
if you want to change one of the kriging parameters.
Args:
_data_rescaled: DataManagement object
_grid_rescaled: Grid object
Keyword Args:
u_grade (int): Drift grade. Default to 2.
range_var (float): Range of the variogram. Default 3D diagonal of the extent
c_o (float): Covariance at lag 0. Default range_var ** 2 / 14 / 3. See my paper when I write it
nugget_effect (float): Nugget effect of foliations. Default to 0.01
"""
# Kwargs
u_grade = kwargs.get('u_grade', 2)
range_var = kwargs.get('range_var', None)
c_o = kwargs.get('c_o', None)
nugget_effect = kwargs.get('nugget_effect', 0.01)
# DEP
# compute_all = kwargs.get('compute_all', True)
# -DEP- Now I rescale the data so we do not need this
# rescaling_factor = kwargs.get('rescaling_factor', None)
# Default range
if not range_var:
range_var = np.sqrt((self._data_scaled.extent[0] - self._data_scaled.extent[1]) ** 2 +
(self._data_scaled.extent[2] - self._data_scaled.extent[3]) ** 2 +
(self._data_scaled.extent[4] - self._data_scaled.extent[5]) ** 2)
# Default covariance at 0
if not c_o:
c_o = range_var ** 2 / 14 / 3
# Asserting that the drift grade is in this range
# assert (0 <= all(u_grade) <= 2)
# Creating the drift matrix. TODO find the official name of this matrix?
_universal_matrix = np.vstack((self._grid_scaled.grid.T,
(self._grid_scaled.grid ** 2).T,
self._grid_scaled.grid[:, 0] * self._grid_scaled.grid[:, 1],
self._grid_scaled.grid[:, 0] * self._grid_scaled.grid[:, 2],
self._grid_scaled.grid[:, 1] * self._grid_scaled.grid[:, 2]))
# Setting shared variables
# Range
self.tg.a_T.set_value(np.cast[self.dtype](range_var))
# Covariance at 0
self.tg.c_o_T.set_value(np.cast[self.dtype](c_o))
# Foliations nugget effect
self.tg.nugget_effect_grad_T.set_value(np.cast[self.dtype](nugget_effect))
# TODO change the drift to the same style I have the faults so I do not need to do this
# # Drift grade
# if u_grade == 0:
# self.tg.u_grade_T.set_value(u_grade)
# else:
# self.tg.u_grade_T.set_value(u_grade)
# TODO: To be sure what is the mathematical meaning of this -> It seems that nothing
# TODO Deprecated
# self.tg.c_resc.set_value(1)
# Just grid. I add a small number to avoid problems with the origin point
self.tg.grid_val_T.set_value(np.cast[self.dtype](self._grid_scaled.grid + 10e-6))
# Universal grid
self.tg.universal_grid_matrix_T.set_value(np.cast[self.dtype](_universal_matrix + 1e-10))
# Initialization of the block model
self.tg.final_block.set_value(np.zeros((1, self._grid_scaled.grid.shape[0]), dtype='float32'))
# Initialization of the boolean array that represent the areas of the block model to be computed in the
# following series
#self.tg.yet_simulated.set_value(np.ones((_grid_rescaled.grid.shape[0]), dtype='int'))
# Unique number assigned to each lithology
#self.tg.n_formation.set_value(np.insert(_data_rescaled.interfaces['formation number'].unique(),
# 0, 0)[::-1])
self.tg.n_formation.set_value(self._data_scaled.interfaces['formation number'].unique())
# Number of formations per series. The function is not pretty but the result is quite clear
self.tg.n_formations_per_serie.set_value(
np.insert(self._data_scaled.interfaces.groupby('order_series').formation.nunique().values.cumsum(), 0, 0))
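# Worked example of the defaults above (illustrative numbers only): for a
# rescaled extent where every axis spans 0.5, the default range is the 3D
# diagonal, range_var = np.sqrt(3 * 0.5 ** 2) ~= 0.866, and the default
# covariance at lag 0 is c_o = range_var ** 2 / 14 / 3 = 0.75 / 42 ~= 0.018.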
def get_kriging_parameters(self, verbose=0):
# range
print('range', self.tg.a_T.get_value(), self.tg.a_T.get_value() * self._data_scaled.rescaling_factor)
# Number of drift equations
print('Number of drift equations', self.tg.u_grade_T.get_value())
# Covariance at 0
print('Covariance at 0', self.tg.c_o_T.get_value())
# Foliations nugget effect
print('Foliations nugget effect', self.tg.nugget_effect_grad_T.get_value())
if verbose > 0:
# Input data shapes
# Length of the interfaces series
print('Length of the interfaces series', self.tg.len_series_i.get_value())
# Length of the foliations series
print('Length of the foliations series', self.tg.len_series_f.get_value())
# Number of formation
print('Number of formations', self.tg.n_formation.get_value())
# Number of formations per series
print('Number of formations per series', self.tg.n_formations_per_serie.get_value())
# Number of points per formation
print('Number of points per formation (rest)', self.tg.number_of_points_per_formation_T.get_value())
| mit |
pmeier82/spikeval | spikeval/plot/plot_cluster.py | 1 | 3106 | # -*- coding: utf-8 -*-
#
# spikeval - plot.plot_cluster.py
#
# Philipp Meier <pmeier82 at googlemail dot com>
# 2012-08-27
#
"""scatter plot for cluster data"""
__docformat__ = 'restructuredtext'
__all__ = ['cluster']
##---IMPORTS
from matplotlib.patches import Ellipse
from .common import COLOURS, gen_fig
##---FUNCTION
def cluster(data, data_dim=(0, 1), plot_mean=True, colours=None, title=None, xlabel=None, ylabel=None):
"""plot a set of clusters with different colors each
:type data: object
:param data: Preferably a dictionary with ndarray entries.
:type data_dim: tuple
:param data_dim: A 2-tuple giving the dimension (entries per datapoint/
columns) to use for the scatter plot of the cluster.
Default=(0,1)
:type plot_mean: bool or float
:param plot_mean: If False, do nothing. If True or positive integer, plot
the cluster means with a strong cross, if positive float, additionally
plot a unit circle of that radius (makes sense for prewhitened pca
data), thus interpreting the value as the std of the cluster.
Default=True
:type colours: list
:param colours: List of colours in any colour representation that matplotlib understands.
Default=None
:type title: str
:param title: A title for the plot. No title if None or ''.
:type xlabel: str
:param xlabel: A label for the x-axis. No label if None or ''.
:type ylabel: str
:param ylabel: A label for the y-axis. No label if None or ''.
:rtype: matplotlib.figure.Figure
"""
# init and checks
col_lst = colours or COLOURS
fig = gen_fig()
ax = fig.add_subplot(111)
if not isinstance(data, dict):
data = {'0': data} # str(0) ??
# plot single cluster members
col_idx = 0
for k in sorted(data.keys()):
ax.plot(
data[k][:, data_dim[0]],
data[k][:, data_dim[1]],
marker='.',
lw=0,
c=col_lst[col_idx % len(col_lst)])
col_idx += 1
# plot cluster means
if plot_mean is not False:
col_idx = 0
for k in sorted(data.keys()):
mean_k = data[k][:, data_dim].mean(axis=0)
ax.plot(
[mean_k[0]],
[mean_k[1]],
lw=0,
marker='x',
mfc=col_lst[col_idx % len(col_lst)],
ms=10,
mew=1,
mec='k')
# plot density estimates
if plot_mean is not True:
ax.add_artist(
Ellipse(
xy=mean_k,
width=plot_mean * 2,
height=plot_mean * 2,
facecolor='none',
edgecolor=col_lst[col_idx % len(col_lst)]))
col_idx += 1
# fancy stuff
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
# return
return fig
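##---EXAMPLE

# A minimal usage sketch (hypothetical helper, not part of the spikeval API):
# builds two gaussian clusters and plots them with a unit-std ellipse around
# each cluster mean.
def _demo_cluster():
    import numpy as np

    rng = np.random.RandomState(42)
    data = {
        'a': rng.randn(200, 2),
        'b': rng.randn(200, 2) + np.array([4.0, 1.0]),
    }
    # plot_mean=1.0 draws the means plus a radius-1.0 ellipse around each one
    return cluster(data, data_dim=(0, 1), plot_mean=1.0,
                   title='demo clusters', xlabel='dim 0', ylabel='dim 1')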
##---MAIN
if __name__ == '__main__':
pass
| mit |
lkilcommons/atmodweb | atmodweb/atmodbackend.py | 1 | 64472 | import sys
import os
import random
import matplotlib.widgets as widgets
import matplotlib.axes
#Main imports
import numpy as np
#import pandas as pd
import sys, pdb, textwrap, itertools
import datetime
#sys.path.append('/home/liamk/mirror/Projects/geospacepy')
#import special_datetime, lmk_utils, satplottools #Datetime conversion class
from matplotlib.figure import Figure
import matplotlib as mpl
import textwrap #for __str__ of ModelRun
from mpl_toolkits.basemap import Basemap
from matplotlib import ticker
from matplotlib.colors import Normalize, LogNorm
import msispy
from collections import OrderedDict
import logging
logging.basicConfig(level=logging.INFO)
try:
from cStringIO import StringIO
except ModuleNotFoundError:
from io import StringIO
class RangeCheckOD(OrderedDict):
"""
OrderedDict subclass that keeps numerical values within a per-key allowed range
"""
def __init__(self,allowed_range_peer=None):
super(RangeCheckOD,self).__init__()
self.log = logging.getLogger(self.__class__.__name__)
self.allowed_range = dict()
#allow on-the-fly copy of allowed range values from another object
self.allowed_range_peer = allowed_range_peer
def type_sanitize(self,key,val):
"""Check that the value is of the same general type as the previous value"""
oldval = self[key]
for t in [list,dict,float,int,str]:
if isinstance(oldval,t) and not isinstance(val,t):
self.log.debug("Old value for %s was a %s, and %s is not...leaving old value" % (key,str(t),str(val)))
return oldval
return val
def range_correct(self,key,val):
"""Check that the value about to be set at key, is within the specified allowed_range for that key"""
if isinstance(val,np.ndarray):
outrange = np.logical_or(val < float(self.allowed_range[key][0]),val > float(self.allowed_range[key][1]))
val[outrange] = np.nan
#if np.flatnonzero(outrange).shape[0] > 1:
#self.log.debug("%d values were out of range for key %s allowed_range=(%.3f-%.3f)" %(np.flatnonzero(outrange).shape[0],
# key,self.allowed_range[key][0],self.allowed_range[key][1]))
#else:
#self.log.debug("No values were out of range for key %s" % (key))
elif isinstance(val,list):
for k in range(len(val)):
v = val[k]
if v > self.allowed_range[key][1]:
self.log.warn("Attempting to set %dth element of key %s to a value greater than allowed [%s]. setting to max allowed [%s]" % (k,
key,str(v),str(self.allowed_range[key][1])))
val[k] = self.allowed_range[key][1]
elif v < self.allowed_range[key][0]:
self.log.warn("Attempting to set %dth element of key %s to a value greater than allowed [%s] set to min allowed [%s]" % (k,
key,str(v),str(self.allowed_range[key][0])))
val[k] = self.allowed_range[key][0]
elif v >= self.allowed_range[key][0] and v <= self.allowed_range[key][1]:
pass
else:
raise RuntimeError("Nonsensical value in range_correct for index %d of %s: %s, allowed range is %s" % (int(k),key,str(val),str(self.allowed_range[key])))
else: #assume it's a scalar value
if val > self.allowed_range[key][1]:
self.log.warn("Attempting to set key %s to a value greater than allowed [%s],setting to max allowed [%s]" % (key,str(val),str(self.allowed_range[key][1])))
val = self.allowed_range[key][1]
elif val < self.allowed_range[key][0]:
self.log.warn("Attempting to set key %s to a value greater than allowed [%s],setting to min allowed [%s]" % (key,str(val),str(self.allowed_range[key][0])))
val = self.allowed_range[key][0]
elif val >= self.allowed_range[key][0] and val <= self.allowed_range[key][1]:
pass
else:
raise RuntimeError("Nonsensical value in range_correct for %s: %s, allowed values: %s" % (key,str(val),str(self.allowed_range[key])))
return val
def __setitem__(self,key,val):
"""Check that we obey the allowed_range"""
if key in self:
val = self.type_sanitize(key,val)
if self.allowed_range_peer is not None:
self.allowed_range = getattr(self.allowed_range_peer,'allowed_range')
if key not in self.allowed_range:
pass
#self.log.warn("ON SETTING %s has no allowed_range. Skipping range check" %(key))
else:
val = self.range_correct(key,val)
OrderedDict.__setitem__(self,key,val)
def __getitem__(self,key):
item = OrderedDict.__getitem__(self,key)
#if key not in self.allowed_range:
#self.log.warn("ON GETTING %s has no allowed_range." %(key))
return item
def copyasdict(self):
newdict = dict()
for key in self:
newdict[key]=OrderedDict.__getitem__(self,key)
return newdict
def __call__(self):
pass
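# A minimal sketch of the intended behaviour (hypothetical helper, not used
# elsewhere in this module): values assigned to a key that has an allowed_range
# are clamped to that range, with a warning in the log.
def _demo_rangecheckod():
    od = RangeCheckOD()
    od.allowed_range['Altitude'] = [0., 1000.]
    od['Altitude'] = 5000.   # logs a warning and clamps to the maximum
    return od['Altitude']    # -> 1000.0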
class ModelRunOD(RangeCheckOD):
"""
Range-checking OrderedDict subclass for storing values for variables and drivers. It also stores additional
dicts of units and descriptions for each key
"""
def __init__(self):
super(ModelRunOD,self).__init__()
self.log = logging.getLogger(self.__class__.__name__)
self.descriptions = dict()
self.units = dict()
def __setitem__(self,key,val):
"""Check that we obey the allowed_range"""
RangeCheckOD.__setitem__(self,key,val)
if key not in self.units:
self.units[key]=None
# self.log.warn("ON SETTING %s has no units. Setting to None" %(key))
if key not in self.descriptions:
self.descriptions[key]=None
# self.log.warn("ON SETTING %s has no description. Setting to None" %(key))
def __getitem__(self,key):
item = RangeCheckOD.__getitem__(self,key)
#if key not in self.allowed_range:
# self.log.warn("ON GETTING %s has no allowed_range." %(key))
#if key not in self.descriptions:
# self.log.warn("ON GETTING %s has no description." %(key))
#if key not in self.units:
# self.log.warn("ON GETTING %s has no units." %(key))
return item
class ModelRunDriversOD(ModelRunOD):
def __init__(self):
super(ModelRunDriversOD,self).__init__()
self.awesome = True
class ModelRunVariablesOD(ModelRunOD):
def __init__(self):
super(ModelRunVariablesOD,self).__init__()
#Add functionality for variable limits
self.lims = RangeCheckOD(allowed_range_peer=self)
self._lims = RangeCheckOD(allowed_range_peer=self)
#allowed_range_peer links allowed_range dictionaries in lims to main allowed_range
self.npts = dict()
class ModelRun(object):
"""
The ModelRun class is a generic class for individual calls to atmospheric models.
The idea is to have individual model classes subclass this one, and add their specific
run code to the 'populate method'.
**The assumptions are:**
* All atmospheric models take as input latitude, longitude and altitude
* User will want data on a 2-d rectangular grid or as column arrays
**The parameters used:**
* **xkey** - string or None
The key into vars,lims, and npts for the variable that represents the 1st dimension of the desired output (x-axis)
* **ykey** - string or None
The key into vars,lims, and npts for the variable that represents the 2nd dimension of the desired output (y-axis)
* **vars** - an OrderedDict (Ordered because it's possible user will always want to iterate in a predetermined order over it's keys)
#. The keys of vars are the names of the data stored in it's values.
#. vars always starts with keys 'Latitude','Longitude', and 'Altitude'
* **lims** - an OrderedDict
The range [smallest,largest] of a particular variable that will be used to determine:
#. The range of values of the independent variables (i.e. Latitude, Longitude or Altitude) for which the model will generate results
#. The range of values the user could expect a particular output variable to have (i.e. to set axes bounds)
* **npts** - an OrderedDict
#. The number of distinct values between the associated lims of a particular input variable that will be passed to the model
i.e. (how the grid of input locations will be shaped and how big it will be)
* **drivers** - a Dictionary
Additional inputs that will be passed to the model (using **self.drivers in the model call).
Initialized to empty; set via the subclass
Subclass best practices:
* In the subclass __init__ method, after calling the superclass **__init__** method, the user should:
* Set any keys in the self.drivers dict that will then be passed a keyword arguments to the model wrapper
* In the populate method, after calling the superclass **populate** method, the user should:
* Call the model using the flattened latitude, longitude, and altitude arrays prepared in the superclass method,
and pass the drivers dict as keyword arguments.
**Example for horizontal wind model subclass:**
.. code-block:: python
def __init__(self):
#This syntax allows for multiple inheritance,
#we don't use it, but it's good practice to use this
#instead of ModelRun.__init__()
super(HWMRun,self).__init__()
#ap - float
# daily AP magnetic index
self.drivers['dt']=datetime.datetime(2000,6,21,12,0,0)
self.drivers['ap']=None
def populate():
super(HWMRun,self).populate()
self.winds,self.drivers = hwmpy.hwm(self.flatlat,self.flatlon,self.flatalt,**self.drivers)
#Now add all the zonal and meridional winds to the dictionary
for w in self.winds:
self.vars[w] = self.winds[w]
#Now make everything into the appropriate shape, if we're
#expecting grids. Otherwise make everything into a column vector
if self.shape is None:
self.shape = (self.npts,1)
for v in self.vars:
self.vars[v] = np.reshape(self.vars[v],self.shape)
if v not in ['Latitude','Longitude','Altitude']:
self.lims[v] = [np.nanmin(self.vars[v].flatten()),np.nanmax(self.vars[v].flatten())]
**Operation works like this:**
* Assume that we have a model run subclass call MyModel
* Assume we have an instance of MyModel called mm
#. User (or calling method) decides that they want:
* To plot a GLOBAL grid at an altitude of 110km that is Latitude (50 pts) vs. Longitude (75 pts) vs. Model output
#. They set mm.npts['Latitude']=50 and mm.npts['Longitude']=75 to tell the object what the size of the grid is
#. They call mm.set_x('Latitude'), and mm.set_y('Longitude') to set which dimensions correspond to which variables
#. Since the model also requires an altitude value, they must set mm.vars['Altitude']=110
#. Since they want the grid to be global they set mm.lims['Latitude']=[-90.,90.] and mm.lims['Longitude']=[-180.,180.]
#. Then they call mm.populate() to call the model for their desired grid
**Calling:**
* Getting a value from the ModelRun instance as if it were a dictionary i.e. mm['Latitude'], returns data,limits for
the variable 'Latitude'. Handles differencing any non position variables with another ModelRun instance at mm.peer if mm.peer is not None
**Peering:**
* the peer parameter can be set to another ModelRun instance to return the difference between variables in two runs
TODO: Document peering
"""
def __init__(self):
#Attributes which control how we do gridding
#if only one is > 1, then we do vectors
#
self.modelname = None
self.log = logging.getLogger(self.__class__.__name__)
#Determines grid shape
self.xkey = None
self.ykey = None
#the canonical 'vars' dictionary, which has
#keys which are used to populate the combobox widgets,
self.vars = ModelRunVariablesOD()
self.vars['Latitude']=None
self.vars['Longitude']=None
self.vars['Altitude']=None
#if two are > 1, then we do a grid
self.vars.npts['Latitude']=1
self.vars.npts['Longitude']=1
self.vars.npts['Altitude']=1
#Also set up allowed_ranges, so that we can't accidentally
#set latitude to be 100 or longitude to be -1000
self.vars.allowed_range['Latitude'] = [-90.,90.]
self.vars.allowed_range['Longitude'] = [-180.,180.]
self.vars.allowed_range['Altitude'] = [0.,1500.]
#Set up the initial ranges for data grid generation
self.vars.lims['Latitude']=[-90.,90.]
self.vars.lims['Longitude']=[-180.,180.]
self.vars.lims['Altitude']=[0.,400.]
#the _lims dictionary is very similar to the lims, but it is NOT TO BE MODIFIED
#by any outside objects. It records the max and min values of every variable and is
#set when the finalize method is called. The reason it is included at all is so that
#if the user (or another method) changes the lims, we can revert back to something if they
#want that.
for k in self.vars.lims:
self.vars._lims[k] = self.vars.lims[k]
#WTF is this????
#self.vars.allowed_range[k] = self.vars.lims[k]
#The units dictionary simply holds the unit for a variable
self.vars.units['Latitude'] = 'deg'
self.vars.units['Longitude'] = 'deg'
self.vars.units['Altitude'] = 'km'
#The drivers dictionary take input about whatever solar wind parameters drive the model
#These must be either scalars (floats) or lists.
#The keys to this dict must be keyword argument names in the model call
self.drivers = ModelRunDriversOD()
self.log.debug("Class of drivers dict is %s" % (self.drivers.__class__.__name__))
self.shape = None #Tells how to produce gridded output, defaults (if None) to use column vectors
self.totalpts = None #Tells how many total points
self.peer = None #Can be either None, or another ModelRun, allows for comparing two runs
def autoscale_all_lims(self):
for key in self.vars.lims:
self.autoscale_lims(key)
def autoscale_lims(self,key):
if key in self.vars.lims and key in self.vars._lims:
self.log.info("Restoring original bounds (was %s, now %s) for %s" % (str(self.vars.lims[key]),str(self.vars._lims[key]),key))
self.vars.lims[key] = self.vars._lims[key]
else:
raise ValueError("Key %s is not a valid model run variable" % (key))
def hold_constant(self,key):
"""Holds an ephem variable constant by ensuring it's npts is 1s"""
self.log.info("Holding %s constant" % (key))
if key in ['Latitude','Longitude','Altitude']:
self.vars.npts[key] = 1
else:
raise RuntimeError('Cannot hold %s constant, not a variable!'%(key))
def set_x(self,key):
"""Sets an emphem variable as x"""
self.log.info("X is now %s" % (key))
if key in ['Latitude','Longitude','Altitude']:
self.xkey = key
else:
raise RuntimeError('Cannot set %s as x, not a variable!'%(key))
def set_y(self,key):
"""Sets an emphem variable as y"""
self.log.info("Y is now %s" % (key))
if key in ['Latitude','Longitude','Altitude']:
self.ykey = key
else:
raise RuntimeError('Cannot set %s as y, not a variable!'%(key))
def add_compound_var(self,varname,expr,description=None,units=None):
"""
Create a compound variable that incorporates other variables
Inputs:
varname, str:
The key under which the new variable will be stored in self.vars
expr, str:
A (python) mathematical expression involving the keys of other variables
defined in self.vars. Those variables must have been defined
before you add a compound variable involving them.
description, str (optional):
Human-readable description; defaults to expr with each variable name
replaced by that variable's description
units, str (optional):
Units string; defaults to expr with each variable name replaced by that
variable's units
The plot limits (lims) and allowed_range of the new variable are computed
automatically from the variables it involves.
#--Not yet added--
#Optional arguments:
# Arbitrary key-value pairs which can also be used to specify named
# constants (i.e. mu0=4*np.pi*1e-7 for the permeability of free space)
"""
self.log.info('Beginning addition of compound variable %s with expression %s' % (varname,expr))
#Sanitize the expression
#If we need numpy expressions
universal_locals = {}
if 'np.' in expr:
universal_locals['np'] = np
#Build up a list of local variables for when we eval the compound variable
eval_locals = OrderedDict()
#Make default descriptions and units strings
unitstr = expr
descstr = expr
#Parse starting with the longest variable names,
#this prevents names in the expression which contain other variables
#i.e. N2 will be parsed out before N
vars_by_longest_first = [key for key in self.vars.keys()]
vars_by_longest_first = sorted(vars_by_longest_first, key=len, reverse=True)
parsed_expr = expr
for var in vars_by_longest_first:
if var in parsed_expr:
eval_locals[var]=self.vars[var] #Will be an np array
#Replace the string representing the variable in the expression with
#the value of the unit/decription of the that variable
#i.e. O2/N2 -> [#/cm^3/#/cm^3] and [Molecular Oxygen Number Density/Molecular Nitrogen Number Density]
unitstr = unitstr.replace(var,self.vars.units[var])
descstr = descstr.replace(var,self.vars.descriptions[var])
parsed_expr = parsed_expr.replace(var,'')
#It's possible for a variable not to have an allowed_range. If any are missing, then default to no allowed range
#for the compound variable
do_allowed_range = all([key in self.vars.allowed_range for key in eval_locals])
if do_allowed_range:
#Evaluate the expression on all possible combinations of limits/ranges and pick
#the smallest/largest to be the limits/ranges for the compound variable
indcombos = itertools.product((0,1),repeat=len(eval_locals))
range_results = []
for combo in indcombos:
range_locals = dict()
for var,cind in zip(eval_locals.keys(),combo):
range_locals[var] = self.vars.allowed_range[var][cind]
range_locals.update(universal_locals)
range_results.append(eval(expr,range_locals))
self.log.debug('Among vars %s combo %s resulted in range result: %s' % (str(eval_locals.keys()),
str(combo),str(range_results[-1])))
#Check for NaN values in result
eval_locals.update(universal_locals)
compounddata = eval(expr,eval_locals)
compoundnnan = np.count_nonzero(np.logical_not(np.isfinite(compounddata)))
if compoundnnan > 0:
self.log.warn('Warning %d/%d NaN outputs were found in output from formula %s' % (compoundnnan,len(compounddata),expr))
self.vars[varname] = compounddata
self.vars.units[varname] = unitstr if units is None else units
self.vars.descriptions[varname] = descstr if description is None else description
self.vars.lims[varname] = [np.nanmin(compounddata),np.nanmax(compounddata)]
self.vars._lims[varname] = [np.nanmin(compounddata),np.nanmax(compounddata)]
if do_allowed_range:
self.vars.allowed_range[varname] = [np.nanmin(range_results),np.nanmax(range_results)]
self.log.debug('Added compound variable %s with limits: %s,\n allowed_range: %s,\n units: %s,\n description: %s' % (varname,
str(self.vars.lims[varname]),'N/A' if not do_allowed_range else str(self.vars.allowed_range[varname]),
str(self.vars.units[varname]),str(self.vars.descriptions[varname])))
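# Example (this exact call appears in MsisRun.populate below): once 'O' and 'N2'
# exist in self.vars, a derived oxygen/nitrogen ratio can be added with
# self.add_compound_var('ON2ratio', 'O/N2', units='unitless',
#                       description='Atomic Oxygen/Molecular Nitrogen Ratio')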
def as_csv(self,varkeys):
"""
Serialize a list of variables as a CSV string, plus a multi-line description header
"""
#Make sure that we don't have any iterables as members of varkeys
#(happens when plotting multiple variables on same axes)
flatkeys = []
for v in varkeys:
if isinstance(v,list) or isinstance(v,tuple):
flatkeys += [vv for vv in v]
else:
flatkeys.append(v)
bigheader = str(self).replace('|','\n')
bigheader += '\n'
onelineheader=''
dats,lims,units,desc = [],[],[],[]
for i,v in enumerate(flatkeys):
var,lim,unit,desc = self[v]
dats.append(var.flatten().reshape((-1,1))) #as a column
bigheader += "%d - %s (%s)[%s]\n" % (i+1,v,desc,unit)
onelineheader += v+','
onelineheader = onelineheader[:-1]
data = np.column_stack(dats)
fakefile = StringIO()
np.savetxt(fakefile,data,delimiter=',',fmt='%10.5e',header=onelineheader,comments='')
return fakefile.getvalue(),bigheader
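# Example (for an already-populated run, hypothetical variable names):
#   csvtext, header = run.as_csv(['Altitude', 'Temperature'])
# returns the flattened columns as comma-separated text, plus a multi-line
# header built from str(self) and the per-variable units and descriptions.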
def populate(self):
"""Populates itself with data"""
#Make sure that everything has the same shape (i.e. either a grid or a vector of length self.npts)
self.log.info("Now populating model run")
#Count up the number of independent variables
nindependent=0
for key in self.vars.npts:
if self.vars.npts[key] > 1:
nindependent+=1
self.shape = (self.vars.npts[key],1)
if nindependent>1: #If gridding set the shape
self.shape = (int(self.vars.npts[self.xkey]),int(self.vars.npts[self.ykey]))
#Populate the ephemeris variables as vectors
for var in ['Latitude','Longitude','Altitude']:
if self.vars.npts[var]>1:
self.vars[var] = np.linspace(self.vars.lims[var][0],self.vars.lims[var][1],self.vars.npts[var])
self.log.debug("Generating %d %s points from %.3f to %.3f" % (self.vars.npts[var],var,self.vars.lims[var][0],self.vars.lims[var][1]))
else:
if self.vars[var] is not None:
print(self.shape,self.vars[var])
self.vars[var] = np.ones(self.shape)*self.vars[var]
else:
raise RuntimeError('Set %s to something first if you want to hold it constant.' % (var))
if nindependent>1:
x = self.vars[self.xkey]
y = self.vars[self.ykey]
X,Y = np.meshgrid(x,y)
self.vars[self.xkey] = X
self.vars[self.ykey] = Y
#Now flatten everything to make the model call
self.flatlat=self.vars['Latitude'].flatten()
self.flatlon=self.vars['Longitude'].flatten()
self.flatalt=self.vars['Altitude'].flatten()
self.totalpts = len(self.flatlat)
def finalize(self):
"""
Call after populate to finish shaping the data and filling the lims dict
"""
#Now make everything into the appropriate shape, if we're
#expecting grids. Otherwise make everything into a column vector
if self.shape is None:
self.shape = (self.npts,1)
for v in self.vars:
#Handle the potential for NaN or +-Inf values
good = np.isfinite(self.vars[v])
nbad = np.count_nonzero(np.logical_not(good))
if nbad >= len(self.vars[v])/2:
self.log.warn("Variable %s had more than half missing or infinite data!" % (str(v)))
self.vars[v] = np.reshape(self.vars[v],self.shape)
self.vars._lims[v] = [np.nanmin(self.vars[v].flatten()),np.nanmax(self.vars[v].flatten())]
if v not in ['Latitude','Longitude','Altitude']: #Why do we do this again? Why not the positions? Because the positions create the grid
self.vars.lims[v] = [np.nanmin(self.vars[v].flatten()),np.nanmax(self.vars[v].flatten())]
def __str__(self):
"""
Gives a description of the model settings used to make this run
"""
mystr = "Model: %s|" % (self.modelname)
if self.xkey is not None:
mystr = mystr+"Dimension 1 %s: [%.3f-%.3f][%s]|" % (self.xkey,self.vars.lims[self.xkey][0],self.vars.lims[self.xkey][1],self.vars.units[self.xkey])
if self.ykey is not None:
mystr = mystr+"Dimension 2 %s: [%.3f-%.3f][%s]|" % (self.ykey,self.vars.lims[self.ykey][0],self.vars.lims[self.ykey][1],self.vars.units[self.ykey])
if 'Latitude' not in [self.xkey,self.ykey]:
mystr = mystr+"Latitude held constant at %.3f|" % (self.vars['Latitude'].flatten()[0]) #By this point they will be arrays
if 'Longitude' not in [self.xkey,self.ykey]:
mystr = mystr+"Longitude held constant at %.3f|" % (self.vars['Longitude'].flatten()[0])
if 'Altitude' not in [self.xkey,self.ykey]:
mystr = mystr+"Altitude held constant at %.3f|" % (self.vars['Altitude'].flatten()[0])
for d in self.drivers:
mystr = mystr+"Driver %s: %s[%s]|" % (d,str(self.drivers[d]),str(self.drivers.units[d]))
mystr = mystr+"Generated at: %s" % (datetime.datetime.now().strftime('%c'))
return mystr
def __getitem__(self,key):
"""Easy syntax for returning data"""
if hasattr(key, '__iter__') and not isinstance(key,str): #If key is a sequence of some kind
self.log.debug("Getting multiple variables/limits %s" % (str(key)))
var = []
lim = []
unit = []
desc = []
for k in key:
v,l,u,d = self.__getitem__(k)
var.append(v)
lim.append(l)
unit.append(u)
desc.append(d)
return var,lim,unit,desc
else:
if self.peer is None:
self.log.debug("Getting variables/limits/units/description for %s" % (key))
return self.vars[key],self.vars.lims[key],self.vars.units[key],self.vars.descriptions[key]
else:
if key not in ['Latitude','Longitude','Altitude']:
self.log.info( "Entering difference mode for var %s" % (key))
#Doesn't make sense to difference locations
mydata,mylims = self.vars[key],self.vars.lims[key]
peerdata,peerlims = self.peer[key] #oh look, recursion opportunity!
newdata = mydata-peerdata
newlims = (np.nanmin(newdata.flatten()),np.nanmax(newdata.flatten()))
newunits = 'diff(%s)' % str(self.vars.units[key])
newdesc = self.vars.descriptions[key]+"(difference)"
#newlims = lim_pad(newlims)
return newdata,newlims,newunits,newdesc
else:
return self.vars[key],self.vars.lims[key],self.vars.units[key],self.vars.descriptions[key]
# class IRIRun(ModelRun):
# """ Class for individual calls to IRI """
# #import iripy
# def __init__(self):
# """Initialize HWM ModelRun Subclass"""
# super(IRIRun,self).__init__()
# #HWM DRIVERS
# #ap - float
# # daily AP magnetic index
# #Overwrite the superclass logger
# self.log = logging.getLogger(__name__)
# self.modelname = "International Reference Ionosphere 2011 (IRI-2011)"
# self.modeldesc = textwrap.dedent("""
# The International Reference Ionosphere (IRI) is an international project sponsored by
# the Committee on Space Research (COSPAR) and the International Union of Radio Science (URSI).
# These organizations formed a Working Group (members list) in the late sixties to produce an
# empirical standard model of the ionosphere, based on all available data sources (charter ).
# Several steadily improved editions of the model have been released. For given location, time
# and date, IRI provides monthly averages of the electron density, electron temperature, ion temperature,
# and ion composition in the altitude range from 50 km to 2000 km. Additionally parameters given by IRI
# include the Total Electron Content (TEC; a user can select the starting and ending height of the integral),
# the occurrence probability for Spread-F and also the F1-region, and the equatorial vertical ion drift.
# THE ALTITUDE LIMITS ARE: LOWER (DAY/NIGHT) UPPER ***
# ELECTRON DENSITY 60/80 KM 1000 KM ***
# TEMPERATURES 120 KM 2500/3000 KM ***
# ION DENSITIES 100 KM 1000 KM ***
# (text from: http://iri.gsfc.nasa.gov/)
# """)
# self.drivers['dt']=datetime.datetime(2000,6,21,12,0,0)
# self.drivers.allowed_range['dt'] = [datetime.datetime(1970,1,1),datetime.datetime(2015,4,29,23,59,59)]
# self.drivers.units['dt'] = 'UTC'
# self.drivers['f107']=None
# self.drivers.allowed_range['f107'] = [65.,350.]
# self.drivers.units['f107'] = 'SFU' #No units
# self.drivers.descriptions['f107'] = 'Solar 10.7 cm Flux'
# self.drivers['f107_81']=None
# self.drivers.allowed_range['f107_81'] = [65.,350.]
# self.drivers.units['f107_81'] = 'SFU' #No units
# self.drivers.descriptions['f107_81'] = '81-Day Average Solar 10.7 cm Flux'
# self.vars.allowed_range['Altitude'] = [50.,1500.]
# #Set a more sane default grid range
# self.vars.lims['Altitude'] = [50.,250.]
# def populate(self):
# super(IRIRun,self).populate()
# self.log.info( "Now runing IRI2011 for %s...\n" % (self.drivers['dt'].strftime('%c')))
# self.log.info( "Driver dict is %s\n" % (str(self.drivers)))
# #Call the F2Py Wrapper on the Fortran IRI
# outdata,descriptions,units,outdrivers = iripy.iri_call(self.flatlat,self.flatlon,self.flatalt,
# self.drivers['dt'],f107=self.drivers['f107'],f107_81=self.drivers['f107_81'])
# #Copy the output drivers into the drivers dictionary
# for d in outdrivers:
# self.drivers[d] = outdrivers[d] if isinstance(outdrivers[d],datetime.datetime) else float(outdrivers[d])
# #Now add all ionospheric variables (density, temperature) to the dictionary
# #Also add the units, and descriptions
# for var in outdata:
# self.vars[var] = outdata[var]
# self.vars.units[var] = units[var]
# self.vars.descriptions[var] = descriptions[var]
# #Finish reshaping the data
# self.finalize()
# class HWMRun(ModelRun):
# """ Class for individual calls to HWM """
# #import hwmpy
# def __init__(self):
# """Initialize HWM ModelRun Subclass"""
# super(HWMRun,self).__init__()
# #HWM DRIVERS
# #ap - float
# # daily AP magnetic index
# #Overwrite the superclass logger
# self.log = logging.getLogger(__name__)
# self.modelname = "Horizontal Wind Model 07 (HWM07)"
# self.drivers['dt']=datetime.datetime(2000,6,21,12,0,0)
# self.drivers.allowed_range['dt'] = [datetime.datetime(1970,1,1),datetime.datetime(2015,4,29,23,59,59)]
# self.drivers.units['dt'] = 'UTC'
# self.drivers['ap']=None
# self.drivers.allowed_range['ap'] = [0,400]
# self.drivers.units['ap'] = 'unitless' #No units
# self.vars.allowed_range['Altitude'] = [100.,500.]
# self.vars.lims['Altitude'] = [100.,500.]
# def populate(self):
# super(HWMRun,self).populate()
# self.log.info( "Now runing HWM07 for %s...\n" % (self.drivers['dt'].strftime('%c')))
# self.log.info( "Driver dict is %s\n" % (str(self.drivers)))
# #Call the F2Py Wrapper on the Fortran HWM07
# self.winds,outdrivers = hwmpy.hwm(self.flatlat,self.flatlon,self.flatalt,**self.drivers)
# #Copy the output drivers into the drivers dictionary
# for d in outdrivers:
# self.drivers[d] = outdrivers[d]
# #Now add all the zonal and meridional winds to the dictionary
# #Also add the units
# for w in self.winds:
# self.vars[w] = self.winds[w]
# self.vars.units[w] = 'km/s'
# #Finish reshaping the data
# self.finalize()
#MSIS DRIVERS
#f107 - float
# daily f10.7 flux for previous day
#ap_daily - float
# daily AP magnetic index
#f107a - optional,float
# 81 day average of f10.7 flux (centered on date)
#ap3 - optional, float
# 3 hour AP for current time
#ap33 - optional, float
# 3 hour AP for current time - 3 hours
#ap36 - optional, float
# 3 hour AP for current time - 6 hours
#ap39 - optional, float
# 3 hour AP for current time - 9 hours
#apa1233 - optional, float
# Average of eight 3 hour AP indices from 12 to 33 hrs prior to current time
#apa3657
# Average of eight 3 hour AP indices from 36 to 57 hours prior to current time
class MsisRun(ModelRun):
""" Class for individual calls to NRLMSISE00 """
import msispy
def __init__(self):
"""ModelRun subclass which adds MSIS to GUI"""
super(MsisRun,self).__init__()
#Overwrite the superclass logger
self.log = logging.getLogger(self.__class__.__name__)
self.modelname = "NRLMSISE00"
self.modeldesc = textwrap.dedent("""
This version of the venerable mass-spectrometer and incoherent scatter radar model
also incorporates mass density data derived from drag measurements and orbit determination.
It includes the same database as the Jacchia family of models, and has been seen to outperform
both the older MSIS90 and the ubiquitous Jacchia-70. Its purpose is to specify the mass density,
temperature and neutral species composition from the ground to the bottom of the exosphere
(around 1400km altitude). It provides number densities for the major neutral atmosphere constituents:
atomic and molecular nitrogen and oxygen, argon, helium and hydrogen. Additionally it includes a
species referred to as anomalous oxygen which includes O+ ion and hot atomic oxygen,
which was added to model these species' significant contributions to satellite drag at high latitude
and altitude, primarily during the summer months [picone]. The model inputs are the location, date,
and time of day, along with the 10.7 cm solar radio flux (F10.7) and the AP planetary activity index.
NOTES ON INPUT VARIABLES:
C UT, Local Time, and Longitude are used independently in the
C model and are not of equal importance for every situation.
C For the most physically realistic calculation these three
C variables should be consistent (STL=SEC/3600+GLONG/15).
C The Equation of Time departures from the above formula
C for apparent local time can be included if available but
C are of minor importance.
c
C F107 and F107A values used to generate the model correspond
C to the 10.7 cm radio flux at the actual distance of the Earth
C from the Sun rather than the radio flux at 1 AU. The following
C site provides both classes of values:
C ftp://ftp.ngdc.noaa.gov/STP/SOLAR_DATA/SOLAR_RADIO/FLUX/
""")
self.drivers['dt']=datetime.datetime(2000,6,21,12,0,0)
self.drivers.allowed_range['dt'] = [datetime.datetime(1970,1,1,0,0,0),msispy.latest_f107ap_datetime]
self.drivers.descriptions['dt'] = 'Date and time of model run'
self.drivers['f107']=None
self.drivers.allowed_range['f107'] = [65.,350.]
self.drivers.units['f107'] = 'SFU'
self.drivers.descriptions['f107'] = 'Solar 10.7 cm Flux'
self.drivers['ap_daily']=None
self.drivers.allowed_range['ap_daily'] = [0.,400.]
self.drivers.units['ap_daily'] = 'unitless' #No units
self.drivers.descriptions['ap_daily'] = 'AP planetary activity index'
self.drivers['f107a']=None
self.drivers.allowed_range['f107a'] = [65.,350.]
self.drivers.units['f107a'] = 'SFU' #10^-22 W/m2/Hz'
self.drivers.descriptions['f107a'] = '81-day Average Solar 10.7 cm Flux'
#Warning: if you don't define this you will be restricted to
#0 to 400 km, which is the default set in the above function
self.vars.allowed_range['Altitude'] = [0.,1000.]
def populate(self):
super(MsisRun,self).populate()
self.log.info( "Now runing NRLMSISE00 for %s...\n" % (self.drivers['dt'].strftime('%c')))
self.log.info( "Driver dict is %s\n" % (str(self.drivers)))
self.species,self.t_exo,self.t_alt,units,descriptions,outdrivers = msispy.msis(self.flatlat,self.flatlon,self.flatalt,**self.drivers)
#Copy the output drivers into the drivers dictionary
for d in outdrivers:
self.drivers[d] = outdrivers[d]
#Now add temperature the variables dictionary
self.vars['T_exo'] = self.t_exo
self.vars.units['T_exo'] = 'K'
self.vars.descriptions['T_exo'] = 'Exospheric Temperature'
self.vars['Temperature'] = self.t_alt
self.vars.units['Temperature'] = 'K'
#Now add all of the different number and mass densities to to the vars dictionary
for s in self.species:
self.vars[s] = self.species[s]
if s == 'mass':
self.vars.units[s] = 'g/cm^3'
self.vars.descriptions[s] = 'Mass Density'
else:
self.vars.units[s] = '1/cm^3'
self.vars.descriptions[s] = 'Number Density of %s' % (s)
self.finalize()
self.add_compound_var('ON2ratio','O/N2',units='unitless',description='Atomic Oxygen/Molecular Nitrogen Ratio')
class ModelRunner(object):
""" Makes model calls """
def __init__(self,canvas=None,firstmodel="msis"):
self.log = logging.getLogger(self.__class__.__name__)
self.cv = canvas
self.model = firstmodel
#Init the runs list (holds each run in sequence)
self.runs = []
#Start with a blank msis run
self.init_nextrun()
self.nextrun.drivers['dt'] = datetime.datetime(2000,6,21,12,0,0) #Summer solstice
#Set counters
self.n_total_runs=0
self.n_max_runs=10
self._lastind=-1 #The index of the current 'previous' model
self._differencemode = False # Init the property
#Create the dictionary that stores the settings for the next run
@property
def lastind(self):
return self._lastind
@lastind.setter
def lastind(self, value):
if value < 0 and value >= -1*len(self.runs):
self._lastind = value
else:
self.log.warn("Attempted to set model history index to %s, which is invalid." % ( str(value)))
def init_nextrun(self):
if self.model.lower() == 'msis':
self.nextrun = MsisRun()
#elif self.model.lower() == 'hwm':
# self.nextrun = HWMRun()
#elif self.model.lower() == 'iri':
# self.nextrun = IRIRun()
else:
raise ValueError("%s is not a valid model to run" % (self.model))
def __call__(self,propagate_drivers=False):
#Add to runs list, create a new nextrun
self.runs.append(self.nextrun)
self.init_nextrun()
if propagate_drivers:
for key in self.runs[-1].drivers:
if key in self.nextrun.drivers and self.nextrun.drivers[key] is None:
self.nextrun.drivers[key] = self.runs[-1].drivers[key]
if self.differencemode:
#Update the peering
self.nextrun.peer = self.runs[-1].peer
else:
for run in self.runs:
run.peer = None
self.n_total_runs+=1
self.log.info( "Model run number %d added." % (self.n_total_runs))
if len(self.runs)>self.n_max_runs:
del self.runs[0]
self.log.info( "Exceeded total number of stored runs %d. Run %d removed" %(self.n_max_runs,self.n_total_runs-self.n_max_runs))
#Implement a simple interface for retrieving data, which is opaque to whether or not we're differencing
#or which model we're using
def __getitem__(self,key):
"""Shorthand for self.runs[-1][key], which returns self.runs[-1].vars[key],self.runs[-1].lims[key]"""
return self.runs[self.lastind][key]
def __setitem__(self,key,value):
"""Shorthand for self.nextrun[key]=value"""
self.nextrun[key] = value
#Make a property for the difference mode turning on or off, which automatically updates the last run peer
@property
def differencemode(self):
return self._differencemode
@differencemode.setter
def differencemode(self, boo):
self.log.info( "Difference mode is now %s" % (str(boo)))
if boo:
self.nextrun.peer = self.runs[-1]
else:
self.nextrun.peer = None
self._differencemode = boo
class PlotDataHandler(object):
def __init__(self,canvas,controlstate=None,plottype='line',cscale='linear',mapproj='moll'):
"""
Takes a singleMplCanvas instance to associate with
and plot on
"""
self.canvas = canvas
self.log = logging.getLogger(self.__class__.__name__)
if controlstate is None:
self.controlstate = canvas.controlstate #This is for atmodexplorer, where there's only one controlstate
#and it's associated with the canvas
else:
self.controlstate = controlstate # This is for atmodweb, where the controlstate is associated with the synchronizer
self.fig = canvas.fig
self.ax = canvas.ax
self.axpos = canvas.ax.get_position()
pts = self.axpos.get_points().flatten() # Get the extent of the bounding box for the canvas axes
self.cbpos = None
self.cb = None
self.map_lw=.5 #Map linewidth
self.map = None #Place holder for map instance if we're plotting a map
#The idea is to have all plot settings described in this class,
#so that if we want to add a new plot type, we only need to modify
#this class
self.supported_projections={'mill':'Miller Cylindrical','moll':'Mollweide','ortho':'Orthographic'}
self.plottypes = dict()
self.plottypes['line'] = {'gridxy':False,'overplot_ready':True,'x_allowed':['all'],'y_allowed':['all'],'z_allowed':['none']}
self.plottypes['pcolor'] = {'gridxy':True,'overplot_ready':False,'x_allowed':['position'],'y_allowed':['position'],'z_allowed':['notposition']}
self.plottypes['map'] = {'gridxy':True,'overplot_ready':False,'x_allowed':['Longitude'],'y_allowed':['Latitude'],'z_allowed':['notposition']}
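#Sketch of how a new plot type would be added under this design: register an
#entry in self.plottypes describing what each axis accepts, then branch on the
#new name in caption(), compute_statistics() and plot() below.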
if plottype not in self.plottypes:
raise ValueError('Invalid plottype %s! Choose from %s' % (plottype,str(self.plottypes.keys())))
#Assign settings from input
self.plottype = plottype
self.mapproj = mapproj # Map projection for Basemap
#Init the data variables
self.clear_data()
def caption(self):
#Generates a description of the graph
cap = "%s" % (str(self.xname) if not self.xlog else "log(%s)" % (str(self.xname)))
cap = cap + " vs. %s" % (str(self.yname) if not self.ylog else "log(%s)" % (str(self.yname)))
#if self.plottype == 'pcolor':
# cap = "Pseudocolor plot of "+ cap + " vs. %s" % (str(self.zname) if not self.zlog else "log(%s)" % (str(self.zname)))
if self.plottype == 'map':
cap = "%s projection map of " % (self.supported_projections[self.mapproj])+ cap + \
" vs. %s" % (str(self.zname) if not self.zlog else "log(%s)" % (str(self.zname)))
return cap
def clear_data(self):
self.log.info("Clearing PlotDataHandler data NOW.")
self.x,self.y,self.z = None,None,None #must be np.array
self.xname,self.yname,self.zname = None,None,None #must be string
self.xbounds,self.ybounds,self.zbounds = None,None,None #must be 2 element tuple
self.xlog, self.ylog, self.zlog = False, False, False
self.xunits, self.yunits, self.zunits = None, None, None
self.xdesc, self.ydesc, self.zdesc = None, None, None
self.npts = None
self.statistics = None # Summary statistics for each plotted variable
def associate_data(self,varxyz,vardata,varname,varbounds,varlog,multi=False,units=None,description=None):
#Sanity check
if not multi:
thislen = len(vardata.flatten())
thisshape = vardata.shape
else:
thislen = len(vardata[0].flatten())
thisshape = vardata[0].shape
#Make sure all the arguments have same length, even if left as default
if not hasattr(units,'__iter__') and not isinstance(units,str) : #if it isn't a list, make it one
units = [units for i in range(len(vardata))]
if not hasattr(description,'__iter__') and not isinstance(description,str): #if it isn't a list, make it one
description = [description for i in range(len(vardata))]
#Check total number of points
if self.npts is not None:
if thislen != self.npts:
raise RuntimeError('Variable %s passed for axes %s had wrong flat length, got %d, expected %d' % (varname,varxyz,
thislen,self.npts))
#Check shape
for v in [self.x[0] if getattr(self,'xmulti',False) else self.x,self.y[0] if getattr(self,'ymulti',False) else self.y,self.z]:
if v is not None:
if v.shape != thisshape:
raise RuntimeError('Variable %s passed for axes %s had mismatched shape, got %s, expected %s' % (varname,varxyz,
str(thisshape),str(v.shape)))
#Parse input and assign appropriate variable
if varxyz in ['x','X',0,'xvar']:
self.x = vardata
self.xname = varname
self.xbounds = varbounds
self.xlog = varlog
self.xmulti = multi
self.xunits = units
self.xdesc = description
elif varxyz in ['y','Y',1,'yvar']:
self.y = vardata
self.yname = varname
self.ybounds = varbounds
self.ylog = varlog
self.ymulti = multi
self.yunits = units
self.ydesc = description
elif varxyz in ['z','Z',2,'C','c','zvar']:
self.z = vardata
self.zname = varname
self.zbounds = varbounds
self.zlog = varlog
self.zunits = units
self.zdesc = description
else:
raise ValueError('%s is not a valid axes for plotting!' % (str(varxyz)))
def compute_statistics(self):
self.statistics = OrderedDict()
if self.plottype=='line':
if not self.xmulti:
self.statistics['Mean-%s'%(self.xname)]=np.nanmean(self.x)
self.statistics['Median-%s'%(self.xname)]=np.median(self.x)
self.statistics['StDev-%s'%(self.xname)]=np.nanstd(self.x)
else:
for n in range(len(self.x)):
self.statistics['Mean-%s'%(self.xname[n])]=np.nanmean(self.x[n])
self.statistics['Median-%s'%(self.xname[n])]=np.median(self.x[n])
self.statistics['StDev-%s'%(self.xname[n])]=np.nanstd(self.x[n])
if not self.ymulti:
self.statistics['Mean-%s'%(self.yname)]=np.nanmean(self.y)
self.statistics['Median-%s'%(self.yname)]=np.median(self.y)
self.statistics['StDev-%s'%(self.yname)]=np.nanstd(self.y)
else:
for n in range(len(self.y)):
self.statistics['Mean-%s'%(self.yname[n])]=np.nanmean(self.y[n])
self.statistics['Median-%s'%(self.yname[n])]=np.median(self.y[n])
self.statistics['StDev-%s'%(self.yname[n])]=np.nanstd(self.y[n])
#elif self.plottype=='map' or self.plottypes=='pcolor':
elif self.plottype=='map':
self.statistics['Mean-%s'%(self.zname)]=np.nanmean(self.z)
self.statistics['Median-%s'%(self.zname)]=np.median(self.z)
self.statistics['StDev-%s'%(self.zname)]=np.nanstd(self.z)
if self.plottype=='map':
self.statistics['Geo-Integrated-%s'%(self.zname)]=self.integrate_z()
def integrate_z(self):
#If x and y are longitude and latitude
#integrates z over the grid
if self.xname=='Longitude':
lon=self.x.flatten()
elif self.yname=='Longitude':
lon=self.y.flatten()
else:
return np.nan
if self.xname=='Latitude':
lat=self.x.flatten()
elif self.yname=='Latitude':
lat=self.y.flatten()
else:
return np.nan
#a little hack to get the altitude
alt = self.controlstate['alt']
r_km = 6371.2+alt
zee = self.z.flatten()
zint = 0.
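#The loop below approximates the surface integral of z over the grid on a
#sphere of radius r_km: each area element is
#   dA = r^2 * |dphi * (cos(theta1) - cos(theta2))|
#for the band between colatitudes theta1 and theta2 spanning dphi in longitude,
#with z averaged between the two bounding points.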
for k in range(len(lat)-1):
theta1 = (90.-lat[k])/180.*np.pi
theta2 = (90.-lat[k+1])/180.*np.pi
dphi = (lon[k+1]-lon[k])/180.*np.pi
zint += (zee[k]+zee[k+1])/2.*np.abs(r_km**2*dphi*(np.cos(theta1)-np.cos(theta2)))#area element
return zint
def plot(self,*args,**kwargs):
if self.map is not None:
self.map = None #Make sure that we don't leave any maps lying around if we're not plotting maps
#print "self.ax: %s\n" % (str(self.ax.get_position()))
#print "All axes: \n"
#for i,a in enumerate(self.fig.axes):
# print "%d: %s" % (i,str(a.get_position()))
if self.statistics is None:
self.log.debug("Computing statistics")
self.compute_statistics()
if self.cb is not None:
#logging.info("removing self.cb:%s\n" % (str(self.cb.ax.get_position()))
self.log.debug("Removing self.cb")
self.cb.remove()
self.cb = None
self.ax.cla()
self.ax.get_xaxis().set_visible(True)
self.ax.get_yaxis().set_visible(True)
self.fig.suptitle('')
#Check that we actually have something to plot
#If we have less that 50% of data, add warnings and don't plot anything
self.ax.set_xlim([-1,1])
self.ax.set_ylim([-1,1])
if self.z is not None:
if np.count_nonzero(np.isfinite(self.z)) < len(self.z)/2:
self.ax.text(0,0,"%s is unavailable for this position/altitude" % ((self.zname)),
ha='center',va='center',color="r")
self.ax.get_xaxis().set_visible(False)
self.ax.get_yaxis().set_visible(False)
return
if self.x is not None:
xlist = [self.x] if not self.xmulti else self.x
xnamelist = [self.xname] if not self.xmulti else self.xname
for i in range(len(xlist)):
if np.count_nonzero(np.isfinite(xlist[i])) < len(xlist[i])/2:
self.ax.text(0,0,"%s is unavailable for this position/altitude" % ((xnamelist[i])),
ha='center',va='center',color="r")
self.ax.get_xaxis().set_visible(False)
self.ax.get_yaxis().set_visible(False)
return
if self.y is not None:
ylist = [self.y] if not self.ymulti else self.y
ynamelist = [self.yname] if not self.ymulti else self.yname
for i in range(len(ylist)):
if np.count_nonzero(np.isfinite(ylist[i])) < len(ylist[i])/2:
self.ax.text(0,0,"%s is unavailable for this position/altitude" % ((ynamelist[i])),
ha='center',va='center',color="r")
self.ax.get_xaxis().set_visible(False)
self.ax.get_yaxis().set_visible(False)
return
#self.zbounds = (np.nanmin(self.z),np.nanmax(self.z))
if self.zlog:
self.log.debug("Z var set to use log scale")
self.z[self.z<=0.] = np.nan
norm = LogNorm(vmin=self.zbounds[0],vmax=self.zbounds[1])
locator = ticker.LogLocator()
formatter = ticker.LogFormatter(10, labelOnlyBase=False)
else:
norm = Normalize(vmin=self.zbounds[0],vmax=self.zbounds[1])
if self.plottype == 'line':
self.log.debug("Plottype is line")
#Plot a simple 2d line plot
if self.cb is not None:
self.cb.remove()
self.cb = None
#self.ax.set_position(self.axpos)
if not self.xmulti and not self.ymulti: #No overplotting
self.log.debug("No multiplotting is requested: X var %s, Y var %s" % (str(self.xname),str(self.yname)))
self.ax.plot(self.x,self.y,*args,**kwargs)
xbnds = self.xbounds
ybnds = self.ybounds
xnm = self.xname if self.xdesc is None else self.xdesc
xnm += '' if self.xunits is None else '[%s]' % (str(self.xunits))
ynm = self.yname if self.ydesc is None else self.ydesc
ynm += '' if self.yunits is None else '[%s]' % (str(self.yunits))
elif self.xmulti and not self.ymulti: #Overplotting xvars
self.log.debug("X multiplotting is requested: X vars %s, Y var %s" % (str(self.xname),str(self.yname)))
xbnds = self.xbounds[0]
ybnds = self.ybounds
xnm = ''
endut = self.xunits[0]
for i in range(len(self.xname)):
nm,ut = self.xname[i],self.xunits[i]
if ut is not None and ut != endut:
xnm += nm+'[%s]'%(str(ut))+','
else:
xnm += nm+','
xnm = xnm[:-1] #Remove last comma
xnm += '[%s]' % (endut) if endut is not None else ''
self.log.debug(xnm)
self.log.debug(str(self.xbounds))
ynm = self.yname if self.ydesc is None else self.ydesc
ynm += '' if self.yunits is None else '[%s]' % (str(self.yunits))
for i in range(len(self.x)):
self.ax.plot(self.x[i],self.y,label=self.xname[i],*args,**kwargs) #should cycle through colors
#self.ax.hold(True)
#Compute new bounds as including all bounds
xbnds[0] = xbnds[0] if xbnds[0]<self.xbounds[i][0] else self.xbounds[i][0]
xbnds[1] = xbnds[1] if xbnds[1]>self.xbounds[i][1] else self.xbounds[i][1]
elif self.ymulti and not self.xmulti: #Overplotting yvars
self.log.debug("Y multiplotting is requested: X var %s, Y vars %s" % (str(self.xname),str(self.yname)))
ybnds = self.ybounds[0]
xbnds = self.xbounds
xnm = self.xname
xnm += '' if self.xunits is None else '[%s]' % (str(self.xunits))
ynm = ''
endut = self.yunits[0]
for i in range(len(self.yname)):
nm,ut = self.yname[i],self.yunits[i]
if ut is not None and ut != endut:
ynm += nm+'[%s]'%(str(ut))+','
else:
ynm += nm+','
ynm = ynm[:-1] #Remove last comma
ynm += '[%s]' % (endut) if endut is not None else ''
for i in range(len(self.y)):
self.ax.plot(self.x,self.y[i],label=self.yname[i],*args,**kwargs) #should cycle through colors
#self.ax.hold(True)
#Compute new bounds as including all bounds
self.log.debug(str(self.ybounds[i]))
ybnds[0] = ybnds[0] if ybnds[0]<self.ybounds[i][0] else self.ybounds[i][0]
ybnds[1] = ybnds[1] if ybnds[1]>self.ybounds[i][1] else self.ybounds[i][1]
#Set axes appearance and labeling
self.ax.set_xlabel(xnm)
if self.xlog:
self.ax.set_xscale('log',nonposx='clip')
self.ax.get_xaxis().get_major_formatter().labelOnlyBase = False
self.ax.set_xlim(xbnds)
self.log.debug("Setting bounds for X var %s, %s" % (str(self.xname),str(xbnds)))
self.ax.set_ylabel(ynm)
if self.ylog:
self.ax.set_yscale('log',nonposy='clip')
self.ax.get_yaxis().get_major_formatter().labelOnlyBase = False
self.ax.set_ylim(ybnds)
self.log.debug("Setting bounds for Y var %s, %s" % (str(self.yname),str(ybnds)))
if self.xmulti or self.ymulti:
self.ax.legend()
#self.ax.set_ylim(0,np.log(self.ybounds[1]))
self.ax.set_title('%s vs. %s' % (xnm if not self.xlog else 'log(%s)'%(xnm),ynm if not self.ylog else 'log(%s)'%(ynm)),y=1.08)
self.ax.grid(True,linewidth=.1)
if not self.xlog and not self.ylog:
self.ax.set_aspect(1./self.ax.get_data_ratio())
self.ax.set_position([.15,.15,.75,.75])
#try:
# self.fig.tight_layout()
#except:
# print "Tight layout for line failed"
# elif self.plottype == 'pcolor':
# self.log.info("Plottype is pcolor for vars:\n--X=%s lims=(%s)\n--Y=%s lims=(%s)\n--C=%s lims=(%s)" % (str(self.xname),str(self.xbounds),
# str(self.yname),str(self.ybounds),str(self.zname),str(self.zbounds)))
# xnm = self.xname if self.xdesc is None else self.xdesc
# xnm += '' if self.xunits is None else '[%s]' % (str(self.xunits))
# ynm = self.yname if self.ydesc is None else self.ydesc
# ynm += '' if self.yunits is None else '[%s]' % (str(self.yunits))
# znm = self.zname if self.zdesc is None else self.zdesc
# znm += '' if self.zunits is None else '[%s]' % (str(self.zunits))
# #nn = np.isfinite(self.x)
# #nn = np.logical_and(nn,np.isfinite(self.y))
# #nn = np.logical_and(nn,np.isfinite(self.z))
# mappable = self.ax.pcolormesh(self.x,self.y,self.z,norm=norm,shading='gouraud',**kwargs)
# #m.draw()
# self.ax.set_xlabel(xnm)
# if self.xlog:
# self.ax.set_xscale('log',nonposx='clip')
# self.ax.set_xlim(self.xbounds)
# self.ax.set_ylabel(ynm)
# if self.ylog:
# self.ax.set_xscale('log',nonposx='clip')
# self.ax.set_ylim(self.ybounds)
# if self.zlog: #Locator goes to ticks argument
# self.cb = self.fig.colorbar(mappable,ax=self.ax,orientation='horizontal',format=formatter,ticks=locator)
# else:
# self.cb = self.fig.colorbar(mappable,ax=self.ax,orientation='horizontal')
# #self.ax.set_position(self.axpos)
# #self.cb.ax.set_position(self.cbpos)
# self.cb.ax.set_position([.1,0,.8,.15])
# self.ax.set_position([.1,.25,.8,.7])
# self.cb.set_label(znm)
# self.ax.set_aspect(1./self.ax.get_data_ratio())
# self.ax.set_title('%s vs. %s (color:%s)' % (xnm,ynm,
# znm if not self.zlog else 'log(%s)'% znm))
elif self.plottype == 'map':
#Basemap is too screwy for partial maps
#self.ybounds = [-90.,90.]
#self.xbounds = [-180.,180.]
znm = self.zname if self.zdesc is None else self.zdesc
znm += '' if self.zunits is None else '[%s]' % (str(self.zunits))
self.log.info("Plottype is %s projection MAP for vars:\n--X=%s lims=(%s)\n--Y=%s lims=(%s)\n--C=%s lims=(%s)" % (str(self.mapproj),str(self.xname),str(self.xbounds),
str(self.yname),str(self.ybounds),str(self.zname),str(self.zbounds)))
if self.mapproj=='moll':
m = Basemap(projection=self.mapproj,llcrnrlat=int(self.ybounds[0]),urcrnrlat=int(self.ybounds[1]),\
llcrnrlon=int(self.xbounds[0]),urcrnrlon=int(self.xbounds[1]),lat_ts=20,resolution='c',ax=self.ax,lon_0=0.)
elif self.mapproj=='mill':
m = Basemap(projection=self.mapproj,llcrnrlat=int(self.ybounds[0]),urcrnrlat=int(self.ybounds[1]),\
llcrnrlon=int(self.xbounds[0]),urcrnrlon=int(self.xbounds[1]),lat_ts=20,resolution='c',ax=self.ax)
elif self.mapproj=='ortho':
m = Basemap(projection='ortho',ax=self.ax,lat_0=int(self.controlstate['lat']),
lon_0=int(self.controlstate['lon']),resolution='l')
m.drawcoastlines(linewidth=self.map_lw)
#m.fillcontinents(color='coral',lake_color='aqua')
# draw parallels and meridians.
m.drawparallels(np.arange(-90.,91.,15.),linewidth=self.map_lw)
m.drawmeridians(np.arange(-180.,181.,30.),linewidth=self.map_lw)
if self.zlog:
mappable = m.pcolormesh(self.x,self.y,self.z,linewidths=1.5,latlon=True,norm=norm,
vmin=self.zbounds[0],vmax=self.zbounds[1],shading='gouraud',**kwargs)
else:
mappable = m.pcolormesh(self.x,self.y,self.z,linewidths=1.5,latlon=True,norm=norm,
vmin=self.zbounds[0],vmax=self.zbounds[1],shading='gouraud',**kwargs)
latbounds = [self.ybounds[0],self.ybounds[1]]
lonbounds = [self.xbounds[0],self.xbounds[1]]
lonbounds[0],latbounds[0] = m(lonbounds[0],latbounds[0])
lonbounds[1],latbounds[1] = m(lonbounds[1],latbounds[1])
#self.ax.set_ylim(latbounds)
#self.ax.set_xlim(lonbounds)
#m.draw()
if self.zlog:
self.cb = self.fig.colorbar(mappable,ax=self.ax,orientation='horizontal',format=formatter,ticks=locator)
else:
self.cb = self.fig.colorbar(mappable,ax=self.ax,orientation='horizontal')
#m.set_axes_limits(ax=self.canvas.ax)
if self.mapproj == 'mill':
self.ax.set_xlim(lonbounds)
self.ax.set_ylim(latbounds)
self.cb.ax.set_position([.1,.05,.8,.15])
self.ax.set_position([.1,.2,.8,.7])
elif self.mapproj == 'moll':
self.cb.ax.set_position([.1,.05,.8,.13])
self.ax.set_position([.05,.2,.9,.9])
self.cb.set_label(znm)
self.ax.set_title('%s Projection Map of %s' % (self.supported_projections[self.mapproj],
znm if not self.zlog else 'log(%s)' % (znm)),y=1.08)
self.map = m
#Now call the canvas cosmetic adjustment routine
self.canvas.apply_lipstick()
| gpl-3.0 |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/mpl_examples/animation/old_animation/dynamic_image_wxagg2.py | 10 | 3037 | #!/usr/bin/env python
"""
Copyright (C) 2003-2005 Jeremy O'Donoghue and others
License: This work is licensed under the PSF. A copy should be included
with this source code, and is also available at
http://www.python.org/psf/license.html
"""
import sys, time, os, gc
import matplotlib
matplotlib.use('WXAgg')
from matplotlib import rcParams
import numpy as npy
import matplotlib.cm as cm
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
from wx import *
TIMER_ID = NewId()
class PlotFigure(Frame):
def __init__(self):
Frame.__init__(self, None, -1, "Test embedded wxFigure")
self.fig = Figure((5,4), 75)
self.canvas = FigureCanvasWxAgg(self, -1, self.fig)
self.toolbar = NavigationToolbar2Wx(self.canvas)
self.toolbar.Realize()
# On Windows, default frame size behaviour is incorrect
# you don't need this under Linux
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
self.toolbar.SetSize(Size(fw, th))
# Create a figure manager to manage things
# Now put all into a sizer
sizer = BoxSizer(VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(self.canvas, 1, LEFT|TOP|GROW)
# Best to allow the toolbar to resize!
sizer.Add(self.toolbar, 0, GROW)
self.SetSizer(sizer)
self.Fit()
EVT_TIMER(self, TIMER_ID, self.onTimer)
def init_plot_data(self):
# jdh you can add a subplot directly from the fig rather than
# the fig manager
a = self.fig.add_axes([0.075,0.1,0.75,0.85])
cax = self.fig.add_axes([0.85,0.1,0.075,0.85])
self.x = npy.empty((120,120))
self.x.flat = npy.arange(120.0)*2*npy.pi/120.0
self.y = npy.empty((120,120))
self.y.flat = npy.arange(120.0)*2*npy.pi/100.0
self.y = npy.transpose(self.y)
z = npy.sin(self.x) + npy.cos(self.y)
self.im = a.imshow( z, cmap=cm.jet)#, interpolation='nearest')
self.fig.colorbar(self.im,cax=cax,orientation='vertical')
def GetToolBar(self):
# You will need to override GetToolBar if you are using an
# unmanaged toolbar in your frame
return self.toolbar
def onTimer(self, evt):
self.x += npy.pi/15
self.y += npy.pi/20
z = npy.sin(self.x) + npy.cos(self.y)
self.im.set_array(z)
self.canvas.draw()
#self.canvas.gui_repaint() # jdh wxagg_draw calls this already
def onEraseBackground(self, evt):
# this is supposed to prevent redraw flicker on some X servers...
pass
if __name__ == '__main__':
app = PySimpleApp()
frame = PlotFigure()
frame.init_plot_data()
# Initialise the timer - wxPython requires this to be connected to
# the receiving event handler
t = Timer(frame, TIMER_ID)
t.Start(200)
frame.Show()
app.MainLoop()
| gpl-2.0 |
matthew-tucker/mne-python | examples/time_frequency/plot_source_label_time_frequency.py | 19 | 3767 | """
=========================================================
Compute power and phase lock in label of the source space
=========================================================
Compute time-frequency maps of power and phase lock in the source space.
The inverse method is linear based on dSPM inverse operator.
The example also shows the difference in the time-frequency maps
when they are computed with and without subtracting the evoked response
from each epoch. The former results in induced activity only while the
latter also includes evoked (stimulus-locked) activity.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_induced_power
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
label_name = 'Aud-rh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
tmin, tmax, event_id = -0.2, 0.5, 2
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Picks MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, include=include, exclude='bads')
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
# Load epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject,
preload=True)
# Compute a source estimate per frequency band including and excluding the
# evoked response
frequencies = np.arange(7, 30, 2) # define frequencies of interest
label = mne.read_label(fname_label)
n_cycles = frequencies / 3. # different number of cycle per frequency
# subtract the evoked response in order to exclude evoked activity
epochs_induced = epochs.copy().subtract_evoked()
plt.close('all')
for ii, (this_epochs, title) in enumerate(zip([epochs, epochs_induced],
['evoked + induced',
'induced only'])):
# compute the source space power and phase lock
power, phase_lock = source_induced_power(
this_epochs, inverse_operator, frequencies, label, baseline=(-0.1, 0),
baseline_mode='percent', n_cycles=n_cycles, n_jobs=1)
power = np.mean(power, axis=0) # average over sources
phase_lock = np.mean(phase_lock, axis=0) # average over sources
times = epochs.times
##########################################################################
# View time-frequency plots
plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 2, 2 * ii + 1)
plt.imshow(20 * power,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=0., vmax=30., cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Power (%s)' % title)
plt.colorbar()
plt.subplot(2, 2, 2 * ii + 2)
plt.imshow(phase_lock,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=0, vmax=0.7,
cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Phase-lock (%s)' % title)
plt.colorbar()
plt.show()
| bsd-3-clause |
cheeseywhiz/cheeseywhiz | http/data/vectorized.py | 1 | 3649 | import csv
import sys
import time
import matplotlib.pyplot as plt
from config import data_sets
try:
data_set = data_sets[sys.argv[1]]
if sys.argv[1] not in data_sets:
raise IndexError
if sys.argv[1] in ['-h', '-help', '--help']:
raise IndexError
except IndexError as error:
keys = '\n'.join(key for key in data_sets)
print(f'Data sets:\n{keys}\nPut in arg #1')
sys.exit(1)
def timer(f):
"""\
Print the execution time of the wrapped function to console.
"""
def decorator(*args, **kwargs):
t = time.clock()
result = f(*args, **kwargs)
print('%.6f'%(time.clock() - t))
return result
return decorator
def restrict(index):
"""\
Apply the wrapped function only to the element at the given index of its
sequence argument, leaving the other elements unchanged.
"""
def decorated(f):
def decorator(list, *args, **kwargs):
return type(list)([
*list[:index],
f(list[index], *args, **kwargs),
*list[index + 1:],
])
return decorator
return decorated
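#For example (illustrative), restrict(1)(str.upper) applied to ('label', 'abc')
#returns ('label', 'ABC'): only the element at index 1 is transformed.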
def vector_2d(f):
"""\
Perform the same operation on each element of a 2d list
"""
def decorator(list, *args, **kwargs):
return [f(item, *args, **kwargs) for item in list]
return decorator
# allowing for None end chars
if data_set['str-end-chars'] is not None:
data_set['str-end-chars'] *= -1
with open(data_set['file-location']) as file:
# for processing huge files
csv.field_size_limit(sys.maxsize)
# you can unpack a list: no tupling required here
@timer
def raw_data_(file):
return list(csv.reader(file))
raw_data = raw_data_(file)
print('raw_data\n')
@timer
def formatted_data_(raw_data):
return [
(
row[data_set['label-index']],
row[data_set['data-index']][
data_set['str-start-chars']:data_set['str-end-chars']
]
)
for row in raw_data[1:]
]
formatted_data = formatted_data_(raw_data)
print('formatted_data\n')
# the mo-counties data set delimits its coordinate pairs differently (comma-separated)
if data_set == data_sets['mo-counties']:
formatted_data = [
(label, coords.replace(',', ' '))
for label, coords in formatted_data
]
# finally some functions
@timer
@vector_2d
@restrict(1)
def split_coords_(str):
"""\
Split the str in position 1 for each element in the argument
"""
return str.split(' ')
split_coords = split_coords_(formatted_data)
print('split_coords\n')
# turn strings into floats by trimming off trailing characters if necessary
def float_recur(str, n=1):
if n > 1000: # Or else it causes stack overflow (???)
return None # Also good for debugging
try:
return float(str)
except Exception:
return float_recur(str[:-1], n=n + 1)
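# e.g. float_recur('12.5)') -> 12.5 (illustrative): trailing characters are
# stripped one at a time until float() succeeds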
@timer
@vector_2d
@restrict(1)
def float_coords_(coords):
"""\
"""
return [float_recur(coord) for coord in coords]
float_coords = float_coords_(split_coords)
print('float_coords\n')
@timer
@vector_2d
@restrict(1)
def coord_pairs_(coords):
return [
(coords[i], coords[i + 1])
for i in range(len(coords))
if not i % 2
]
coord_pairs = coord_pairs_(float_coords)
print('coord_pairs\n')
@timer
def boundaries_(coord_pairs):
return [
(label, zip(*coords))
for label, coords in coord_pairs
]
boundaries = boundaries_(coord_pairs)
print('boundaries\n')
@timer
def plot_(boundaries):
for label, boundary in boundaries:
plt.plot(*boundary)
plot_(boundaries)
print('showing plot')
plt.show()
print('\ndone')
| mit |
tdhopper/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
UDST/urbanaccess | urbanaccess/network.py | 1 | 21031 | import time
import os
import geopy
from geopy import distance
from sklearn.neighbors import KDTree
import pandas as pd
from urbanaccess.utils import log, df_to_hdf5, hdf5_to_df
from urbanaccess import config
if int(geopy.__version__[0]) < 2:
dist_calc = distance.vincenty
else:
dist_calc = distance.geodesic
class urbanaccess_network(object):
"""
An urbanaccess object of pandas.DataFrames representing
the components of a graph network
Parameters
----------
transit_nodes : pandas.DataFrame
transit_edges : pandas.DataFrame
net_connector_edges : pandas.DataFrame
osm_nodes : pandas.DataFrame
osm_edges : pandas.DataFrame
net_nodes : pandas.DataFrame
net_edges : pandas.DataFrame
"""
def __init__(self,
transit_nodes=pd.DataFrame(),
transit_edges=pd.DataFrame(),
net_connector_edges=pd.DataFrame(),
osm_nodes=pd.DataFrame(),
osm_edges=pd.DataFrame(),
net_nodes=pd.DataFrame(),
net_edges=pd.DataFrame()):
self.transit_nodes = transit_nodes
self.transit_edges = transit_edges
self.net_connector_edges = net_connector_edges
self.osm_nodes = osm_nodes
self.osm_edges = osm_edges
self.net_nodes = net_nodes
self.net_edges = net_edges
# instantiate the UrbanAccess network object
ua_network = urbanaccess_network()
def _nearest_neighbor(df1, df2):
"""
For a DataFrame of xy coordinates find the nearest xy
coordinates in a subsequent DataFrame
Parameters
----------
df1 : pandas.DataFrame
DataFrame of records to return as the nearest record to records in df2
df2 : pandas.DataFrame
DataFrame of records with xy coordinates for which to find the
nearest record in df1 for
Returns
-------
df1.index.values[indexes] : pandas.Series
index of records in df1 that are nearest to the coordinates in df2
"""
try:
df1_matrix = df1.to_numpy()
df2_matrix = df2.to_numpy()
except AttributeError:
df1_matrix = df1.values
df2_matrix = df2.values
kdt = KDTree(df1_matrix)
indexes = kdt.query(df2_matrix, k=1, return_distance=False)
return df1.index.values[indexes]
def integrate_network(urbanaccess_network, headways=False,
urbanaccess_gtfsfeeds_df=None, headway_statistic='mean'):
"""
Create an integrated network comprised of transit and OSM nodes and edges
by connecting the transit network with the OSM network.
travel time is in units of minutes
Parameters
----------
urbanaccess_network : object
ua_network object with transit_edges, transit_nodes,
osm_edges, osm_nodes
headways : bool, optional
if true, route stop level headways calculated in a previous step
will be applied to the OSM to transit connector
edge travel time weights as an approximate measure
of average passenger transit stop waiting time.
urbanaccess_gtfsfeeds_df : object, optional
required if headways is true; the gtfsfeeds_dfs object that holds
the corresponding headways and stops DataFrames
headway_statistic : {'mean', 'std', 'min', 'max'}, optional
required if headways is true; route stop headway
statistic to apply to the OSM to transit connector edges:
mean, std, min, max. Default is mean.
Returns
-------
urbanaccess_network : object
urbanaccess_network.transit_edges : pandas.DataFrame
urbanaccess_network.transit_nodes : pandas.DataFrame
urbanaccess_network.osm_edges : pandas.DataFrame
urbanaccess_network.osm_nodes : pandas.DataFrame
urbanaccess_network.net_connector_edges : pandas.DataFrame
urbanaccess_network.net_edges : pandas.DataFrame
urbanaccess_network.net_nodes : pandas.DataFrame
"""
if urbanaccess_network is None:
raise ValueError('urbanaccess_network is not specified')
if urbanaccess_network.transit_edges.empty \
or urbanaccess_network.transit_nodes.empty \
or urbanaccess_network.osm_edges.empty \
or urbanaccess_network.osm_nodes.empty:
raise ValueError(
'one of the network objects: transit_edges, transit_nodes, '
'osm_edges, or osm_nodes were found to be empty.')
log('Loaded UrbanAccess network components comprised of:')
log(' Transit: {:,} nodes and {:,} edges;'.format(
len(urbanaccess_network.transit_nodes),
len(urbanaccess_network.transit_edges)))
log(' OSM: {:,} nodes and {:,} edges'.format(
len(urbanaccess_network.osm_nodes),
len(urbanaccess_network.osm_edges)))
if not isinstance(headways, bool):
raise ValueError('headways must be bool type')
if headways:
if urbanaccess_gtfsfeeds_df is None or \
urbanaccess_gtfsfeeds_df.headways.empty or \
urbanaccess_gtfsfeeds_df.stops.empty:
raise ValueError(
'stops and headway DataFrames were not found in the '
'urbanaccess_gtfsfeeds object. Please create these '
'DataFrames in order to use headways.')
valid_stats = ['mean', 'std', 'min', 'max']
if headway_statistic not in valid_stats or not isinstance(
headway_statistic, str):
raise ValueError('{} is not a supported statistic or is not a '
'string'.format(headway_statistic))
transit_edge_cols = urbanaccess_network.transit_edges.columns
if 'node_id_from' not in transit_edge_cols or 'from' in \
transit_edge_cols:
urbanaccess_network.transit_edges.rename(
columns={'from': 'node_id_from'}, inplace=True)
if 'node_id_to' not in transit_edge_cols or 'to' in transit_edge_cols:
urbanaccess_network.transit_edges.rename(
columns={'to': 'node_id_to'}, inplace=True)
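# With headways, each transit stop is expanded into one node per route
# (stop id concatenated with route id) so that route-stop level headways
# can later be attached to the OSM-to-transit connector edges.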
urbanaccess_network.transit_edges['node_id_route_from'] = (
urbanaccess_network.transit_edges['node_id_from'].str.cat(
urbanaccess_network.transit_edges['unique_route_id'].astype(
'str'), sep='_'))
urbanaccess_network.transit_edges['node_id_route_to'] = (
urbanaccess_network.transit_edges['node_id_to'].str.cat(
urbanaccess_network.transit_edges['unique_route_id'].astype(
'str'), sep='_'))
urbanaccess_network.transit_nodes = _route_id_to_node(
stops_df=urbanaccess_gtfsfeeds_df.stops,
edges_w_routes=urbanaccess_network.transit_edges)
net_connector_edges = _connector_edges(
osm_nodes=urbanaccess_network.osm_nodes,
transit_nodes=urbanaccess_network.transit_nodes,
travel_speed_mph=3)
urbanaccess_network.net_connector_edges = _add_headway_impedance(
ped_to_transit_edges_df=net_connector_edges,
headways_df=urbanaccess_gtfsfeeds_df.headways,
headway_statistic=headway_statistic)
else:
urbanaccess_network.net_connector_edges = _connector_edges(
osm_nodes=urbanaccess_network.osm_nodes,
transit_nodes=urbanaccess_network.transit_nodes,
travel_speed_mph=3)
# change cols in transit edges and nodes
if headways:
urbanaccess_network.transit_edges.rename(columns={
'node_id_route_from': 'from', 'node_id_route_to': 'to'},
inplace=True)
urbanaccess_network.transit_edges.drop(['node_id_from', 'node_id_to'],
inplace=True, axis=1)
urbanaccess_network.transit_nodes.reset_index(inplace=True, drop=False)
urbanaccess_network.transit_nodes.rename(
columns={'node_id_route': 'id'}, inplace=True)
else:
urbanaccess_network.transit_edges.rename(
columns={'node_id_from': 'from', 'node_id_to': 'to'}, inplace=True)
urbanaccess_network.transit_nodes.reset_index(inplace=True, drop=False)
urbanaccess_network.transit_nodes.rename(columns={'node_id': 'id'},
inplace=True)
# concat all network components
urbanaccess_network.net_edges = pd.concat(
[urbanaccess_network.transit_edges,
urbanaccess_network.osm_edges,
urbanaccess_network.net_connector_edges], axis=0)
urbanaccess_network.net_nodes = pd.concat(
[urbanaccess_network.transit_nodes,
urbanaccess_network.osm_nodes], axis=0)
urbanaccess_network.net_edges, urbanaccess_network.net_nodes = \
_format_pandana_edges_nodes(edge_df=urbanaccess_network.net_edges,
node_df=urbanaccess_network.net_nodes)
success_msg_1 = ('Network edge and node network integration completed '
'successfully resulting in a total of {:,} nodes '
'and {:,} edges:')
success_msg_2 = ' Transit: {:,} nodes {:,} edges;'
success_msg_3 = ' OSM: {:,} nodes {:,} edges; and'
success_msg_4 = ' OSM/Transit connector: {:,} edges.'
log(success_msg_1.format(len(urbanaccess_network.net_nodes),
len(urbanaccess_network.net_edges)))
log(success_msg_2.format(len(urbanaccess_network.transit_nodes),
len(urbanaccess_network.transit_edges)))
log(success_msg_3.format(len(urbanaccess_network.osm_nodes),
len(urbanaccess_network.osm_edges)))
log(success_msg_4.format(len(urbanaccess_network.net_connector_edges)))
return urbanaccess_network
def _add_headway_impedance(ped_to_transit_edges_df, headways_df,
headway_statistic='mean'):
"""
Add route stop level headways to the OSM to transit connector
travel time weight column
Parameters
----------
ped_to_transit_edges_df : pandas.DataFrame
DataFrame of the OSM to transit connectors
headways_df : pandas.DataFrame
headways DataFrame
headway_statistic : {'mean', 'std', 'min', 'max'}, optional
required if headways is true; route stop headway statistic to apply
to the OSM to transit connector edges:
mean, std, min, max. Default is mean.
Returns
-------
osm_to_transit_wheadway : pandas.DataFrame
"""
start_time = time.time()
log(
'{} route stop headway will be used for pedestrian to transit edge '
'impedance.'.format(
headway_statistic))
osm_to_transit_wheadway = pd.merge(ped_to_transit_edges_df, headways_df[
[headway_statistic, 'node_id_route']],
how='left', left_on=['to'],
right_on=['node_id_route'], sort=False,
copy=False)
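# Adding half of the route-stop headway statistic to the connector weight
# approximates the expected passenger wait time at the stop (with roughly
# uniform arrivals, mean wait is about headway / 2).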
osm_to_transit_wheadway['weight_tmp'] = osm_to_transit_wheadway[
'weight'] + (
osm_to_transit_wheadway[
headway_statistic] / 2.0)
osm_to_transit_wheadway['weight_tmp'].fillna(
osm_to_transit_wheadway['weight'], inplace=True)
osm_to_transit_wheadway.drop('weight', axis=1, inplace=True)
osm_to_transit_wheadway.rename(columns={'weight_tmp': 'weight'},
inplace=True)
log('Headway impedance calculation completed. Took {:,.2f} seconds'.format(
time.time() - start_time))
return osm_to_transit_wheadway
def _route_id_to_node(stops_df, edges_w_routes):
"""
Assign route ids to the transit nodes table
Parameters
----------
stops_df : pandas.DataFrame
processed GTFS stops DataFrame
edges_w_routes : pandas.DataFrame
transit edge DataFrame that has route ID information
Returns
-------
transit_nodes_wroutes : pandas.DataFrame
"""
start_time = time.time()
# create unique stop IDs
stops_df['unique_stop_id'] = (
stops_df['stop_id'].str.cat(
stops_df['unique_agency_id'].astype('str'), sep='_'))
tmp1 = pd.merge(edges_w_routes[['node_id_from', 'node_id_route_from']],
stops_df[['unique_stop_id', 'stop_lat', 'stop_lon']],
how='left', left_on='node_id_from',
right_on='unique_stop_id', sort=False, copy=False)
tmp1.rename(columns={'node_id_route_from': 'node_id_route',
'stop_lon': 'x',
'stop_lat': 'y'},
inplace=True)
tmp2 = pd.merge(edges_w_routes[['node_id_to', 'node_id_route_to']],
stops_df[['unique_stop_id', 'stop_lat', 'stop_lon']],
how='left',
left_on='node_id_to',
right_on='unique_stop_id', sort=False, copy=False)
tmp2.rename(columns={'node_id_route_to': 'node_id_route',
'stop_lon': 'x',
'stop_lat': 'y'},
inplace=True)
transit_nodes_wroutes = pd.concat([tmp1[['node_id_route', 'x', 'y']],
tmp2[['node_id_route', 'x', 'y']]],
axis=0)
transit_nodes_wroutes.drop_duplicates(
subset='node_id_route', keep='first', inplace=True)
# set node index to be unique stop ID
transit_nodes_wroutes = transit_nodes_wroutes.set_index('node_id_route')
# set network type
transit_nodes_wroutes['net_type'] = 'transit'
log('routes successfully joined to transit nodes. '
'Took {:,.2f} seconds'.format(time.time() - start_time))
return transit_nodes_wroutes
def _connector_edges(osm_nodes, transit_nodes, travel_speed_mph=3):
"""
Generate the connector edges between the OSM and transit edges and
weight by travel time
Parameters
----------
osm_nodes : pandas.DataFrame
OSM nodes DataFrame
transit_nodes : pandas.DataFrame
transit nodes DataFrame
travel_speed_mph : int, optional
travel speed to use to calculate travel time across a
distance on an edge. units are in miles per hour (MPH)
for pedestrian travel this is assumed to be 3 MPH
Returns
-------
net_connector_edges : pandas.DataFrame
"""
start_time = time.time()
transit_nodes['nearest_osm_node'] = _nearest_neighbor(
osm_nodes[['x', 'y']],
transit_nodes[['x', 'y']])
net_connector_edges = []
for transit_node_id, row in transit_nodes.iterrows():
# create new edge between the node in df2 (transit)
# and the node in OpenStreetMap (pedestrian)
osm_node_id = int(row['nearest_osm_node'])
osm_row = osm_nodes.loc[osm_node_id]
distance = dist_calc((row['y'], row['x']),
(osm_row['y'], osm_row['x'])).miles
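# distance (miles) divided by travel_speed_mph (miles/hour) gives hours;
# multiplying by 60 below yields the connector edge weight in minutes.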
time_ped_to_transit = distance / travel_speed_mph * 60
time_transit_to_ped = distance / travel_speed_mph * 60
# save the edge
net_type = 'transit to osm'
net_connector_edges.append((transit_node_id, osm_node_id,
time_transit_to_ped, net_type))
# make the edge bi-directional
net_type = 'osm to transit'
net_connector_edges.append((osm_node_id, transit_node_id,
time_ped_to_transit, net_type))
net_connector_edges = pd.DataFrame(net_connector_edges,
columns=["from", "to",
"weight", "net_type"])
log(
'Connector edges between the OSM and transit network nodes '
'successfully completed. Took {:,.2f} seconds'.format(
time.time() - start_time))
return net_connector_edges
def _format_pandana_edges_nodes(edge_df, node_df):
"""
Perform final formatting on nodes and edge DataFrames to prepare them
for use in Pandana. Formatting mainly consists of creating an unique
node ID and edge from and to ID that is an integer
per Pandana requirements.
Parameters
----------
edge_df : pandas.DataFrame
integrated transit and OSM edge DataFrame
node_df : pandas.DataFrame
integrated transit and OSM node DataFrame
Returns
-------
edge_df_wnumericid, node_df : pandas.DataFrame
"""
start_time = time.time()
# Pandana requires IDs that are integer: for nodes - make it the index,
# for edges make it the from and to columns
node_df['id_int'] = range(1, len(node_df) + 1)
edge_df.rename(columns={'id': 'edge_id'}, inplace=True)
tmp = pd.merge(edge_df, node_df[['id', 'id_int']], left_on='from',
right_on='id', sort=False, copy=False, how='left')
tmp['from_int'] = tmp['id_int']
tmp.drop(['id_int', 'id'], axis=1, inplace=True)
edge_df_wnumericid = pd.merge(tmp, node_df[['id', 'id_int']], left_on='to',
right_on='id', sort=False, copy=False,
how='left')
edge_df_wnumericid['to_int'] = edge_df_wnumericid['id_int']
edge_df_wnumericid.drop(['id_int', 'id'], axis=1, inplace=True)
# turn mixed dtype cols into all same format
col_list = edge_df_wnumericid.select_dtypes(include=['object']).columns
for col in col_list:
try:
edge_df_wnumericid[col] = edge_df_wnumericid[col].astype(str)
# deal with edge cases where typically the name of a street is not
# in a uniform string encoding, such as names with accents
except UnicodeEncodeError:
log('Fixed unicode error in {} column'.format(col))
edge_df_wnumericid[col] = edge_df_wnumericid[col].str.encode(
'utf-8')
node_df.set_index('id_int', drop=True, inplace=True)
# turn mixed dtype col into all same format
node_df['id'] = node_df['id'].astype(str)
if 'nearest_osm_node' in node_df.columns:
node_df.drop(['nearest_osm_node'], axis=1, inplace=True)
log('Edge and node tables formatted for Pandana with integer node IDs: '
'id_int, to_int, and from_int. Took {:,.2f} seconds'.format(
time.time() - start_time))
return edge_df_wnumericid, node_df
def save_network(urbanaccess_network, filename,
dir=config.settings.data_folder,
overwrite_key=False, overwrite_hdf5=False):
"""
Write urbanaccess_network integrated nodes and edges to a node and edge
table in a HDF5 file
Parameters
----------
urbanaccess_network : object
urbanaccess_network object with net_edges and net_nodes DataFrames
filename : string
name of the HDF5 file to save with .h5 extension
dir : string, optional
directory to save HDF5 file
overwrite_key : bool, optional
if true any existing table with the specified key name will be
overwritten
overwrite_hdf5 : bool, optional
if true any existing HDF5 file with the specified name in the
specified directory will be overwritten
Returns
-------
None
"""
log('Writing HDF5 store...')
if urbanaccess_network is None or urbanaccess_network.net_edges.empty or \
urbanaccess_network.net_nodes.empty:
raise ValueError('Either no urbanaccess_network specified or '
'net_edges or net_nodes are empty.')
df_to_hdf5(data=urbanaccess_network.net_edges, key='edges',
overwrite_key=overwrite_key, dir=dir,
filename=filename, overwrite_hdf5=overwrite_hdf5)
df_to_hdf5(data=urbanaccess_network.net_nodes, key='nodes',
overwrite_key=overwrite_key, dir=dir, filename=filename,
overwrite_hdf5=overwrite_hdf5)
log("Saved HDF5 store: {} with tables: ['net_edges', 'net_nodes'].".format(
os.path.join(dir, filename)))
def load_network(dir=config.settings.data_folder, filename=None):
"""
Read an integrated network node and edge data from a HDF5 file to
an urbanaccess_network object
Parameters
----------
dir : string, optional
directory to read HDF5 file
filename : string
name of the HDF5 file to read with .h5 extension
Returns
-------
ua_network : object
urbanaccess_network object with net_edges and net_nodes DataFrames
ua_network.net_edges : object
ua_network.net_nodes : object
"""
log('Loading HDF5 store...')
ua_network.net_edges = hdf5_to_df(dir=dir, filename=filename, key='edges')
ua_network.net_nodes = hdf5_to_df(dir=dir, filename=filename, key='nodes')
log("Read HDF5 store: {} tables: ['net_edges', 'net_nodes'].".format(
os.path.join(dir, filename)))
return ua_network
| agpl-3.0 |
ywang007/odo | odo/into.py | 1 | 4396 | from __future__ import absolute_import, division, print_function
from toolz import merge
from multipledispatch import Dispatcher
from .convert import convert
from .append import append
from .resource import resource
from .utils import ignoring
from datashape import discover
from datashape.dispatch import namespace
from datashape.predicates import isdimension
from .compatibility import unicode
from pandas import DataFrame, Series
from numpy import ndarray
not_appendable_types = DataFrame, Series, ndarray, tuple
if 'into' not in namespace:
namespace['into'] = Dispatcher('into')
into = namespace['into']
@into.register(type, object)
def into_type(a, b, dshape=None, **kwargs):
with ignoring(NotImplementedError):
if dshape is None:
dshape = discover(b)
return convert(a, b, dshape=dshape, **kwargs)
@into.register(object, object)
def into_object(target, source, dshape=None, **kwargs):
""" Push one dataset into another
Parameters
----------
source: object or string
The source of your data. Either an object (e.g. DataFrame),
target: object or string or type
The target for where you want your data to go.
Either an object, (e.g. []), a type, (e.g. list)
or a string (e.g. 'postgresql://hostname::tablename')
raise_on_errors: bool (optional, defaults to False)
Raise exceptions rather than reroute around them
**kwargs:
keyword arguments to pass through to conversion functions.
Examples
--------
>>> L = into(list, (1, 2, 3)) # Convert things into new things
>>> L
[1, 2, 3]
>>> _ = into(L, (4, 5, 6)) # Append things onto existing things
>>> L
[1, 2, 3, 4, 5, 6]
>>> into('myfile.csv', [('Alice', 1), ('Bob', 2)]) # doctest: +SKIP
Explanation
-----------
We can specify data with a Python object like a ``list``, ``DataFrame``,
``sqlalchemy.Table``, ``h5py.Dataset``, etc..
We can specify data with a string URI like ``'myfile.csv'``,
``'myfiles.*.json'`` or ``'sqlite:///data.db::tablename'``. These are
matched by regular expression. See the ``resource`` function for more
details on string URIs.
We can optionally specify datatypes with the ``dshape=`` keyword, providing
a datashape. This allows us to be explicit about types when mismatches
occur or when our data doesn't hold the whole picture. See the
``discover`` function for more information on ``dshape``.
>>> ds = 'var * {name: string, balance: float64}'
>>> into('accounts.json', [('Alice', 100), ('Bob', 200)], dshape=ds) # doctest: +SKIP
We can optionally specify keyword arguments to pass down to relevant
conversion functions. For example, when converting a CSV file we might
want to specify delimiter
>>> into(list, 'accounts.csv', has_header=True, delimiter=';') # doctest: +SKIP
These keyword arguments trickle down to whatever function ``into`` uses
convert this particular format, functions like ``pandas.read_csv``.
See Also
--------
into.resource.resource - Specify things with strings
datashape.discover - Get datashape of data
into.convert.convert - Convert things into new things
into.append.append - Add things onto existing things
"""
if isinstance(source, (str, unicode)):
source = resource(source, dshape=dshape, **kwargs)
if type(target) in not_appendable_types:
raise TypeError('target of %s type does not support in-place append' % type(target))
with ignoring(NotImplementedError):
if dshape is None:
dshape = discover(source)
return append(target, source, dshape=dshape, **kwargs)
@into.register((str, unicode), object)
def into_string(uri, b, dshape=None, **kwargs):
if dshape is None:
dshape = discover(b)
resource_ds = 0 * dshape.subshape[0] if isdimension(dshape[0]) else dshape
a = resource(uri, dshape=resource_ds, expected_dshape=dshape, **kwargs)
return into(a, b, dshape=dshape, **kwargs)
@into.register((type, (str, unicode)), (str, unicode))
def into_string_string(a, b, **kwargs):
return into(a, resource(b, **kwargs), **kwargs)
@into.register(object)
def into_curried(o, **kwargs1):
def curried_into(other, **kwargs2):
return into(o, other, **merge(kwargs2, kwargs1))
return curried_into
| bsd-3-clause |
txd866/cuda-convnet2 | shownet.py | 180 | 18206 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
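        # Reshape the weights of the layer given by --show-filters into
        # (channels, pixels, filters), optionally convert YUV filters to RGB and
        # normalize them for display, then delegate to make_filter_fig.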
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
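        # Run a feature-writer pass over one test batch to obtain softmax outputs,
        # then either save per-image PNGs into a tar archive (--save-preds) or plot
        # a grid of images with bar charts of the top predicted classes.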
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
for line in enumerate(axes.get_yticklines()):
line[1].set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
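        # Produce whichever plots were requested on the command line, then block on
        # the matplotlib windows before exiting.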
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
| apache-2.0 |
jakobworldpeace/scikit-learn | examples/tree/plot_unveil_tree_structure.py | 47 | 4852 | """
=========================================
Understanding the decision tree structure
=========================================
The decision tree structure can be analysed to gain further insight on the
relation between the features and the target to predict. In this example, we
show how to retrieve:
- the binary tree structure;
- the depth of each node and whether or not it's a leaf;
- the nodes that were reached by a sample using the ``decision_path`` method;
- the leaf that was reached by a sample using the apply method;
- the rules that were used to predict a sample;
- the decision path shared by a group of samples.
"""
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
estimator = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0)
estimator.fit(X_train, y_train)
# The decision estimator has an attribute called tree_ which stores the entire
# tree structure and allows access to low level attributes. The binary tree
# tree_ is represented as a number of parallel arrays. The i-th element of each
# array holds information about the node `i`. Node 0 is the tree's root. NOTE:
# Some of the arrays only apply to either leaves or split nodes, resp. In this
# case the values of nodes of the other type are arbitrary!
#
# Among those arrays, we have:
# - left_child, id of the left child of the node
# - right_child, id of the right child of the node
# - feature, feature used for splitting the node
# - threshold, threshold value at the node
#
# Using those arrays, we can parse the tree structure:
n_nodes = estimator.tree_.node_count
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
feature = estimator.tree_.feature
threshold = estimator.tree_.threshold
# The tree structure can be traversed to compute various properties such
# as the depth of each node and whether or not it is a leaf.
node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)] # seed is the root node id and its parent depth
while len(stack) > 0:
node_id, parent_depth = stack.pop()
node_depth[node_id] = parent_depth + 1
# If we have a test node
if (children_left[node_id] != children_right[node_id]):
stack.append((children_left[node_id], parent_depth + 1))
stack.append((children_right[node_id], parent_depth + 1))
else:
is_leaves[node_id] = True
print("The binary tree structure has %s nodes and has "
"the following tree structure:"
% n_nodes)
for i in range(n_nodes):
if is_leaves[i]:
print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
else:
print("%snode=%s test node: go to node %s if X[:, %s] <= %s else to "
"node %s."
% (node_depth[i] * "\t",
i,
children_left[i],
feature[i],
threshold[i],
children_right[i],
))
print()
# First let's retrieve the decision path of each sample. The decision_path
# method returns the node indicator matrix: a non-zero element at position
# (i, j) indicates that sample i goes through node j.
node_indicator = estimator.decision_path(X_test)
# Similarly, we can also have the leaves ids reached by each sample.
leave_id = estimator.apply(X_test)
# Now, it's possible to get the tests that were used to predict a sample or
# a group of samples. First, let's make it for the sample.
sample_id = 0
node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
node_indicator.indptr[sample_id + 1]]
print('Rules used to predict sample %s: ' % sample_id)
for node_id in node_index:
    # continue to the next node if it is a leaf node (a leaf carries no test)
    if leave_id[sample_id] == node_id:
        continue
if (X_test[sample_id, feature[node_id]] <= threshold[node_id]):
threshold_sign = "<="
else:
threshold_sign = ">"
print("decision id node %s : (X_test[%s, %s] (= %s) %s %s)"
% (node_id,
sample_id,
feature[node_id],
X_test[sample_id, feature[node_id]],
threshold_sign,
threshold[node_id]))
# For a group of samples, we have the following common node.
sample_ids = [0, 1]
common_nodes = (node_indicator.toarray()[sample_ids].sum(axis=0) ==
len(sample_ids))
common_node_id = np.arange(n_nodes)[common_nodes]
print("\nThe following samples %s share the node %s in the tree"
% (sample_ids, common_node_id))
print("It is %s %% of all nodes." % (100 * len(common_node_id) / n_nodes,))
| bsd-3-clause |
Subarno/MachineLearningPracticePrograms | kmeans.py | 1 | 5324 | import numpy as np
import matplotlib.pyplot as plt
import random
from base64 import b64decode
from json import loads
def parse(x):
    #parse one JSON line of the digits file into a (label, pixel array) tuple
digit = loads(x)
array = np.fromstring(b64decode(digit["data"]),dtype=np.ubyte)
array = array.astype(np.float64)
return (digit["label"], array)
def cluster(labelled_data, k):
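    #run k-means: start from k random digits as centroids, then alternate
    #assignment and centroid updates until the centroids stop moving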
centroids = init_centroids(labelled_data, k)
clusters = form_clusters(labelled_data, centroids)
final_clusters, final_centroids = repeat_until_convergence(labelled_data, clusters, centroids)
return final_clusters, final_centroids
def init_centroids(labelled_data,k):
#Initialize random centroid positions
return list(map(lambda x: x[1], random.sample(labelled_data,k)))
def form_clusters(labelled_data, unlabelled_centroids):
#allocate each datapoint to its closest centroid
centroids_indices = range(len(unlabelled_centroids))
clusters = {c: [] for c in centroids_indices}
for (label,Xi) in labelled_data:
# for each datapoint, pick the closest centroid.
smallest_distance = float("inf")
for cj_index in centroids_indices:
cj = unlabelled_centroids[cj_index]
distance = np.linalg.norm(Xi - cj)
if distance < smallest_distance:
closest_centroid_index = cj_index
smallest_distance = distance
# allocate that datapoint to the cluster of that centroid.
clusters[closest_centroid_index].append((label,Xi))
return clusters.values()
def repeat_until_convergence(labelled_data, labelled_clusters, unlabelled_centroids):
#find best fitting centroids to the labelled_data
previous_max_difference = 0
while True:
unlabelled_old_centroids = unlabelled_centroids
unlabelled_centroids = move_centroids(labelled_clusters)
labelled_clusters = form_clusters(labelled_data, unlabelled_centroids)
differences = list(map(lambda a, b: np.linalg.norm(a-b),unlabelled_old_centroids,unlabelled_centroids))
max_difference = max(differences)
if np.isnan(max_difference-previous_max_difference):
difference_change = np.nan
else:
difference_change = abs((max_difference-previous_max_difference)/np.mean([previous_max_difference,max_difference])) * 100
previous_max_difference = max_difference
# difference change is nan once the list of differences is all zeroes.
if np.isnan(difference_change):
break
return labelled_clusters, unlabelled_centroids
def move_centroids(labelled_clusters):
"""
returns a list of centroids corresponding to the clusters.
"""
new_centroids = []
for cluster in labelled_clusters:
new_centroids.append(mean_cluster(cluster))
return new_centroids
def mean_cluster(labelled_cluster):
#return mean of a labelled_cluster
sum_of_points = sum_cluster(labelled_cluster)
mean_of_points = sum_of_points * (1.0 / len(labelled_cluster))
return mean_of_points
def sum_cluster(labelled_cluster):
# assumes len(cluster) > 0
sum_ = labelled_cluster[0][1].copy()
for (label,vector) in labelled_cluster[1:]:
sum_ += vector
return sum_
def assign_labels_to_centroids(clusters, centroids):
#assign a label to each centroid
labelled_centroids = []
clusters = list(clusters)
for i in range(len(clusters)):
labels = list(map(lambda x: x[0], clusters[i]))
# pick the most common label
most_common = max(set(labels), key=labels.count)
centroid = (most_common, centroids[i])
labelled_centroids.append(centroid)
return labelled_centroids
def get_error_rate(digits,labelled_centroids):
#classifies a list of labelled digits. returns the error rate.
classified_incorrect = 0
for (label,digit) in digits:
classified_label = classify_digit(digit, labelled_centroids)
if classified_label != label:
classified_incorrect +=1
error_rate = classified_incorrect / float(len(digits))
return error_rate
def classify_digit(digit, labelled_centroids):
mindistance = float("inf")
for (label, centroid) in labelled_centroids:
distance = np.linalg.norm(centroid - digit)
if distance < mindistance:
mindistance = distance
closest_centroid_label = label
return closest_centroid_label
#Read the data file
with open("data/digits.base64.json","r") as f:
digits = list(map(parse, f.readlines()))
#divide the dataset into training and validation sets
ratio = int(len(digits)*0.25)
validation = digits[:ratio]
training = digits[ratio:]
error_rates = {x:None for x in range(5,25)}
for k in range(5,25):
#training
trained_clusters, trained_centroids = cluster(training, k)
labelled_centroids = assign_labels_to_centroids(trained_clusters, trained_centroids)
#validation
error_rate = get_error_rate(validation, labelled_centroids)
error_rates[k] = error_rate
# Show the error rates
x_axis = sorted(error_rates.keys())
y_axis = [error_rates[key] for key in x_axis]
plt.figure()
plt.title("Error Rate by Number of Clusters")
plt.scatter(x_axis, y_axis)
plt.xlabel("Number of Clusters")
plt.ylabel("Error Rate")
plt.savefig('Results/kmeans_acc.png')
| gpl-3.0 |
julien6387/supervisors | setup.py | 2 | 3152 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ======================================================================
# Copyright 2016 Julien LE CLEACH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================================================
import os
import sys
from setuptools import setup, find_packages
py_version = sys.version_info[:2]
if py_version < (3, 4):
raise RuntimeError('Supvisors requires Python 3.4 or later')
requires = ['supervisor >= 4.2.1', 'pyzmq >= 20.0.0']
ip_require = ['netifaces >= 0.10.9']
statistics_require = ['psutil >= 5.7.3', 'matplotlib >= 3.3.3']
xml_valid_require = ['lxml >= 4.6.2']
testing_extras = ['pytest >= 2.5.2', 'pytest-cov']
here = os.path.abspath(os.path.dirname(__file__))
try:
README = open(os.path.join(here, 'README.md')).read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
except:
README = """Supvisors is a control system for distributed applications over multiple Supervisor instances. """
CHANGES = ''
CLASSIFIERS = [
"License :: OSI Approved :: Apache Software License",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Natural Language :: English",
"Environment :: No Input/Output (Daemon)",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.6",
"Topic :: System :: Boot",
"Topic :: System :: Monitoring",
"Topic :: System :: Software Distribution"
]
version_txt = os.path.join(here, 'supvisors/version.txt')
supvisors_version = open(version_txt).read().split('=')[1].strip()
dist = setup(
name='supvisors',
version=supvisors_version,
description="A Control System for Distributed Applications",
long_description=README + '\n\n' + CHANGES,
long_description_content_type='text/markdown',
classifiers=CLASSIFIERS,
author="Julien Le Cléach",
author_email="[email protected]",
url="https://github.com/julien6387/supvisors",
download_url='https://github.com/julien6387/supvisors/archive/%s.tar.gz' % supvisors_version,
platforms=[
"CentOS 8.3"
],
packages=find_packages(),
install_requires=requires,
extras_require={'ip_address': ip_require,
'statistics': statistics_require,
'xml_valid': xml_valid_require,
'all': ip_require + statistics_require + xml_valid_require,
'testing': testing_extras},
include_package_data=True,
zip_safe=False,
namespace_packages=['supvisors'],
test_suite="supvisors.tests",
)
| apache-2.0 |
nvoron23/scikit-learn | sklearn/decomposition/__init__.py | 147 | 1421 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
| bsd-3-clause |
JeanKossaifi/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 205 | 10378 | # Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
#Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float)
assert_true(A.dtype == np.float)
def test_connect_regions():
lena = sp.misc.lena()
for thr in (50, 150):
mask = lena > thr
graph = img_to_graph(lena, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
lena = sp.misc.lena()
mask = lena > 50
graph = grid_to_graph(*lena.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = lena > 150
graph = grid_to_graph(*lena.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_lena():
lena = sp.misc.lena().astype(np.float32)
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = lena.astype(np.float)
lena /= 16.0
return lena
def _orange_lena(lena=None):
lena = _downsampled_lena() if lena is None else lena
lena_color = np.zeros(lena.shape + (3,))
lena_color[:, :, 0] = 256 - lena
lena_color[:, :, 1] = 256 - lena / 2
lena_color[:, :, 2] = 256 - lena / 4
return lena_color
def _make_images(lena=None):
lena = _downsampled_lena() if lena is None else lena
# make a collection of lenas
images = np.zeros((3,) + lena.shape)
images[0] = lena
images[1] = lena + 1
images[2] = lena + 2
return images
downsampled_lena = _downsampled_lena()
orange_lena = _orange_lena(downsampled_lena)
lena_collection = _make_images(downsampled_lena)
def test_extract_patches_all():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
lena = orange_lena
i_h, i_w = lena.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
lena = downsampled_lena
lena = lena[:, 32:97]
i_h, i_w = lena.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
lena = downsampled_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_reconstruct_patches_perfect_color():
lena = orange_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_patch_extractor_fit():
lenas = lena_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(lenas))
def test_patch_extractor_max_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(lenas) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(lenas) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
lenas = lena_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(lenas)
assert_equal(patches.shape, (len(lenas) * 100, 12, 12))
def test_patch_extractor_all_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
lenas = _make_images(orange_lena)
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
lena = downsampled_lena
i_h, i_w = lena.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(lena, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
| bsd-3-clause |
a301-teaching/a301_code | notebooks/python/satelliteI.py | 1 | 6461 |
# coding: utf-8
# # Intro to satellite data I
#
# In this notebook we take a quick look at a 5 minutes of satellite data acquired from the MODIS instrument on the Aqua polar orbiting satellite. The granule covers the period from 20:15 to 20:20 UCT on May 15, 2016 (Julian day 136) while Aqua flew over Ft. McMurray, Alberta. I downloaded the granule from the [Laadsweb NASA site]( https://ladsweb.nascom.nasa.gov/data/search.html) and converted it from HDF4 to HDF5 format (more on [this](https://www.hdfgroup.org/h5h4-diff.html) later). The structure of HDF5 files can be explored with the [HDFViewer tool](https://www.hdfgroup.org/products/java/release/download.html) (install version 2.13 from that link). The gory details are in the [Modis Users Guide](http://clouds.eos.ubc.ca/~phil/courses/atsc301/downloads/modis_users_guide.pdf).
#
# First, download the file from our course website:
# In[1]:
from a301utils.a301_readfile import download
import h5py
filename = 'MYD021KM.A2016136.2015.006.2016138123353.h5'
download(filename)
# Here is the corresponding red,green,blue color composite for the granule.
# In[13]:
from IPython.display import Image
Image(url='http://clouds.eos.ubc.ca/~phil/courses/atsc301/downloads/aqua_136_2015.jpg',width=600)
# ### now use h5py to read some of the satellite channels
# In[7]:
h5_file=h5py.File(filename)
# h5 files have attributes -- stored as a dictionary
# In[9]:
print(list(h5_file.attrs.keys()))
# ### print two of the attributes
# In[10]:
print(h5_file.attrs['Earth-Sun Distance_GLOSDS'])
# In[11]:
print(h5_file.attrs['HDFEOSVersion_GLOSDS'])
# h5 files have variables -- stored in a dictionary.
# The fields are aranged in a hierarchy of groups similar to a set of nested folders
# Here is what HDFViewer reports for the structure of the "EV_1KM_Emissive" dataset, which stands for "Earth View, 1 km pixel resolution, thermal emission channels". It is showing a 3 dimensional array of integers of shape (16,2030,1354). These are radiometer counts in 16 different wavelength channels for the 2030 x 1354 pixel granule.
# In[14]:
Image('screenshots/HDF_file_structure.png')
# **Read the radiance data from MODIS_SWATH_Type_L1B/Data Fields/EV_1KM_Emissive**
# Note the correspondence between the keys and the fields you see in HDFView:
#
# Here are the top level groups:
# In[17]:
print(list(h5_file.keys()))
# and the 'MODIS_SWATH_Type_L1B' group contains 3 subgroups:
# In[18]:
print(list(h5_file['MODIS_SWATH_Type_L1B'].keys()))
# and the 'Data Fields' subgroup contains 27 more groups:
# In[57]:
print(list(h5_file['MODIS_SWATH_Type_L1B/Data Fields'].keys()))
# Print out the 16 channel numbers stored in the Band_1KM_Emissive data array. The [...] means "read everything". The 16 thermal channels are channels 20-36 (band 26, a reflective band, is not included). Their wavelength ranges and common uses are listed
# [here](https://modis.gsfc.nasa.gov/about/specifications.php)
# In[22]:
print(h5_file['MODIS_SWATH_Type_L1B/Data Fields/Band_1KM_Emissive'][...])
# **note that channel 31, which covers the wavelength range 10.78-11.28 $\mu m$ occurs at index value 10 (remember python counts from 0)**
# In[24]:
index31=10
# **the data are stored as unsigned (i.e. only positive values), 2 byte (16 bit) integers which can hold values from 0 to $2^{16}$ - 1 = 65,535.
# The ">u2" notation below for the datatype (dtype) says that the data is unsigned, 2 byte, with the most significant
# byte stored first ("big endian", which is the same way we write numbers)**
#
# (Although the 2 byte words contain 16 bits, only 12 bits are significant).
#
# (h5py lets you specify the group names one at a time, instead of using '/' to separate them. This is convenient if you are storing your field name in a variable, for example.)
# In[42]:
my_name = 'EV_1KM_Emissive'
chan31=h5_file['MODIS_SWATH_Type_L1B']['Data Fields'][my_name][index31,:,:]
print(chan31.shape,chan31.dtype)
# **Print the first 3 rows and columns**
# In[26]:
chan31[:3,:3]
# ** we need to apply a
# scale and offset to convert counts to radiance, with units of $(W\,m^{-2}\,\mu m^{-1}\,sr^{-1})$. More about the
# sr units later**
# $Data = (RawData - offset) \times scale$
#
# this information is included in the attributes of each variable.
#
# (see page 36 of the [Modis Users Guide](http://clouds.eos.ubc.ca/~phil/courses/atsc301/downloads/modis_users_guide.pdf))
# **here is the scale for all 16 channels**
# In[33]:
scale=h5_file['MODIS_SWATH_Type_L1B']['Data Fields']['EV_1KM_Emissive'].attrs['radiance_scales'][...]
print(scale)
# **and here is the offset for 16 channels**
# In[34]:
offset=h5_file['MODIS_SWATH_Type_L1B']['Data Fields']['EV_1KM_Emissive'].attrs['radiance_offsets'][...]
print(offset)
# **note that as the satellite ages and wears out, these calibration coefficients change**
# In[35]:
chan31_calibrated =(chan31 - offset[index31])*scale[index31]
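# **As a quick sketch, the same scale/offset pattern can be wrapped in a small
# helper that calibrates any of the 16 emissive channels -- the `calibrate_channel`
# name is just for illustration and is not part of the MODIS file itself**
def calibrate_channel(h5, channel_index):
    '''Return calibrated radiance (W m^-2 um^-1 sr^-1) for one emissive channel index (0-15)'''
    field = h5['MODIS_SWATH_Type_L1B']['Data Fields']['EV_1KM_Emissive']
    counts = field[channel_index, :, :]
    scales = field.attrs['radiance_scales'][...]
    offsets = field.attrs['radiance_offsets'][...]
    return (counts - offsets[channel_index]) * scales[channel_index]
# e.g. channel 31 again, which should reproduce chan31_calibrated from above
chan31_check = calibrate_channel(h5_file, index31)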
# In[27]:
get_ipython().magic('matplotlib inline')
# **histogram the raw counts -- note that hist doesn't know how to handle 2-dim arrays, so flatten to 1-d**
# In[55]:
import matplotlib.pyplot as plt
out=plt.hist(chan31.flatten())
#
# get the current axis to add title with gca()
#
ax = plt.gca()
_=ax.set(title='Aqua Modis raw counts')
# **histogram the calibrated radiances and show that they lie between
# 0-10 $W\,m^{-2}\,\mu m^{-1}\,sr^{-1}$ **
# In[45]:
import matplotlib.pyplot as plt
fig,ax = plt.subplots(1,1)
ax.hist(chan31_calibrated.flatten())
_=ax.set(xlabel='radiance $(W\,m^{-2}\,\mu m^{-1}\,sr^{-1})$',
title='channel 31 radiance for Aqua Modis')
# ** Next Read MODIS_SWATH_Type_L1B/Geolocation Fields/Longitude**
# note that the longitude and latitude arrays are (406,271) while the actual
# data are (2030,1354). These lat/lon arrays show only every fifth row and column to
# save space. The full lat/lon arrays are stored in a separate file.
# In[54]:
lon_data=h5_file['MODIS_SWATH_Type_L1B']['Geolocation Fields']['Longitude'][...]
lat_data=h5_file['MODIS_SWATH_Type_L1B']['Geolocation Fields']['Latitude'][...]
_=plt.plot(lon_data[:10,:10],lat_data[:10,:10],'b+')
# **Note two things: 1) the pixels overlap and 2) they don't line up on lines of constant longitude and latitude**
#
# **The pixels are also not all the same size -- this distortion is called the [bowtie effect](http://eoweb.dlr.de:8080/short_guide/D-MODIS.html)**
#
# **Next -- plotting image data**
# In[ ]:
| mit |
heliazandi/volttron-applications | pnnl/TCMAgent/tcm/agent.py | 5 | 12924 | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright (c) 2015, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization
# that has cooperated in the development of these materials, makes
# any warranty, express or implied, or assumes any legal liability
# or responsibility for the accuracy, completeness, or usefulness or
# any information, apparatus, product, software, or process disclosed,
# or represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does
# not necessarily constitute or imply its endorsement, recommendation,
# or favoring by the United States Government or any agency thereof,
# or Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#}}}
import os
import sys
import logging
import datetime
from dateutil import parser
from volttron.platform.vip.agent import Agent, Core, PubSub, RPC, compat
from volttron.platform.agent import utils
from volttron.platform.agent.utils import (get_aware_utc_now,
format_timestamp)
import pandas as pd
import statsmodels.formula.api as sm
utils.setup_logging()
_log = logging.getLogger(__name__)
class TCMAgent(Agent):
def __init__(self, config_path, **kwargs):
super(TCMAgent, self).__init__(**kwargs)
self.config = utils.load_config(config_path)
self.site = self.config.get('campus')
self.building = self.config.get('building')
self.unit = self.config.get('unit')
self.subdevices = self.config.get('subdevices')
self.out_temp_name = self.config.get('out_temp_name')
self.supply_temp_name = self.config.get('supply_temp_name')
self.zone_temp_name = self.config.get('zone_temp_name')
self.air_flow_rate_name = self.config.get('air_flow_rate_name')
self.aggregate_in_min = self.config.get('aggregate_in_min')
self.aggregate_freq = str(self.aggregate_in_min) + 'Min'
self.ts_name = self.config.get('ts_name')
self.Qhvac_name = 'Q_hvac'
self.Qhvac_new_name = 'Q_hvac_new'
self.zone_temp_new_name = self.zone_temp_name + '_new'
self.window_size_in_day = int(self.config.get('window_size_in_day'))
self.min_required_window_size_in_percent = float(self.config.get('min_required_window_size_in_percent'))
self.interval_in_min = int(self.config.get('interval_in_min'))
self.no_of_recs_needed = self.window_size_in_day * 24 * (60 / self.interval_in_min)
self.min_no_of_records_needed_after_aggr = int(self.min_required_window_size_in_percent/100 *
self.no_of_recs_needed/self.aggregate_in_min)
self.schedule_run_in_sec = int(self.config.get('schedule_run_in_day')) * 86400
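        # Air properties used to turn flow rate and temperature difference into a
        # heat rate: density rho (kg/m^3) and specific heat c_p (J/(kg*K))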
self.rho = 1.204
self.c_p = 1006.0
# Testing
#self.no_of_recs_needed = 200
#self.min_no_of_records_needed_after_aggr = self.no_of_recs_needed/self.aggregate_in_min
@Core.receiver('onstart')
def onstart(self, sender, **kwargs):
self.core.periodic(self.schedule_run_in_sec, self.calculate_latest_coeffs)
def calculate_latest_coeffs(self):
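        # Query the most recent data window for the unit-level and zone-level points
        # from the platform historian, resample to the aggregation interval, merge
        # into one DataFrame, then fit and publish regression coefficients per zone.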
unit_topic_tmpl = "{campus}/{building}/{unit}/{point}"
topic_tmpl = "{campus}/{building}/{unit}/{subdevice}/{point}"
unit_points = [self.out_temp_name, self.supply_temp_name]
zone_points = [self.zone_temp_name, self.air_flow_rate_name]
df = None
for point in unit_points:
unit_topic = unit_topic_tmpl.format(campus=self.site,
building=self.building,
unit=self.unit,
point=point)
result = self.vip.rpc.call('platform.historian',
'query',
topic=unit_topic,
count=self.no_of_recs_needed,
order="LAST_TO_FIRST").get(timeout=1000)
df2 = pd.DataFrame(result['values'], columns=[self.ts_name, point])
self.convert_units_to_SI(df2, point, result['metadata']['units'])
df2[self.ts_name] = pd.to_datetime(df2[self.ts_name])
df2 = df2.groupby([pd.TimeGrouper(key=self.ts_name, freq=self.aggregate_freq)]).mean()
#df2[self.ts_name] = df2[self.ts_name].apply(lambda dt: dt.replace(second=0, microsecond=0))
df = df2 if df is None else pd.merge(df, df2, how='outer', left_index=True, right_index=True)
for subdevice in self.subdevices:
for point in zone_points:
# Query data from platform historian
topic = topic_tmpl.format(campus=self.site,
building=self.building,
unit=self.unit,
subdevice=subdevice,
point=point)
result = self.vip.rpc.call('platform.historian',
'query',
topic=topic,
count=self.no_of_recs_needed,
order="LAST_TO_FIRST").get(timeout=1000)
# Merge new point data to df
df2 = pd.DataFrame(result['values'], columns=[self.ts_name, point])
self.convert_units_to_SI(df2, point, result['metadata']['units'])
df2[self.ts_name] = pd.to_datetime(df2[self.ts_name])
df2 = df2.groupby([pd.TimeGrouper(key=self.ts_name, freq=self.aggregate_freq)]).mean()
#df2[self.ts_name] = df2[self.ts_name].apply(lambda dt: dt.replace(second=0, microsecond=0))
df = pd.merge(df, df2, how='outer', left_index=True, right_index=True)
#print(df)
coeffs = self.calculate_coeffs(df)
# Publish coeffs to store
if coeffs is not None:
self.save_coeffs(coeffs, subdevice)
def convert_units_to_SI(self, df, point, unit):
if unit == 'degreesFahrenheit':
df[point] = (df[point]-32) * 5/9
# Air state assumption: http://www.remak.eu/en/mass-air-flow-rate-unit-converter
# 1cfm ~ 0.00055kg/s
if unit == 'cubicFeetPerMinute':
df[point] = df[point] * 0.00055
def calculate_coeffs(self, df):
# check if there is enough data
l = len(df.index)
if l < self.min_no_of_records_needed_after_aggr:
_log.exception('Not enough data to process')
return None
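        # Q_hvac: heat delivered (or removed) by the HVAC system, proportional to
        # the supply air flow and the supply-to-zone temperature difference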
df[self.Qhvac_name] = self.rho * self.c_p * df[self.air_flow_rate_name] * \
(df[self.supply_temp_name] - df[self.zone_temp_name])
# align future predicted value with current predictors
# this is the next time interval
lag = 1
df = df.append(df[-1:], ignore_index=True)
df[self.zone_temp_new_name] = df[self.zone_temp_name].shift(-lag)
df[self.Qhvac_new_name] = df[self.Qhvac_name].shift(-lag)
df = df.dropna(subset=[self.zone_temp_new_name, self.Qhvac_new_name])
# calculate model coefficients
T_coeffs = self.cal_T_coeffs(df)
Q_coeffs = self.cal_Q_coeffs(df)
return {"T_fit": T_coeffs, "Q_fit": Q_coeffs}
def cal_T_coeffs(self, df):
# the regression to predict new temperature given a new cooling rate
formula = "{T_in_new} ~ {T_in} + {T_out} + {Q_hvac_new} + {Q_hvac}".format(
T_in_new=self.zone_temp_new_name,
T_in=self.zone_temp_name,
T_out=self.out_temp_name,
Q_hvac_new=self.Qhvac_new_name,
Q_hvac=self.Qhvac_name
)
T_fit = sm.ols(formula=formula, data=df).fit()
return T_fit
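    # A minimal sketch (not called anywhere in this agent) of how the fitted
    # temperature model could be evaluated for one step. statsmodels orders the
    # parameters as Intercept followed by the formula terms (T_in, T_out,
    # Q_hvac_new, Q_hvac); the method name below is illustrative only.
    def predict_next_zone_temp(self, T_fit, t_in, t_out, q_hvac, q_hvac_new):
        c = list(T_fit.params)
        return (c[0] + c[1] * t_in + c[2] * t_out +
                c[3] * q_hvac_new + c[4] * q_hvac)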
def cal_Q_coeffs(self, df):
        # the regression to predict the new cooling rate given the current and predicted temperatures
formula = "{Q_hvac_new} ~ {T_in} + {T_out} + {T_in_new} + {Q_hvac}".format(
T_in_new=self.zone_temp_new_name,
T_in=self.zone_temp_name,
T_out=self.out_temp_name,
Q_hvac_new=self.Qhvac_new_name,
Q_hvac=self.Qhvac_name
)
Q_fit = sm.ols(formula=formula, data=df).fit()
return Q_fit
def save_coeffs(self, coeffs, subdevice):
topic_tmpl = "analysis/TCM/{campus}/{building}/{unit}/{subdevice}/"
topic = topic_tmpl.format(campus=self.site,
building=self.building,
unit=self.unit,
subdevice=subdevice)
T_coeffs = coeffs["T_fit"]
Q_coeffs = coeffs["Q_fit"]
headers = {'Date': format_timestamp(get_aware_utc_now())}
for idx in xrange(0,5):
T_topic = topic + "T_c" + str(idx)
Q_topic = topic + "Q_c" + str(idx)
self.vip.pubsub.publish(
'pubsub', T_topic, headers, T_coeffs.params[idx])
self.vip.pubsub.publish(
'pubsub', Q_topic, headers, Q_coeffs.params[idx])
_log.debug(T_coeffs.params)
_log.debug(Q_coeffs.params)
def main(argv=sys.argv):
'''Main method called by the eggsecutable.'''
try:
utils.vip_main(TCMAgent)
except Exception as e:
_log.exception('unhandled exception')
def test_ols():
'''To compare result of pandas and R's linear regression'''
import os
test_csv = '../test_data/tcm_ZONE_VAV_150_data.csv'
df = pd.read_csv(test_csv)
config_path = os.environ.get('AGENT_CONFIG')
tcm = TCMAgent(config_path)
coeffs = tcm.calculate_coeffs(df)
if coeffs is not None:
T_coeffs = coeffs["T_fit"]
Q_coeffs = coeffs["Q_fit"]
_log.debug(T_coeffs.params)
_log.debug(Q_coeffs.params)
def test_api():
'''To test Volttron APIs'''
import os
topic_tmpl = "{campus}/{building}/{unit}/{subdevice}/{point}"
tcm = TCMAgent(os.environ.get('AGENT_CONFIG'))
topic1 = topic_tmpl.format(campus='PNNL',
building='SEB',
unit='AHU1',
subdevice='VAV123A',
point='MaximumZoneAirFlow')
result = tcm.vip.rpc.call('platform.historian',
'query',
topic=topic1,
count=20,
order="LAST_TO_FIRST").get(timeout=100)
assert result is not None
if __name__ == '__main__':
# Entry point for script
sys.exit(main())
#test_api()
| bsd-3-clause |
luis-nogoseke/lfn-optimization | rosen_min.py | 1 | 5085 | from scipy.optimize import minimize, rosen, rosen_der
from numpy.random import random
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm, ticker
from matplotlib.colors import LogNorm
from matplotlib.legend_handler import HandlerLine2D
from matplotlib import pyplot
import numpy as np
from numpy import array as array
import timeit
# Plot the function
fig = plt.figure()
ax = Axes3D(fig, azim = -128, elev = 43)
s = .05
X = np.arange(-2, 2.+s, s)
Y = np.arange(-1, 3.+s, s)
X, Y = np.meshgrid(X, Y)
Z = rosen([X, Y])
ax.plot_surface(X, Y, Z, rstride = 1, cstride = 1, norm = LogNorm(), cmap = cm.jet, linewidth=0, edgecolor='none')
ax.set_xlim([-2,2.0])
ax.set_ylim([-1,3.0])
ax.set_zlim([0, 2500])
plt.xlabel("x")
plt.ylabel("y")
plt.title('Rosenbrock 2D')
plt.figure()
# plt.show()
##################################################################
##################################################################
# Get 30 solutions
xs = []
ys = []
it = 0
def bstop(xk):
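    # record every iterate and its objective value so the convergence path can be
    # plotted later; the global counter tracks the number of iterations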
xs.append(np.copy(xk))
ys.append(rosen(xk))
global it
it = it + 1
iters = []
feval = []
sol = []
objective = []
times= []
# generate a random 2-D starting point for each of the 30 runs
x0s = []
for i in range(0, 30):
    x0 = (random(2)-1)*20
    x0s.append(x0)
global it
it = 0
start_time = timeit.default_timer()
output = minimize(rosen, x0s[i], method='L-BFGS-B', callback=bstop, options= {'disp': True})
times.append(timeit.default_timer() - start_time)
iters.append(it)
feval.append(output.nfev)
sol.append(output.x)
objective.append(output.fun)
##################################################################
# Plot solution on isolines
# Isolines
delta = 0.05
s = 0.05
X = np.arange(-1.5, 1.5, delta)
Y = np.arange(-1, 3, delta)
X, Y = np.meshgrid(X, Y)
Z = rosen([X, Y])
levels = np.arange(10, 300, 10)
plt.contour(X, Y, Z, levels=levels, norm=LogNorm())
xs = [np.array([-1, -0.5])]
ys = [rosen(xs[0])]
minimize(rosen, [-1, -0.5], method='BFGS', callback=bstop, options= {'disp': True})
linex = [-1]
liney = [-0.5]
for i in xs:
linex.append(i[0])
liney.append(i[1])
bfgs_y = list(ys)
bfgs, = plt.plot(linex, liney, '-o', label='BFGS')
xs = [np.array([-1, -0.5])]
ys = [rosen(xs[0])]
minimize(rosen, [-1, -0.5], method='L-BFGS-B', callback=bstop, options= {'disp': True})
linex = [-1]
liney = [-0.5]
for i in xs:
linex.append(i[0])
liney.append(i[1])
lbfgsb_y = list(ys)
lbfgsb, = plt.plot(linex, liney, '-s', label='L-BFGS-B')
#xs = [np.array([-1, -0.5])]
xs = [array([-1, -5.000000e-01]),
array([0, -3.311268e-03]),
array([1.355493e-03, -5.959506e-03]),
array([3.809721e-02, 3.027428e-03]),
array([2.079182e-01, 4.341413e-02]),
array([2.970837e-01, 6.464193e-02]),
array([2.855097e-01, 6.251982e-02]),
array([2.888685e-01, 7.041021e-02]),
array([3.002800e-01, 9.371150e-02]),
array([3.301636e-01, 1.239246e-01]),
array([3.431667e-01, 1.362663e-01]),
array([3.790113e-01, 1.696302e-01]),
array([6.666666e-01, 4.364363e-01]),
array([6.673740e-01, 4.370968e-01]),
array([6.827426e-01, 4.586153e-01]),
array([8.033587e-01, 6.278525e-01]),
array([7.713845e-01, 5.861505e-01]),
array([7.816157e-01, 6.030214e-01]),
array([8.603122e-01, 7.330873e-01]),
array([8.943182e-01, 7.953762e-01]),
array([9.339127e-01, 8.700019e-01]),
array([9.673623e-01, 9.336677e-01]),
array([9.848576e-01, 9.714503e-01]),
array([1.000155e+00, 9.998819e-01]),
array([9.986836e-01, 9.973498e-01]),
array([9.995547e-01, 9.990956e-01])]
#ys = [rosen(xs[0])]
ys = [229,
1.001096e+00,
1.000845e+00,
9.255054e-01,
6.273970e-01,
5.498666e-01,
5.465811e-01,
5.226986e-01,
4.908637e-01,
4.709314e-01,
4.656657e-01,
4.531264e-01,
1.175240e-01,
1.175146e-01,
1.063106e-01,
6.940715e-02,
6.015688e-02,
5.393538e-02,
2.448267e-02,
1.313008e-02,
4.847591e-03,
1.515564e-03,
4.560516e-04,
1.832000e-05,
1.769561e-06,
2.180442e-07,
1.568248e-10]
#minimize(rosen, [-1, -0.5], method=_minimize_dfp, callback=bstop, options= {'disp': True})
linex = []
liney = []
for i in xs:
linex.append(i[0])
liney.append(i[1])
powell_y = list(ys)
powell, = plt.plot(linex, liney, '-^', label='DFP')
plt.legend(handles=[bfgs, lbfgsb, powell])
plt.title('Isolines')
plt.xlabel('x1')
plt.ylabel('x2')
plt.figure()
b, = plt.plot(bfgs_y, '-o', label='BFGS')
l, = plt.plot(lbfgsb_y, '-s', label='L-BFGS-B')
p, = plt.plot(powell_y, '-^', label='DFP')
pyplot.yscale('log')
plt.grid(True)
plt.title('Objective')
plt.legend(handles=[b, l, p])
plt.xlabel('Number of Iterations')
plt.ylabel('Objective')
plt.show()
##################################################################
iters = []
feval = []
sol = []
objective = []
x0s = []
it = 0
for i in range(0, 30):
x0 = (random(30)-1)*10
x0s.append(x0)
global it
it = 0
output = minimize(rosen, x0, method='BFGS', callback=bstop, options= {'disp': True})
iters.append(it)
feval.append(output.nfev)
sol.append(output.x)
objective.append(output.fun)
| apache-2.0 |
rl-institut/reegis-hp | reegis_hp/berlin_hp/berlin_brdbg_example_opt.py | 7 | 9772 | #!/usr/bin/python3
# -*- coding: utf-8
import logging
import pandas as pd
import numpy as np
from oemof import db
from oemof.db import tools
from oemof.db import powerplants as db_pps
from oemof.db import feedin_pg
from oemof.tools import logger
from oemof.core import energy_system as es
from oemof.solph import predefined_objectives as predefined_objectives
from oemof.core.network.entities import Bus
from oemof.core.network.entities.components import sources as source
from oemof.core.network.entities.components import sinks as sink
from oemof.core.network.entities.components import transformers as transformer
from oemof.core.network.entities.components import transports as transport
import warnings
warnings.simplefilter(action="ignore", category=RuntimeWarning)
# Plant and site parameter
site = {'module_name': 'Yingli_YL210__2008__E__',
'azimuth': 0,
'tilt': 0,
'albedo': 0.2,
'hoy': 8760,
'h_hub': 135,
'd_rotor': 127,
'wka_model': 'ENERCON E 126 7500',
'h_hub_dc': {
1: 135,
2: 78,
3: 98,
4: 138,
0: 135},
'd_rotor_dc': {
1: 127,
2: 82,
3: 82,
4: 82,
0: 127},
'wka_model_dc': {
1: 'ENERCON E 126 7500',
2: 'ENERCON E 82 3000',
3: 'ENERCON E 82 2300',
4: 'ENERCON E 82 2300',
0: 'ENERCON E 126 7500'},
}
define_elec_buildings = [
{'annual_elec_demand': 2000,
'selp_type': 'h0'},
{'annual_elec_demand': 2000,
'selp_type': 'g0'},
{'annual_elec_demand': 2000,
'selp_type': 'i0'}]
define_heat_buildings = [
{'building_class': 11,
'wind_class': 0,
'annual_heat_demand': 5000,
'shlp_type': 'efh'},
{'building_class': 5,
'wind_class': 1,
'annual_heat_demand': 5000,
'shlp_type': 'mfh'},
{'selp_type': 'g0',
'building_class': 0,
'wind_class': 1,
'annual_heat_demand': 3000,
'shlp_type': 'ghd'}]
# emission factors in t/MWh
co2_emissions = {}
co2_emissions['lignite'] = 0.111 * 3.6
co2_emissions['hard_coal'] = 0.0917 * 3.6
co2_emissions['natural_gas'] = 0.0556 * 3.6
co2_emissions['mineral_oil'] = 0.0750 * 3.6
co2_emissions['waste'] = 0.0750 * 3.6
co2_emissions['biomass'] = 0.0750 * 3.6
eta_elec = {}
eta_elec['lignite'] = 0.35
eta_elec['hard_coal'] = 0.39
eta_elec['natural_gas'] = 0.45
eta_elec['mineral_oil'] = 0.40
eta_elec['waste'] = 0.40
eta_elec['biomass'] = 0.40
opex_var = {}
opex_var['lignite'] = 22
opex_var['hard_coal'] = 25
opex_var['natural_gas'] = 22
opex_var['mineral_oil'] = 22
opex_var['waste'] = 22
opex_var['biomass'] = 22
opex_var['solar_power'] = 1
opex_var['wind_power'] = 1
capex = {}
capex['lignite'] = 22
capex['hard_coal'] = 25
capex['natural_gas'] = 22
capex['mineral_oil'] = 22
capex['waste'] = 22
capex['biomass'] = 22
capex['solar_power'] = 1
capex['wind_power'] = 1
# price for resource
price = {}
price['lignite'] = 60
price['hard_coal'] = 60
price['natural_gas'] = 60
price['mineral_oil'] = 60
price['waste'] = 60
price['biomass'] = 60
de_en = {
'Braunkohle': 'lignite',
'Steinkohle': 'hard_coal',
'Erdgas': 'natural_gas',
'Öl': 'mineral_oil',
'Solarstrom': 'solar_power',
'Windkraft': 'wind_power',
'Biomasse': 'biomass',
'Wasserkraft': 'hydro_power',
'Gas': 'methan'}
en_de = {
'lignite': 'Braunkohle',
'hard_coal': 'Steinkohle',
'natural_gas': 'Erdgas',
'mineral_oil': 'Öl'}
opex_fix = {}
resource_buses = {
'global': ['hard_coal', 'lignite', 'oil'],
'local': ['natural_gas']}
translator = lambda x: de_en[x]
def get_demand():
'Dummy function until real function exists.'
demand_df = pd.DataFrame()
demand_df['elec'] = np.random.rand(8760) * 10 ** 11
return demand_df
def entity_exists(esystem, uid):
return len([obj for obj in esystem.entities if obj.uid == uid]) > 0
def create_entity_objects(esystem, region, pp, tclass, bclass):
    'Create the resource bus (if missing), a commodity source and a transformer for one power plant in a region.'
if entity_exists(esystem, ('bus', region.name, pp[1].type)):
logging.debug('Bus {0} exists. Nothing done.'.format(
('bus', region.name, pp[1].type)))
location = region.name
elif entity_exists(esystem, ('bus', 'global', pp[1].type)):
logging.debug('Bus {0} exists. Nothing done.'.format(
('bus', 'global', pp[1].type)))
location = 'global'
else:
print()
logging.debug('Creating Bus {0}.'.format(
('bus', region.name, pp[1].type)))
bclass(uid=('bus', region.name, pp[1].type), type=pp[1].type,
price=price[pp[1].type], regions=[region], excess=False)
location = region.name
source.Commodity(
uid='rgas',
outputs=[obj for obj in esystem.entities if obj.uid == (
'bus', location, pp[1].type)])
tclass(
uid=('transformer', region.name, pp[1].type),
inputs=[obj for obj in esystem.entities if obj.uid == (
'bus', location, pp[1].type)],
outputs=[obj for obj in region.entities if obj.uid == (
'bus', region.name, 'elec')],
in_max=[None],
out_max=[float(pp[1].cap)],
eta=[eta_elec[pp[1].type]],
opex_var=opex_var[pp[1].type],
regions=[region])
logger.define_logging()
year = 2010
time_index = pd.date_range('1/1/{0}'.format(year), periods=8760, freq='H')
overwrite = False
overwrite = True
conn = db.connection()
# Create a simulation object
simulation = es.Simulation(
timesteps=range(len(time_index)), verbose=True, solver='gurobi',
debug=True,
objective_options={'function': predefined_objectives.minimize_cost})
# Create an energy system
TwoRegExample = es.EnergySystem(time_idx=time_index, simulation=simulation)
# Add regions to the energy system
TwoRegExample.regions.append(es.Region(
geom=tools.get_polygon_from_nuts(conn, 'DE3'),
name='Berlin'))
TwoRegExample.regions.append(es.Region(
geom=tools.get_polygon_from_nuts(conn, 'DE4'),
name='Brandenburg'))
# Create global buses
Bus(uid=('bus', 'global', 'coal'), type='coal', price=60, sum_out_limit=10e10,
excess=False)
Bus(uid=('bus', 'global', 'lignite'), type='lignite', price=60,
sum_out_limit=10e10, excess=False)
# Create entity objects for each region
for region in TwoRegExample.regions:
logging.info('Processing region: {0} ({1})'.format(
region.name, region.code))
# Get demand time series and create buses. One bus for each demand series.
demand = get_demand()
for demandtype in demand.keys():
Bus(uid=('bus', region.name, demandtype), type=demandtype, price=60,
regions=[region], excess=False)
sink.Simple(
uid=('sink', region.name, demandtype),
inputs=[obj for obj in TwoRegExample.entities
if obj.uid == ('bus', region.name, demandtype)],
val=demand[demandtype],
region=[region])
# Create source object
feedin_pg.Feedin().create_fixed_source(
conn, region=region, year=TwoRegExample.time_idx.year[0],
bustype='elec', **site)
# Get power plants from database and write them into a DataFrame
pps_df = db_pps.get_bnetza_pps(conn, region.geom)
print(pps_df)
# Add additional power plants to the DataFrame
pps_df.loc[len(pps_df)] = 'natural_gas', np.nan, 10 ** 12
# TODO: Summarize power plants of the same type
for pwrp in pps_df.iterrows():
create_entity_objects(TwoRegExample, region, pwrp,
tclass=transformer.Simple, bclass=Bus)
# create storage transformer object for storage
# transformer.Storage.optimization_options.update({'investment': True})
bel = [obj for obj in TwoRegExample.entities
if obj.uid == ('bus', region.name, demandtype)]
transformer.Storage(uid=('sto_simple', region.name, 'elec'),
inputs=bel,
outputs=bel,
eta_in=1,
eta_out=0.8,
cap_loss=0.00,
opex_fix=35,
opex_var=0,
capex=1000,
cap_max=10 ** 12,
cap_initial=0,
c_rate_in=1/6,
c_rate_out=1/6)
# Connect the electrical buses of the regions Berlin and Brandenburg.
bus1 = [obj for obj in TwoRegExample.entities if obj.uid == (
'bus', 'Berlin', 'elec')][0]
bus2 = [obj for obj in TwoRegExample.entities if obj.uid == (
'bus', 'Brandenburg', 'elec')][0]
TwoRegExample.connect(bus1, bus2, in_max=10 * 10 ** 12, out_max=0.9 * 10 ** 12,
eta=0.9, transport_class=transport.Simple)
#pv_lk_wtb = ([obj for obj in TwoRegExample.entities if obj.uid == (
# 'FixedSrc', 'Landkreis Wittenberg', 'pv_pwr')][0])
## Multiply PV with 25
#pv_lk_wtb.val = pv_lk_wtb.val * 25
# Remove orphan buses
buses = [obj for obj in TwoRegExample.entities if isinstance(obj, Bus)]
for bus in buses:
if len(bus.inputs) > 0 or len(bus.outputs) > 0:
logging.debug('Bus {0} has connections.'.format(bus.type))
else:
logging.debug('Bus {0} has no connections and will be deleted.'.format(
bus.type))
TwoRegExample.entities.remove(bus)
TwoRegExample.simulation = es.Simulation(
solver='gurobi', timesteps=[t for t in range(8760)],
stream_solver_output=True, objective_options={
'function': predefined_objectives.minimize_cost})
for entity in TwoRegExample.entities:
entity.uid = str(entity.uid)
# Optimize the energy system
TwoRegExample.optimize()
logging.info(TwoRegExample.dump())
| gpl-3.0 |
18padx08/PPTex | PPTexEnv_x86_64/bin/JinjaPPExtension.py | 1 | 18508 | #!/bin/bash
"exec" "python" "$0" "$@"
from os import sys
sys.path += ['/usr/texbin','', '//anaconda/lib/python27.zip', '//anaconda/lib/python2.7', '//anaconda/lib/python2.7/plat-darwin', '//anaconda/lib/python2.7/plat-mac', '//anaconda/lib/python2.7/plat-mac/lib-scriptpackages', '//anaconda/lib/python2.7/lib-tk', '//anaconda/lib/python2.7/lib-old', '//anaconda/lib/python2.7/lib-dynload', '//anaconda/lib/python2.7/site-packages', '//anaconda/lib/python2.7/site-packages/PIL', '//anaconda/lib/python2.7/site-packages/setuptools-2.1-py2.7.egg']
from jinja2 import nodes, contextfunction
from jinja2.ext import Extension
from jinja2 import Environment, FileSystemLoader
from jinja2.exceptions import TemplateSyntaxError
import numpy as np
from sympy import Symbol, sympify, lambdify, latex
import sympy as sp
import matplotlib.pyplot as plot
import subprocess
import re
class PPException(Exception):
pass
class PPExtension(Extension):
tags = set(['figure', 'table', 'calcTable', 'evaluate', 'evaltex'])
    def __init__(self, environment):
super(PPExtension, self).__init__(environment)
#add defaults
environment.extend(error_calculation='gauss', data_mode='tuples', print_figure_for_each_table=True)
def parse(self, parser):
#the token
lineno = parser.stream.next()
linnum = lineno.lineno
if(lineno.value == 'figure'):
#ther argument
arg = [parser.parse_expression()]
#the figure data
if (parser.stream.skip_if('comma')):
arg.append(parser.parse_expression())
body = parser.parse_statements(['name:endfigure'], drop_needle=True)
return nodes.CallBlock(self.call_method('_create_figure', arg),[], [], body).set_lineno(linnum)
elif(lineno.value == 'table'):
arg = [parser.parse_expression()]
return nodes.Output([self.call_method('_print_latex_table', arg)])
elif(lineno.value == 'evaluate'):
arg = [parser.parse_expression()]
return nodes.Output([self.call_method('_evaluate_function', arg)])
elif(lineno.value == 'evaltex'):
arg = [parser.parse_expression()]
return nodes.Output([self.call_method('_evaltex_function', arg)])
elif( lineno.value == 'calcTable'):
arg = [parser.parse_expression()]
return nodes.Output([self.call_method('_calcTable_function', arg)])
#body = parser.parse_statements(['name:endfigure'], drop_needle=True)
return nodes.Const(None)
def _getValue(self, r,c,table, regExps):
try:
print("is it a value?")
return np.round(float(table[r,c]), 6)
except(ValueError):
print("no it's not")
#got string try parse it
#for reg in regExps:
val = self._parseValue(r,c,table[r,c],table, regExps)
if val is not None: return val
return 0
def _parseValue(self, row, column, entry, table, regExps):
value = 0
print('sp lets try parse it')
for reg in regExps:
temp = reg.finditer(entry)
cor= 0
if temp:
for match in temp:
tup = match.group().replace('$', '')
print(tup)
r,c = tup.split(',')
                    r = row if int(r) < 0 else int(r)
                    c = column if int(c) < 0 else int(c)
print(r,c)
tmpVal = str(self._getValue(r,c,table, regExps))
entry = entry[0:match.start()-cor] + tmpVal + entry[match.end()-cor:]
cor += len(match.group()) - len(tmpVal)
try:
value = eval(entry)
except(Exception):
return None
return np.round(value, 6)
def _calcTable_function(self, data):
xheader = data['xheader']
yheader = data['yheader']
        # Build up the regex for cell references in formulas: "$1,2$" means the
        # value in row 1, column 2 (zero-based); a negative index refers to the
        # current row/column.  (A rectangular syntax like "$(0:1,0:1)$", selecting
        # the block from (0,0) to (1,1), is planned but not implemented here.)
        # Every placeholder is replaced by its value (0 if the reference is
        # invalid) and the resulting expression is evaluated.
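        # Illustrative example (hypothetical input, not from a real template):
        # with table rows [["1", "2"], ["3", "$-1,0$*2"]] the last cell picks up
        # column 0 of its own row ("3") and evaluates to 6.0.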
singleVal = re.compile('\$-?\d*,-?\d*\$')
table = np.array(data['table'])
print table
for row in range(np.shape(table)[0]):
print(row)
for column in range(np.shape(table)[1]):
print ("parse (",row,column,")")
blub = []
blub.append(singleVal)
value = self._getValue(row, column, table, blub)
print(value)
table[row,column] = value
datArr = {}
print('table construction completed')
datArr['extended'] = True
datArr['xheader'] = xheader
datArr['yheader'] = yheader
datArr['xdata'] = []
print('building up data array')
for c in range(np.shape(table)[1]):
print(c)
datArr['xdata'].append(table[:,c].tolist())
datArr['desc'] = data['desc']
figstr = ''
if 'figure' in data:
print( data['figure'])
for fig in data['figure']:
xrow = int(fig['xrow'])
yrow = int(fig['yrow'])
print(xrow, yrow)
xdata = table[:,xrow].astype(np.float)
ydata = table[:,yrow].astype(np.float)
print(xdata, ydata)
xmin = np.min(xdata)
print(xmin)
xmax = np.max(xdata)
ymin = np.min(ydata)
ymax = np.max(ydata)
print(xmin,xmax,ymin,ymax)
rang = [xmin, xmax, ymin, ymax]
print (rang)
title = fig['title']
desc = data['desc']
ylabel = fig['ylabel']
xlabel = fig['xlabel']
ref = fig['ref']
figureArray = {}
figureArray['xdata'] = xdata.tolist()
figureArray['ydata'] = ydata.tolist()
figureArray['title'] = title
figureArray['desc'] = desc
figureArray['range'] = rang
if 'interpolate' in fig:
figureArray['dim'] = fig['dim']
figureArray['interpolate'] = fig['interpolate']
if 'slope' in fig:
figureArray['slope'] = fig['slope']
print('try creating figure')
figstr += self._create_figure(ref, {'data': [figureArray], 'ylabel':ylabel, 'xlabel':xlabel}, fig['caption'])
print('try printing the table')
return self._print_latex_table(datArr) + figstr
def _evaltex_function(self, data):
try:
s = sympify(data['function'])
except:
raise TemplateSyntaxError("could not parse formula", 01)
try:
l = latex(s)
s = s.doit()
except:
raise TemplateSyntaxError("could either not make latex output or simpify", 1)
l2 = None
#errors is ignoring step
if 'step' in data:
l2 = latex(s)
#print(latex(s))
vals = []
syms = []
indep = []
unindep = []
try:
print(data['symbols'])
for symbol in data['symbols']:
print(symbol)
print(symbol['sym'], symbol['val'])
syms.append(Symbol(symbol['sym']))
vals.append(symbol['val'])
if 'indep' in symbol:
indep.append([syms[-1], symbol['uncert'], vals[-1]])
else:
unindep.append([syms[-1], vals[-1]])
except:
raise TemplateSyntaxError("something went wrong parsing symbols", 100)
#print(syms, vals)
print(syms, vals, indep, s)
try:
my_function = lambdify(syms, s, 'numpy')
result = my_function(*vals)
print("check if error is set", result)
if 'errors' in data:
                # start looping through all variables in an extra array
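                # Gaussian error propagation in relative form (this is what the
                # code below implements):
                #     s_f / f = sqrt( sum_i ( (df/dx_i) * s_i / f )**2 )
                # the reported absolute error is result * (s_f / f).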
error_terms = 0
partial_terms = []
partial_terms_squared = []
uncerts = []
print(l + " = " + str(result))
try:
for ind in indep:
#loop through variables
d = Symbol('s_' + ind[0].name)
partial = sp.diff(s, ind[0]) * d/s
partial_terms.append(partial)
partial_terms_squared.append(partial**2)
error_terms = error_terms + partial**2
uncerts.append([d, str(ind[1])])
except:
raise TemplateSyntaxError("error on building up error_terms", 15)
#make substitutions
print("begin substitution", error_terms)
error_terms = error_terms**0.5
ptsv1 = []
try:
for pt in partial_terms_squared:
ptsv = pt
print("substitution started" )
                        # substitute all dependent variables first
for ind in indep:
print(ind)
try:
ptsv = ptsv.subs(ind[0], ind[-1])
ptsv = ptsv.subs('s_' + ind[0].name, ind[1])
except:
raise TemplateSyntaxError("Could not substitued dependend var", 100)
for unind in unindep:
print(unind)
try:
ptsv = ptsv.subs(unind[0], unind[1])
except:
raise TemplateSyntaxError("Could not substitued undependend var", 100)
ptsv1.append(ptsv)
except:
raise TemplateSyntaxError("the substitution failed for error calculation", 10)
#error
uval = sp.sqrt(sum(ptsv1))
rresult = np.round(result, data['digits'] if 'digits' in data else 5)
print(rresult)
print(uval)
error = (uval * result).round(data['digits'] if 'digits' in data else 5)
print(rresult, error)
return """\\(""" + (data['fname'] if 'fname' in data else "f") + """ = """ + l + """ = """ + str(rresult) + """ \pm """ + str(abs(error)) + (data['units'] if 'units' in data else "") + """\\)
Error is calculated according to standard error propagation:
\\begin{dmath}
s_{""" + (data['fname'] if 'fname' in data else "f") +"""} = """ + latex(error_terms) + """ = """ + str(abs(error.round(data['digits'] if 'digits' in data else 5))) +(data['units'] if 'units' in data else "" )+ """
\\end{dmath}
with uncertainities: \\(""" + ",".join([latex(cert[0]) + ' = ' + cert[1] for cert in uncerts]) +"""\\)
"""
#print(result)
except:
raise TemplateSyntaxError("could not evaluate formula", 100)
try:
if 'supRes' in data:
return l
elif 'step' in data:
return l + " = " + l2 + " = " + str(result)
return l + " = " + str(result)
except:
raise TemplateSyntaxError("Malformed result...", 100)
# dictionary with entries
# data
# |_ [xdata, ydata, desc]
# xlabel
# ylabel
def _print_latex_table(self, data):
if 'extended' in data:
#we have in xdata an array and there is an array xheader and yheader (optional otherwise same as xheader) where xheader matches size of xdata and yheader matches size of one entry array of xdata
#at least one entry
print("latex print function", data)
ylen = len(data['xdata'][0])
#since len(xheader) and len (xdata) should match we take xheader
xlen = len(data['xheader'])
#the xheader string (since latex builds tables per row)
yheader = data['yheader'] if 'yheader' in data else []
xheader = "&" if len(yheader) >0 else ""
#xheader += "&".join(data['xheader'])
isfirst = True
for h in data['xheader']:
if isfirst:
xheader += "\\textbf{" + str(h) + "}"
isfirst = False
else:
xheader += "&\\textbf{" + str(h) + "}"
table = '\\begin{figure}\\centering\\begin{tabular}{' + 'c' * (xlen+ (1 if len(yheader) > 0 else 0)) +'}'
#table += "\\hline\n"
table += xheader + "\\\\\n\\cline{2-" + str(xlen+1) + "}"
#first = True
#now iterate over all rows, remember to print in the first row the yheader if there is one
for i in xrange(0, ylen):
first = True
if(len(yheader) > 0):
try:
table += "\\multicolumn{1}{r|}{\\textbf{" + str(data['yheader'][i]) + "}}"
except:
if i > len(data['yheader'])-1:
print("dimension of yheader is wrong")
print("ooooops there is an error in yheader")
raise TemplateSyntaxError("Yheader is wrong: probably inconsistencies in dimension", i)
for o in xrange(0,xlen):
try:
if len(yheader) >0:
if o == xlen-1:
table += "&\multicolumn{1}{c|}{" + str(data['xdata'][o][i]) + "}"
else:
print(data['xdata'][o][i])
table += "&" + str(data['xdata'][o][i])
else:
if not first:
table += "&"
first = False
if o == xlen-1:
table += "\multicolumn{1}{c|}{" + str(data['xdata'][o][i]) + "}"
else:
#print(data['xdata'][o][i])
table += str(data['xdata'][o][i])
except:
print("some error at: ", o, i)
raise TemplateSyntaxError("some error while parsing table data: ("+str(o)+","+str(i)+")" , o)
#raise PPException("Error while parsing datapoints, probably missing an entry; check dimensions")
#print(table)
table += "\\\\\\cline{2-" + str(xlen+1) + "}\n"
table += "\\end{tabular} \\caption{" + str(data['desc']) + "} \\end{figure}\n"
print (table)
else:
for tab in data['data']:
table = "\\begin{figure}\\centering\\begin{tabular}{|c|c|}"
i = 0
table += "\\hline\n"
table += str(data['xlabel']) + " & " + str(data['ylabel']) + "\\\\\n"
table += "\\hline\n"
for entry in tab['xdata']:
table += str(entry) + " & " + str(tab['ydata'][i]) + "\\\\\n"
table += "\\hline\n"
i+=1
table += "\\end{tabular} \\caption{" + str(tab['desc']) + "} \\end{figure}\n"
return table
# dictionary with entries
# data
# |_ (xdata,ydata,range = [xmin,xmax,ymin,ymax], title, interpolate)
# ylabel
# xlabel
def _create_figure(self, title, data, caller):
plot.figure()
print (data)
slopeinter = ''
#foreach data set in data print a figure
for fig in data['data']:
if 'range' in fig:
plot.axis(fig['range'])
if 'interpolate' in fig :
f = np.polyfit(fig['xdata'],fig['ydata'], fig['dim'] if 'dim' in fig else 1)
print("slope-intercept",f[0])
if 'slope' in fig:
slopeinter = "y = "+str(f[0]) + " + " + str(f[1])
#plot.annotate("y = " + f[0]+"*x + "+ f[1], xy=(1,1), xytext=(1,1.5), arrowprops=dict(facecolor='black', shrink=0.05),)
f_n = np.poly1d(f)
xnew = np.linspace(fig['range'][0], fig['range'][1], 10000)
plot.plot(xnew, f_n(xnew), label = slopeinter)
plot.plot(fig['xdata'], fig['ydata'], label=fig['title'], linestyle="solid", marker="s", markersize=7)
plot.legend()
plot.ylabel(data['ylabel'])
plot.xlabel(data['xlabel'])
file = plot.savefig(title.replace(" ","")+".png")
print file
return u"""
\\begin{figure}[ht!]
\centering
\includegraphics[width=\\textwidth]{""" + title.replace(" ","") + """.png}
\\caption{"""+(caller().strip() if type(caller) is not str else caller.strip())+u""" \\label{fig:""" + title + """}}
\\end{figure}\n"""
#return nodes.
#dictionary for evaluating functions
# variables - [] (use default values as initialization of data)
# function - str
def _evaluate_function(self, data):
funcstr = """\
def evalFunc(""" + ",".join(data['variables']) +"""):
return {e}""".format(e=data['function'])
exec(funcstr)
return evalFunc()
env = Environment(extensions=[PPExtension], loader=FileSystemLoader('.'))
t = env.get_template(sys.argv[2])
f = open(sys.argv[2] + "tmp", 'w')
f.write(t.render())
cmdstr = "xelatex -interaction=nonstopmode " + sys.argv[1] + " " + f.name
f.close()
print subprocess.Popen( cmdstr, shell=True, stdout=subprocess.PIPE ).stdout.read()
#os.system("open " + os.path.splitext(os.path.basename(sys.argv[1]))[0] + ".pdf")
| mit |
MohammedWasim/scikit-learn | sklearn/linear_model/setup.py | 146 | 1713 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linear_model', parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension('cd_fast', sources=['cd_fast.c'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]), **blas_info)
config.add_extension('sgd_fast',
sources=['sgd_fast.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
config.add_extension('sag_fast',
sources=['sag_fast.c'],
include_dirs=numpy.get_include())
# add other directories
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
sao-eht/lmtscripts | pointing_lmt2018.py | 1 | 36718 | import numpy
import matplotlib
import shutil
# matplotlib.use('agg')
from matplotlib import pylab, mlab, pyplot
import os
np = numpy
plt = pyplot
# plt.ion()
from argparse import Namespace
from glob import glob
import scipy.io
import scipy.special
import scipy.ndimage
import scipy.interpolate
from scipy.signal import butter,lfilter,freqz
from scipy.interpolate import interp1d
#from scipy.ndimage.filters import minimum_filter1dar
from scipy.interpolate import UnivariateSpline
from matplotlib.mlab import griddata, psd
from datetime import datetime, timedelta
from scipy.optimize import fmin
#pathname_12bit = '../obsfiles/ifproc_2018-04-21_%06d_01_0000.nc'
#pathname_16bit = '../obsfiles/lmttpm_2018-04-21_%06d_01_0000.nc'
pathname_12bit = '../obsfiles/ifproc_2018-*-*_%06d_01_0000.nc'
pathname_16bit = '../obsfiles/lmttpm_2018-*-*_%06d_01_0000.nc'
def asec2rad(asec):
return asec * 2*np.pi / 3600. / 360.
def rad2asec(rad):
return rad * 3600. * 360. / (2*np.pi)
###################
def focus(first, last, plot=False, point=False, win_pointing=5., win_focusing=5., res=2., fwhm=7., channel='tp12bit', z0search=20., alphasearch=20., disk_diameter=0.):
plt.close('all')
if point:
print 'pointing'
        out = pointing_lmt2018(first, last=last, plot=plot, win=win_pointing, res=res, fwhm=fwhm, channel=channel, disk_diameter=disk_diameter)
imax = np.argmax(out.snr.ravel())
(xmax, ymax) = (out.xx.ravel()[imax], out.yy.ravel()[imax])
else:
xmax = 0.
ymax = 0.
scans = range(first, last+1)
focus_subset(scans, x0=xmax, y0=ymax, plot=plot, win_pointing=win_pointing, win_focusing=win_focusing, res=res, fwhm=fwhm, channel=channel, z0search=z0search, alphasearch=alphasearch, disk_diameter=disk_diameter)
def focus_subset(scans, x0=0., y0=0., plot=False, win_pointing=50., win_focusing=5., res=2., fwhm=7., channel='tp12bit', z0search=20., alphasearch=20., disk_diameter=0.):
    focusing_parabolicfit_lmt2018(scans, plot=plot, win=win_pointing, channel=channel, disk_diameter=disk_diameter)
    focusing_matchfilter_lmt2018(scans, x0=x0, y0=y0, win=win_focusing, res=res, fwhm=fwhm, channel=channel, z0search=z0search, alphasearch=alphasearch)
###########################################################
# Based on scitools meshgrid
def meshgrid_lmtscripts(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, a view into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` ,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
################### EXTRACT INFORMATION ###################
# extract 1mm total power data and fix some timing jitter issues
def extract(nc):
t0 = nc.variables['Data.TelescopeBackend.TelTime'].data[0]
t = nc.variables['Data.TelescopeBackend.TelTime'].data - t0
try:
a = -nc.variables['Data.LmtTpm.Signal'].data[:,0]
b = a
except:
print("Using 12 bit signal")
try:
b = np.sum(nc.variables['Data.IfProc.BasebandLevel'].data, axis=1) / nc.variables['Data.IfProc.BasebandLevel'].data.shape[1]
a = b
except:
print("Using 16 bit signal")
x = nc.variables['Data.TelescopeBackend.TelAzMap'].data
y = nc.variables['Data.TelescopeBackend.TelElMap'].data
i = ~nc.variables['Data.TelescopeBackend.BufPos'].data.astype(np.bool)
iobs = nc.variables['Header.Dcs.ObsNum'].data
if iobs >= 70000: # move to 50 Hz sampling to avoid ADC time glitches
fs = (1./np.mean(np.diff(t)))
tnew = nc.variables['Data.TelescopeBackend.TelTime'].data - nc.variables['Data.TelescopeBackend.TelTime'].data[0]
idx = tnew <= t[-1]
a = a[idx]
b = b[idx]
tnew = tnew[idx]
elif iobs >= 39150: # move to 50 Hz sampling to avoid ADC time glitches
fs = 50.
tnew = nc.variables['Data.TelescopeBackend.TelTime'].data - nc.variables['Data.TelescopeBackend.TelTime'].data[0]
idx = tnew <= t[-1]
a = a[idx]
b = b[idx]
tnew = tnew[idx]
elif iobs >= 38983: # kamal includes gap times
tnew = np.linspace(0, t[-1], len(t))
fs = 1./(t[1]-t[0])
adctime = nc.variables['Data.TelescopeBackend.TelTime'].data - nc.variables['Data.TelescopeBackend.TelTime'].data[0]
tnew = np.linspace(0, adctime[-1], len(adctime))
tnew = tnew[(tnew <= t[-1])]
a = interp1d(adctime, a)(tnew)
b = interp1d(adctime, b)(tnew)
elif iobs >= 38915: # 83.3 Hz becomes available but has gaps
fs = 1./0.012
tnew = np.arange(0, t[-1] + 1e-6, 1./fs)
        a = interp1d(t, a)(tnew) # t is not a great variable to use, but all we have
        b = interp1d(t, b)(tnew) # t is not a great variable to use, but all we have
else: # we are in 10 Hz data
fs = 10.
tnew = np.arange(0, t[-1] + 1e-6, .10)
a = interp1d(t, a)(tnew)
b = interp1d(t, b)(tnew)
x = interp1d(t, x)(tnew)
y = interp1d(t, y)(tnew)
i = interp1d(t, i)(tnew).astype(bool)
t = tnew
#iobs = nc.hdu.header.ObsNum[0]
source = ''.join(nc.variables['Header.Source.SourceName'])
return Namespace(t0=t0, t=t, tp16bit=a, tp12bit=b, x=x, y=y, i=i, iobs=iobs, source=source, fs=fs)
def rawopen(iobs, channel='tp12bit'):
from scipy.io import netcdf
if channel=='tp12bit':
filename = glob(pathname_12bit % iobs)[-1]
elif channel=='tp16bit':
filename = glob(pathname_16bit % iobs)[-1]
else:
print('ERROR: you can only specify tp12bit or tp16bit for the channel')
nc = netcdf.netcdf_file(filename)
keep = Namespace()
keep.BufPos = nc.variables['Data.TelescopeBackend.BufPos'].data
keep.Time = nc.variables['Data.TelescopeBackend.TelTime'].data
keep.XPos = nc.variables['Data.TelescopeBackend.TelAzMap'].data
keep.YPos = nc.variables['Data.TelescopeBackend.TelElMap'].data
if channel=='tp16bit':
keep.APower = -nc.variables['Data.LmtTpm.Signal'].data[:,0]
keep.BPower = keep.APower
elif channel=='tp12bit':
keep.BPower = np.sum(nc.variables['Data.IfProc.BasebandLevel'].data, axis=1) / nc.variables['Data.IfProc.BasebandLevel'].data.shape[1]
keep.APower = keep.BPower
keep.nc = nc
if 'Data.IfProc.BasebandTime' in nc.variables:
keep.ADCTime = nc.variables['Data.IfProc.BasebandTime'].data
return keep
# patch together many scans and try to align in time (to the sample -- to keep X and Y)
def mfilt(scans, channel='tp12bit'):
aps = []
bps = []
xs = []
ys = []
ts = []
ss = []
fss = []
zs = []
ntaper = 100
for i in sorted(scans):
keep = rawopen(i, channel=channel)
scan = extract(keep.nc)
aps.append(detrend(scan.tp16bit, ntaper=ntaper))
bps.append(detrend(scan.tp12bit, ntaper=ntaper))
ts.append(scan.t + scan.t0)
xs.append(scan.x)
ys.append(scan.y)
ss.append(scan.source)
fss.append(scan.fs)
zs.append(keep.nc.variables['Header.M2.ZReq'].data)
flag = 1
for s1 in range(0,len(ss)):
for s2 in range(s1,len(ss)):
if (ss[s1] != ss[s2]):
flag = 0
print('WARNING: NOT THE SAME SOURCE!!')
print ss
break
s = ss[0]
fs = fss[0]
t0 = ts[0][0]
t1 = ts[-1][-1]
tnew = np.arange(t0, t1+1./fs, 1./fs)
idx = np.zeros(len(tnew), dtype=np.bool)
x = np.zeros(len(tnew))
y = np.zeros(len(tnew))
a = np.zeros(len(tnew))
b = np.zeros(len(tnew))
for i in range(len(ts)):
istart = int(np.round((ts[i][0] - t0) * 50.))
idx[istart:istart+len(ts[i])] = True
x[istart:istart+len(xs[i])] = xs[i][:len(x)-istart]
y[istart:istart+len(ys[i])] = ys[i][:len(y)-istart]
a[istart:istart+len(aps[i])] = aps[i][:len(a)-istart]
b[istart:istart+len(bps[i])] = bps[i][:len(b)-istart]
x[~idx] = np.inf
y[~idx] = np.inf
fillfrac = float(np.sum(idx)-ntaper*len(scans)) / len(tnew)
return Namespace(t=tnew, tp16bit=a, tp12bit=b, x=x, y=y, z=zs, idx=idx, source=s, fs=fs, fillfrac=fillfrac)
################### POINTING & FOCUSING ###################
def pointing_lmt2018(first, last=None, plot=True, win=10., res=0.5, fwhm=7., channel='tp12bit', disk_diameter=0.):
if last is None:
last = first
scans = range(first, last+1)
out = pointing_lmt2018_wrapper(scans, plot=plot, win=win, res=res, fwhm=fwhm, channel=channel, disk_diameter=disk_diameter)
return out
def pointing_lmt2018_wrapper(scans, plot=True, win=10., res=0.5, fwhm=7., channel='tp12bit', disk_diameter=0.):
############## pointing #############
z = mfilt(scans, channel)
if win is None:
win = np.ceil(rad2asec(np.abs(np.min(z.x))))
# get the prob of a each location in the map being the point source and rename the variables
out = fitmodel_lmt2018(z, win=win, res=res, fwhm=fwhm, channel=channel, disk_diameter=disk_diameter)
(xxa, yya, snr, v, prob, pcum) = (out.xx, out.yy, out.snr, out.v, out.prob, out.pcum)
############## compute statistics #############
indices_3sigma = (pcum.ravel() < 0.99730020393673979)
voltages_3sigma = v.ravel()[indices_3sigma]
prob_3sigma = prob.ravel()[indices_3sigma]
# compute the expected value of the source voltage within 3 sigma
sourcevoltage_expvalue = np.sum(voltages_3sigma * prob_3sigma) / np.sum(prob_3sigma) # expectation value of v3s
# compute the variance of the source voltage within 3 sigma
voltage_squareddiff = (voltages_3sigma - sourcevoltage_expvalue)**2
sourcevoltage_stdev = np.sqrt(np.sum(voltage_squareddiff * prob_3sigma) / np.sum(prob_3sigma)) # std
############## plotting #############
if plot:
plt.figure()
plt.clf()
plt.axis(aspect=1.0)
plt.imshow(v, extent=(-win-res/2., win+res/2., -win-res/2., win+res/2.), interpolation='nearest', origin='lower', cmap='afmhot_r')
plt.plot(rad2asec(z.x), rad2asec(z.y), '-', color='violet', ls='--', lw=1.5, alpha=0.75)
h1 = plt.contour(xxa, yya, pcum, scipy.special.erf(np.array([0,1,2,3])/np.sqrt(2)), colors='cyan', linewidths=2, alpha=1.0)
imax = np.argmax(snr.ravel())
(xmax, ymax) = (xxa.ravel()[imax], yya.ravel()[imax])
plt.plot(xmax, ymax, 'y+', ms=11, mew=2)
plt.text(-0.99*win-res/2, 0.98*win+res/2, '[%.1f, %.1f]"' % (xmax, ymax), va='top', ha='left', color='black')
plt.text(.99*win+res/2, .98*win+res/2, '[%.1f $\pm$ %.1f mV]' % (sourcevoltage_expvalue, sourcevoltage_stdev), va='top', ha='right', color='black')
plt.title(z.source.strip() + ' scans:' + str(scans[0]) + '-' + str(scans[-1]))
plt.xlabel('$\Delta$x [arcsec]')
plt.ylabel('$\Delta$y [arcsec]')
plt.gca().set_aspect(1.0)
plt.gca().set_axis_bgcolor('white')
plt.grid(alpha=0.5)
plt.ylim(-win-res/2, win+res/2)
plt.xlim(-win-res/2, win+res/2)
plt.tight_layout()
############## return #############
return out
def focusing_parabolicfit_lmt2018(scans, plot=True, win=10., res=0.5, fwhm=7., channel='tp12bit', disk_diameter=0.):
vmeans = []
vstds = []
z_position = []
for scan in scans:
z_position.append(rawopen(scan, channel).nc.variables['Header.M2.ZReq'].data)
out = pointing_lmt2018(scan, plot=plot, win=win, res=res, fwhm=fwhm, channel=channel, disk_diameter=disk_diameter)
(xxa, yya, snr, v, prob, cumulative_prob) = (out.xx, out.yy, out.snr, out.v, out.prob, out.pcum)
# KATIE: DOESN'T THIS ONLY WORK IF YOU ONLY HAVE 1 PEAK???
# get the indices of the points on the map within 3 sigma and extract those voltages and probablities
indices_3sigma = (cumulative_prob.ravel() < 0.99730020393673979)
voltages_3sigma = v.ravel()[indices_3sigma]
prob_3sigma = prob.ravel()[indices_3sigma]
# compute the expected value of the source voltage within 3 sigma
sourcevoltage_expvalue = np.sum(voltages_3sigma * prob_3sigma) / np.sum(prob_3sigma) # expectation value of v3s
# compute the variance of the source voltage within 3 sigma
voltage_squareddiff = (voltages_3sigma - sourcevoltage_expvalue)**2
sourcevoltage_stdev = np.sqrt(np.sum(voltage_squareddiff * prob_3sigma) / np.sum(prob_3sigma)) # std
vmeans.append(sourcevoltage_expvalue)
vstds.append(sourcevoltage_stdev)
plt.figure(); plt.errorbar(z_position, vmeans, yerr=vstds)
############ LEAST SQUARES FITTING ################
A = np.vstack([np.ones([1, len(z_position)]), np.array(z_position), np.array(z_position)**2]).T
meas = np.array(vmeans)
meas_cov = np.diag(np.array(vstds)**2)
polydeg = 2
scale = 1e5
polyparams_cov = scale*np.eye(polydeg+1)
polyparams_mean = np.zeros([polydeg+1])
intTerm = np.linalg.inv(meas_cov + np.dot(A, np.dot(polyparams_cov, A.T)))
est_polyparams = polyparams_mean + np.dot(polyparams_cov, np.dot(A.T, np.dot( intTerm, (meas - np.dot(A, polyparams_mean)) ) ) )
error_polyparams = polyparams_cov - np.dot(polyparams_cov, np.dot(A.T, np.dot(intTerm, np.dot(A, polyparams_cov)) ) )
#print 'estimated polyparams'
#print est_polyparams
#print 'estimated error'
#print error_polyparams
p = np.poly1d(est_polyparams[::-1])
znews = np.linspace(np.min(z_position), np.max(z_position),100)
pnews = p(znews)
plt.plot(znews, pnews)
imax = np.argmax(pnews)
z0 = znews[imax]
print 'estimated z0'
print z0
##################################################
#vmean_fit_flipped, stats = np.polynomial.polynomial.polyfit(np.array(z_position), np.array(vmeans), 2, rcond=None, full=True, w=1/np.array(vstds))
#vmean_fit = vmean_fit_flipped[::-1]
#p = np.poly1d(vmean_fit)
#znews = np.linspace(np.min(z_position), np.max(z_position),100)
#pnews = p(znews)
#plt.plot(znews, pnews)
#plt.text(-1.4, 210., '[estimated $\mathbf{z}_0$: %.3f $\pm$ %.3f]' % (z0, z0_approxstdev), va='top', ha='left', color='black')
plt.text(min(znews), min(pnews), '[peak $\mathbf{z}$: %.3f]' % (z0), va='top', ha='left', color='black')
plt.title('Focusing')
plt.xlabel('$\mathbf{z}$')
plt.ylabel('amplitude')
def focusing_matchfilter_lmt2018(scans, x0=0, y0=0, win=50., res=2., fwhm=7., channel='tp12bit', alpha_min=0., alpha_max=20., disk_diameter=0., z0search=20., alphasearch=20., plot=True):
all_scans = mfilt(scans, channel)
if win is None:
win = np.ceil(rad2asec(np.abs(np.min(all_scans.x))))
zpos = []
xpos = []
ypos = []
meas_whitened = []
N_s = []
for scan_num in scans:
scan = mfilt(range(scan_num,scan_num+1), channel)
meas = scan.__dict__[channel]
N_s.append(len(scan.t))
maxN = np.max(N_s)
# compute pad length for efficient FFTs
pad = 2**int(np.ceil(np.log2(maxN)))
for scan_num in scans:
scan = mfilt(range(scan_num,scan_num+1),channel)
# place the measurements into meas_pad so that its padded to be of a power 2 length
meas = scan.__dict__[channel]
# original sequence length
N = len(scan.t)
if scan_num == scans[0]:
whiteningfac = whiten_measurements(all_scans, pad, channel=channel)
meas_pad = np.zeros(pad)
meas_pad[:N] = meas
# measurements of channel volatage in frequency domain
meas_rfft = np.fft.rfft(meas_pad) # N factor goes into fft, ifft = 1/N * ..
meas_rfft_conj = meas_rfft.conj();
meas_rfft_conj_white = meas_rfft_conj * whiteningfac
meas_whitened.append(meas_rfft_conj_white)
zpos.append(scan.z[0])
xpos.append(scan.x)
ypos.append(scan.y)
z0_min = min(zpos)
z0_max = max(zpos)
z0s = np.linspace(z0_min, z0_max, z0search)
alphas = np.linspace(alpha_min, alpha_max,alphasearch)
# compute the x and y coordinates that we are computing the maps over
x = asec2rad(np.arange(x0-win, x0+win+res, res))
y = asec2rad(np.arange(y0-win, y0+win+res, res))
#(z0s_grid, alphas_grid, xx_grid, yy_grid) = np.meshgrid(z0s, alphas, x, y) # search grid
(z0s_grid, alphas_grid, xx_grid, yy_grid) = meshgrid_lmtscripts(z0s, alphas, x, y) # search grid
zr = z0s_grid.ravel()
ar = alphas_grid.ravel()
xr = xx_grid.ravel()
yr = yy_grid.ravel()
count = 0.
num_zs = len(zpos)
model_pad = np.zeros(pad)
snrs = [] # signal-to-noise ratios
norms = [] # sqrt of whitened matched filter signal power
for (ztest, atest, xtest, ytest) in zip(zr, ar, xr, yr):
#print count/len(zr)
if disk_diameter > 0:
models = focus_model_disk(xpos, ypos, zpos, x0=xtest, y0=ytest, fwhm=fwhm, z0=ztest, alpha=atest, disk_diameter=disk_diameter, res=0.2)
else:
models = focus_model(xpos, ypos, zpos, x0=xtest, y0=ytest, fwhm=fwhm, z0=ztest, alpha=atest)
snr = 0.0
norm = 0.0
for s in range(0,num_zs):
N = len(models[s])
# compute the ideal model in the time domain
model_pad[:N] = models[s]
# convert the ideal model to the frequency domain and whiten
model_rfft = np.fft.rfft(model_pad)
model_rfft_white = model_rfft * whiteningfac
# compute the normalization by taking the square root of the whitened model spectrums' dot products
norm = norm + np.sum(np.abs(model_rfft_white)**2)
snr = snr + ( np.sum((model_rfft_white * meas_whitened[s]).real) )
norm = np.sqrt(norm)
norms.append(norm)
snrs.append(snr/norm)
count = count + 1.
# compute probablity and cumulative probabilities
isnr = np.argsort(np.array(snrs).ravel())[::-1] # reverse sort high to low
prob = np.exp((np.array(snrs).ravel()/np.sqrt(num_zs * pad/2.))**2/2.)
pcum = np.zeros_like(prob)
pcum[isnr] = np.cumsum(prob[isnr])
pcum = pcum.reshape(z0s_grid.shape) / np.sum(prob)
# get the indices of the points on the map within 3 sigma and extract those z0s and probablities
indices_3sigma = (pcum.ravel() < 0.99730020393673979)
z0s_3sigma = z0s_grid.ravel()[indices_3sigma]
prob_3sigma = prob.ravel()[indices_3sigma]
# compute the expected value of the z0 within 3 sigma
z0_expvalue = np.sum(z0s_3sigma * prob_3sigma) / np.sum(prob_3sigma) # expectation value of v3s
# compute the variance of the source voltage within 3 sigma
z0_squareddiff = (z0s_3sigma - z0_expvalue)**2
z0_variance = np.sqrt(np.sum(z0_squareddiff * prob_3sigma) / np.sum(prob_3sigma)) # std
imax = np.argmax(np.array(snrs).ravel())
(zmax, amax, xmax, ymax) = (zr.ravel()[imax], ar.ravel()[imax], xr.ravel()[imax], yr.ravel()[imax])
print 'estimated z0'
print zmax
if plot:
plt.figure()
plt.clf()
loc = np.unravel_index(imax, xx_grid.shape)
reshape_snr = np.array(snrs).reshape(z0s_grid.shape)
slice_snr = reshape_snr[:,:,loc[2],loc[3]]
plt.imshow(slice_snr, extent=(z0_min, z0_max, alpha_min, alpha_max), aspect=(z0_max-z0_min)/(alpha_max-alpha_min), interpolation='nearest', origin='lower', cmap='Spectral_r')
h1 = plt.contour(z0s_grid[:,:,loc[2],loc[3]], alphas_grid[:,:,loc[2],loc[3]], pcum[:,:,loc[2],loc[3]], scipy.special.erf(np.array([0,1,2,3])/np.sqrt(2)), colors='cyan', linewidths=2, alpha=1.0)
plt.plot(zmax, amax, 'y+', ms=11, mew=2)
plt.text(z0_min+z0s[1]-z0s[0], alpha_max-(alphas[1]-alphas[0]), '[maximum $\mathbf{z}_0$: %.3f, x: %.3f, y: %.3f, alpha: %.3f]' % (zmax, rad2asec(xmax), rad2asec(ymax), amax), va='top', ha='left', color='black')
plt.text(z0_min+z0s[1]-z0s[0], alpha_max-4*(alphas[1]-alphas[0]), '[expected $\mathbf{z}_0$: %.3f $\pm$ %.3f]' % (z0_expvalue, np.sqrt(z0_variance)), va='top', ha='left', color='black')
plt.title('Focusing')
plt.xlabel('$\mathbf{z}_0$')
plt.ylabel('alpha (FWHM in arcseconds per mm offset in $\mathbf{z}$)')
plt.gca().set_axis_bgcolor('white')
plt.tight_layout()
return
def whiten_measurements(z, pad_psd, channel='tp12bit'):
Fs = z.fs
#extract the detrended voltage measurements
meas = z.__dict__[channel]
# compute the psd of the voltage measurements
(p, f) = psd(meas, NFFT=1024, pad_to=4096) # unit variance -> PSD = 1 = variance of complex FFT (1/sqrt(N))
# LINDY COMMENT: we will take out the 1/Hz normalization later, to get unit variance per complex data point
if 'fillfrac' in z:
p = p / z.fillfrac # account for zeros in stiched timeseries (otherwise 1)
# sample frequencies for a sequence of length 'pad'. This should be equal to f...
freq_samples = np.abs(np.fft.fftfreq(pad_psd, d=1./2.)[:1+pad_psd/2]) # the default nyquist units
# Compute the factor that whitens the data. This is 1 over the point spread funcntion.
# Each of the signals - the model and the measurements - should be whitened by the square root of this term
whiteningfac_squared = 1. / interp1d(f, p)(freq_samples) # compute 1/PSD at the locations of the measurements B. Really this shouldn't do anything...
whiteningfac_squared[freq_samples < 0.1 * (2./Fs)] = 0. # turn off low freqs below 0.1 Hz - just an arbitrary choice
whiteningfac = np.sqrt(whiteningfac_squared)
return whiteningfac
def fitmodel_lmt2018(z, win=50., res=2., fwhm=7., channel='tp12bit', disk_diameter=0.):
Fs = z.fs
#extract the detrended voltage measurements
meas = z.__dict__[channel]
# original sequence length
N = len(z.t)
# compute pad length for efficient FFTs
pad = 2**int(np.ceil(np.log2(N)))
whiteningfac = whiten_measurements(z, pad, channel=channel)
# place the measurements into meas_pad so that its padded to be of a power 2 length
modelpad = np.zeros(pad)
meas_pad = np.zeros(pad)
meas_pad[:N] = meas # lINDY COMMENT: fails if N = len(tp) ??
# measurements of channel volatage in frequency domain
meas_rfft = np.fft.rfft(meas_pad) # N factor goes into fft, ifft = 1/N * ..
meas_rfft_conj = meas_rfft.conj();
meas_rfft_conj_white = meas_rfft_conj * whiteningfac
# compute the x and y coordinates that we are computing the maps over
x = asec2rad(np.arange(-win, win+res, res))
y = asec2rad(np.arange(-win, win+res, res))
#(xx, yy) = np.meshgrid(x, y) # search grid
(xx, yy) = meshgrid_lmtscripts(x, y) # search grid
xr = xx.ravel()
yr = yy.ravel()
count = 0;
snrs = [] # signal-to-noise ratios
norms = [] # sqrt of whitened matched filter signal power
for (xtest, ytest) in zip(xr, yr):
# compute the ideal model in the time domain
if disk_diameter>0:
modelpad[:N] = model_disk(z.x, z.y, x0=xtest, y0=ytest, fwhm=fwhm, disk_diameter=disk_diameter, res=disk_diameter/8.)
else:
modelpad[:N] = model(z.x, z.y, x0=xtest, y0=ytest, fwhm=fwhm) # model signal
# convert the ideal model to the frequency domain and whiten
model_rfft = np.fft.rfft(modelpad)
model_rfft_white = model_rfft * whiteningfac
# compute the normalization by taking the square root of the whitened model spectrums' dot products
norm = np.sqrt(np.sum(np.abs(model_rfft_white)**2))
norms.append(norm)
snrs.append(np.sum((model_rfft_white * meas_rfft_conj_white).real) / norm)
count = count + 1
snr = np.array(snrs)
snr[snr < 0] = 0.
imax = np.argmax(snr) # maximum snr location
snr = snr.reshape(xx.shape)
isnr = np.argsort(snr.ravel())[::-1] # reverse sort high to low
prob = np.exp((snr.ravel()/np.sqrt(pad/2.))**2/2.)
pcum = np.zeros_like(prob)
pcum[isnr] = np.cumsum(prob[isnr])
pcum = pcum.reshape(xx.shape) / np.sum(prob)
xxa = xx * rad2asec(1.)
yya = yy * rad2asec(1.)
# m = model, b = measurements,
# Expected [ b_conj * (noise + amplitude*m) ]
# = Expected [b_conj*noise + b_conj*amplitude*m] = 0 + amplitude*b_conj*m
# Optimally, m = b. Therefore to get out the amplitude we would need to divide by
# b_conj*m = |model|^2 = norms^2
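    # Equivalently, the amplitude estimate below is (model . meas) / (model . model),
    # with both dot products taken in the whitened (1/PSD-weighted) frequency domain.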
volts2milivolts = 1e3
voltage = volts2milivolts * snr/ np.array(norms).reshape(xx.shape)
return Namespace(xx=xxa, yy=yya, snr=snr/np.sqrt(pad/2.), v=voltage, prob=prob, pcum=pcum)
# linear detrend, use only edges
def detrend(x, ntaper=100):
x0 = np.mean(x[:ntaper])
x1 = np.mean(x[-ntaper:])
m = (x1 - x0) / len(x)
x2 = x - (x0 + m*np.arange(len(x)))
w = np.hanning(2 * ntaper)
x2[:ntaper] *= w[:ntaper]
x2[-ntaper:] *= w[-ntaper:]
return x2
def model(x, y, x0=0, y0=0, fwhm=7.):
fwhm = asec2rad(fwhm)
sigma = fwhm / 2.335
# predicted counts
m = np.exp(-((x-x0)**2 + (y-y0)**2) / (2*sigma**2))
return m
def focus_model(xpos, ypos, zs, x0=0, y0=0, fwhm=7., z0=0, alpha=0):
fwhm2stdev_factor = 1/2.335
sigma = asec2rad(fwhm) * fwhm2stdev_factor
alpha_rad = asec2rad(alpha) * fwhm2stdev_factor
count = 0
models = []
for z in zs:
sigma_z = np.sqrt(sigma**2 + (alpha_rad*np.abs(z-z0))**2)
amplitude_z = 1/( np.sqrt(2*np.pi) * (sigma_z)**2 )
m_z = amplitude_z * np.exp(-((xpos[count]-x0)**2 + (ypos[count]-y0)**2) / (2*sigma_z**2))
models.append(m_z)
count = count + 1
return models
def model_disk(xpos, ypos, x0=0, y0=0, fwhm=7., disk_diameter=0., res=2.):
fwhm2stdev_factor = 1/2.335
sigma = asec2rad(fwhm) * fwhm2stdev_factor
res_rad = asec2rad(res)
sigma_pixels = sigma/res_rad
# generate a disk image at radian positions xx and yy
disk_radius = asec2rad(disk_diameter)/2.
x = (np.arange(x0-disk_radius-3*sigma, x0+disk_radius+3*sigma+res_rad, res_rad))
y = (np.arange(y0-disk_radius-3*sigma, y0+disk_radius+3*sigma+res_rad, res_rad))
#(xx_disk, yy_disk) = np.meshgrid(x, y) # search grid
(xx_disk, yy_disk) = meshgrid_lmtscripts(x, y) # search grid
disk = np.zeros(xx_disk.shape)
disk[ ((xx_disk-x0 )**2 + (yy_disk- y0)**2) <= disk_radius**2 ] = 1. #1./(np.pi*disk_radius**2)
#disk = disk/np.sum(np.sum(disk))
blurred_disk = scipy.ndimage.filters.gaussian_filter(disk, sigma_pixels, mode='constant', cval=0.0)
#blurred_disk = blurred_disk*( np.sqrt(2*np.pi) * (sigma)**2 )
#blurred_disk = blurred_disk/np.sum(np.sum(blurred_disk))
#interpfunc = scipy.interpolate.RectBivariateSpline(xx_disk.flatten(), yy_disk.flatten(), blurred_disk.flatten())
interpfunc = scipy.interpolate.RectBivariateSpline(y, x, blurred_disk)
model = interpfunc(ypos, xpos, grid=False)
#plt.figure(); plt.plot(model)
#plt.figure()
#plt.axis(aspect=1.0)
#plt.imshow(blurred_disk, extent=(rad2asec(min(x)), rad2asec(max(x)), rad2asec(min(y)), rad2asec(max(y))), interpolation='nearest', origin='lower', cmap='afmhot_r')
#plt.plot(rad2asec(xpos), rad2asec(ypos), '-', color='violet', ls='--', lw=1.5, alpha=0.75)
return model
def focus_model_disk(xpos, ypos, zs, x0=0, y0=0, fwhm=7., z0=0, alpha=0, disk_diameter=0., res=2.):
# generate a disk image at radian positions xx and yy
disk_radius = asec2rad(disk_diameter)/2.
scaling = 10
res_rad = asec2rad(res)
x = (np.arange(x0-scaling*disk_radius, x0+scaling*disk_radius+res_rad, res_rad))
y = (np.arange(y0-scaling*disk_radius, y0+scaling*disk_radius+res_rad, res_rad))
#(xx_disk, yy_disk) = np.meshgrid(x, y) # search grid
(xx_disk, yy_disk) = meshgrid_lmtscripts(x, y) # search grid
disk = np.zeros(xx_disk.shape)
disk[ ((xx_disk-x0)**2 + (yy_disk-y0)**2) <= (disk_radius)**2 ] = 1.
fwhm2stdev_factor = 1/2.335
sigma = asec2rad(fwhm) * fwhm2stdev_factor
alpha_rad = asec2rad(alpha) * fwhm2stdev_factor
count = 0
models = []
for z in zs:
sigma_z = np.sqrt(sigma**2 + (alpha_rad*np.abs(z-z0))**2)
amplitude_z = 1/( np.sqrt(2*np.pi) * (sigma_z)**2 )
sigma_z_pixels = sigma_z/asec2rad(res)
blurred_disk = scipy.ndimage.filters.gaussian_filter(disk, sigma_z_pixels, mode='constant', cval=0.0)
#interpfunc = scipy.interpolate.RectBivariateSpline(xx_disk.flatten(), yy_disk.flatten(), blurred_disk.flatten())
interpfunc = scipy.interpolate.RectBivariateSpline(y, x, blurred_disk)
m_z = interpfunc(ypos[count], xpos[count], grid=False)
models.append(m_z)
count = count + 1
#plt.figure(); plt.imshow(blurred_disk)
return models
def gridPower(first, last=None, win=50., res=2., fwhm=7., channel='tp12bit', plot=True):
if last is None:
last = first
scans = range(first, last+1)
z = mfilt(scans, channel)
meas = z.__dict__[channel]
# compute the x and y coordinates that we are computing the maps over
x = asec2rad(np.arange(-win, win+res, res))
y = asec2rad(np.arange(-win, win+res, res))
#(xx, yy) = np.meshgrid(x, y) # search grid
(xx, yy) = meshgrid_lmtscripts(x, y) # search grid
gridded = scipy.interpolate.griddata(np.array([z.x[:-1], z.y[:-1]]).T , meas[:-1], (xx, yy), method='linear', fill_value=0.)
imax = np.argmax(gridded.ravel())
(xmax, ymax) = (xx.ravel()[imax], yy.ravel()[imax])
peakval = (gridded.ravel()[imax])
if plot:
plt.figure()
plt.clf()
plt.axis(aspect=1.0)
plt.imshow(gridded, extent=(-win-res/2., win+res/2., -win-res/2., win+res/2.), interpolation='nearest', origin='lower', cmap='afmhot_r')
plt.plot(rad2asec(z.x), rad2asec(z.y), '-', color='violet', ls='--', lw=1.5, alpha=0.75)
plt.text(-0.99*win-res/2, 0.98*win+res/2, '[%.1f, %.1f]"' % (rad2asec(xmax), rad2asec(ymax)), va='top', ha='left', color='black')
plt.text(.99*win+res/2, .98*win+res/2, '[%.1f mV]' % (peakval*1e3), va='top', ha='right', color='black')
plt.title(z.source.strip() + ' scan:' + str(scans[0]) + '-' + str(scans[-1]) )
plt.xlabel('$\Delta$x [arcsec]')
plt.ylabel('$\Delta$y [arcsec]')
plt.gca().set_aspect(1.0)
plt.gca().set_axis_bgcolor('white')
plt.grid(alpha=0.5)
plt.ylim(-win-res/2, win+res/2)
plt.xlim(-win-res/2, win+res/2)
plt.tight_layout()
return peakval, xmax, ymax
def focus_origMap(first, last=None, win=50., res=2., fwhm=7., channel='tp12bit', plot=True):
if last is None:
last = first
scans = range(first, last+1)
vmeans = []
vstds = []
z_position = []
for scan in scans:
z_position.append(rawopen(scan, channel).nc.variables['Header.M2.ZReq'].data)
peakval, xmax, ymax = gridPower(scan, last=None, win=win, res=res, fwhm=fwhm, channel=channel, plot=plot)
vmeans.append(peakval)
plt.figure(); plt.plot(z_position, vmeans)
vmean_fit_flipped, stats = np.polynomial.polynomial.polyfit(np.array(z_position), np.array(vmeans), 2, rcond=None, full=True)
vmean_fit = vmean_fit_flipped[::-1]
p = np.poly1d(vmean_fit)
znews = np.linspace(np.min(z_position), np.max(z_position),100)
pnews = p(znews)
plt.plot(znews, pnews)
imax = np.argmax(pnews)
z0 = znews[imax]
print 'estimated z0'
print z0
plt.text(min(znews), min(pnews), '[peak $\mathbf{z}$: %.3f]' % (z0), va='top', ha='left', color='black')
plt.title('Focusing')
plt.xlabel('$\mathbf{z}$')
plt.ylabel('amplitude')
return
| mit |
jetuk/pywr | pywr/notebook/figures.py | 4 | 3488 | import numpy as np
import scipy.stats
import pandas
import matplotlib
import matplotlib.pyplot as plt
c = {
"Afill": "#ee1111",
"Aedge": "#660000",
"Bfill": "#1111ee",
"Bedge": "#000066",
"Cfill": "#11bb11",
"Cedge": "#008800",
}
def align_series(A, B, names=None, start=None, end=None):
"""Align two series for plotting / comparison
Parameters
----------
A : `pandas.Series`
B : `pandas.Series`
names : list of strings
start : `pandas.Timestamp` or timestamp string
end : `pandas.Timestamp` or timestamp string
Example
-------
>>> A, B = align_series(A, B, ["Pywr", "Aquator"], start="1920-01-01", end="1929-12-31")
>>> plot_standard1(A, B)
"""
# join series B to series A
# TODO: better handling of heterogeneous frequencies
df = pandas.concat([A, B], join="inner", axis=1)
# apply names
if names is not None:
df.columns = names
else:
names = list(df.columns)
# clip start and end to user-specified dates
idx = [df.index[0], df.index[-1]]
if start is not None:
idx[0] = pandas.Timestamp(start)
if end is not None:
idx[1] = pandas.Timestamp(end)
if start or end:
df = df.loc[idx[0]:idx[-1],:]
A = df[names[0]]
B = df[names[1]]
return A, B
def plot_standard1(A, B):
fig, axarr = plt.subplots(3, figsize=(10, 12), facecolor="white")
plot_timeseries(A, B, axarr[0])
plot_QQ(A, B, axarr[1])
plot_percentiles(A, B, axarr[2])
return fig, axarr
def set_000formatter(axis):
axis.set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
def plot_timeseries(A, B, ax=None):
if ax is None:
ax = plt.gca()
B.plot(ax=ax, color=c["Bfill"], clip_on=False)
A.plot(ax=ax, color=c["Afill"], clip_on=False)
ax.grid(True)
ax.set_ylim(0, None)
set_000formatter(ax.get_yaxis())
ax.set_xlabel("")
ax.legend([B.name, A.name], loc="best")
return ax
def plot_QQ(A, B, ax=None):
if ax is None:
ax = plt.gca()
ax.scatter(B.values, A.values, color=c["Cfill"], edgecolor=c["Cedge"], clip_on=False)
xlim = ax.get_xlim()
ylim = ax.get_ylim()
    limit = max(xlim[1], ylim[1])
ax.plot([0, limit], [0, limit], '-k')
ax.set_xlim(0, limit)
ax.set_ylim(0, limit)
ax.grid(True)
set_000formatter(ax.get_xaxis())
set_000formatter(ax.get_yaxis())
ax.set_xlabel(B.name)
ax.set_ylabel(A.name)
ax.legend(["Equality"], loc="best")
return ax
def plot_percentiles(A, B, ax=None):
if ax is None:
ax = plt.gca()
percentiles = np.linspace(0.001, 0.999, 1000) * 100
A_pct = scipy.stats.scoreatpercentile(A.values, percentiles)
B_pct = scipy.stats.scoreatpercentile(B.values, percentiles)
percentiles = percentiles / 100.0
ax.plot(percentiles, B_pct[::-1], color=c["Bfill"], clip_on=False, linewidth=2)
ax.plot(percentiles, A_pct[::-1], color=c["Afill"], clip_on=False, linewidth=2)
ax.set_xlabel("Cumulative frequency")
ax.grid(True)
ax.xaxis.grid(True, which="both")
set_000formatter(ax.get_yaxis())
ax.set_xscale("logit")
xticks = ax.get_xticks()
xticks_minr = ax.get_xticks(minor=True)
ax.set_xticklabels([], minor=True)
ax.set_xticks([0.01, 0.1, 0.5, 0.9, 0.99])
ax.set_xticklabels(["1", "10", "50", "90", "99"])
ax.set_xlim(0.001, 0.999)
ax.legend([B.name, A.name], loc="best")
return ax
| gpl-3.0 |
bxlab/HiFive_Paper | Scripts/HiCLib/mirnylab-hiclib-460c3fbc0f72/src/hiclib/binnedData.py | 2 | 64389 | #(c) 2012 Massachusetts Institute of Technology. All Rights Reserved
# Code written by: Maksim Imakaev ([email protected])
#TODO:(MIU) Write tests for this module!
"""
Binned data - analysis of HiC, binned to resolution.
Concepts
--------
class Binned Data allows low-level manipulation of multiple HiC datasets,
binned to the same resolution from the same genome.
When working with multiple datasets, all the filters will be synchronized,
so only bins present in all datasets will be considered for the analysis.
Removal of bins from one dataset will remove them from the others.
E.g. removing 1% of bins with lowest # of count might remove more than 1% of
total bins, when working with 2 or more datasets.
Class has significant knowledge about filters that have been applied.
If an essential filter was not applied, it will throw an exception;
if advised filter is not applied, it will throw a warning.
However, it does not guarantee dependencies, and you have to think yourself.
Most of the methods have an optional "force" argument that will
ignore dependencies.
We provide example scripts that show ideal protocols for certain types of
the analysis, but they don't cover the realm of all possible manipulations
that can be performed with this class.
Input data
----------
method :py:func:`SimpleLoad <binnedData.simpleLoad>` may be used to load
the data. It automatically checks for possible genome length mismatch.
This method works best with h5dict files, created by fragmentHiC.
In this case you just need to supply the filename.
It can also accept any dictionary-like object with the following keys,
where all but "heatmap" is optional.
* ["heatmap"] : all-by-all heatmap
* ["singles"] : vector of SS reads, optional
* ["frags"] : number of rsites per bin, optional
* ["resolution"] : resolution
All information about the genome, including GC content and restriction sites,
can be obtained from the Genome class.
Genomic tracks can be loaded using an automated parser that accepts bigWig
files and fixed step wiggle files.
See documentation for :py:func:`experimentalBinnedData.loadWigFile` that
describes exactly how the data is averaged and parsed.
Variables
---------
self.dataDict - dictionary with heatmaps; keys are provided when loading
the data.
self.singlesDict - dictionary with SS read vectors. Keys are the same.
self.fragsDict - dictionary with fragment density data
self.trackDict - dictionary with genomic tracks, such as GC content.
Custom tracks should be added here.
self.biasDict - dictionary with biases as calculated by
iterative correction (incomplete)
self.PCDict - dictionary with principal components of each datasets.
Keys as in dataDict
self.EigDict - dictionary with eigenvectors for each dataset.
Keys as in dataDict.
Hierarchy of filters
--------------------
This hierarchy attempts to connect all logical dependencies between
filters into one diagram.
This includes both biological dependencies and programming dependencies.
As a result, it's incomplete and might be not 100% accurate.
Generally filters from the next group should be applied after filters
from previous groups, if any.
Examples of the logic are below:
* First, apply filters that don't depend on counts,
i.e. remove diagonal and low-coverage bins.
* Second, remove regions with poor coverage;
do this before chaining heatmaps with other filters.
* Fake translocations before truncating trans, as translocations are very
high-count regions, and truncTrans will truncate them, not actual trans reads
* Faking reads currently requires zeros to be removed.
This will be changed later
* Fake cis counts after truncating trans, so that they don't get faked with
extremely high-count outliers in a trans-map
* Perform iterative correction after all the filters are applied
* Perform PCA after IC of trans data, and with zeros removed
1. Remove Diagonal, removeBySequencedCount
2. RemovePoorRegions, RemoveStandalone (these two filters are not transitive)
3. fakeTranslocations
4. truncTrans
5. fakeCis
6. iterative correction (does not require removeZeros)
7. removeZeros
8. PCA (Requires removeZeros)
9. RestoreZeros
Besides that, filter dependencies are:
* Faking reads requires: removeZeros
* PCA requires: removeZeros, fakeCis
* IC with SS requires: no previous iterative corrections, no removed cis reads
* IC recommends removal of poor regions
Other filter dependencies, including advised but not required filters, will be
issued as warnings during runtime of a program.
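Example protocol
----------------
A minimal sketch of the filter order described above; the resolution, genome
path, heatmap file name and the dataset key "HindIII" are illustrative only:
>>> BD = binnedData(1000000, "../data/hg19")
>>> BD.simpleLoad("GM-HindIII-1M.hm", "HindIII")
>>> BD.removeDiagonal()
>>> BD.removeBySequencedCount(0.5)
>>> BD.removePoorRegions(cutoff=2)
>>> BD.truncTrans(high=0.0005)
>>> BD.fakeCis()              # also performs iterative correction
>>> BD.removeZeros()
>>> BD.doEig(numPCs=3)        # eigenvectors end up in BD.EigDict
>>> BD.restoreZeros()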
-------------------------------------------------------------------------------
API documentation
-----------------
"""
import os
from mirnylib import numutils
import warnings
from mirnylib.numutils import PCA, EIG, correct, \
ultracorrectSymmetricWithVector, isInteger, \
observedOverExpected, ultracorrect, adaptiveSmoothing, \
removeDiagonals, fillDiagonal
from mirnylib.genome import Genome
import numpy as np
from math import exp
from mirnylib.h5dict import h5dict
from scipy.stats.stats import spearmanr
from mirnylib.numutils import fakeCisImpl
class binnedData(object):
"""Base class to work with binned data, the most documented and
robust part of the code. Further classes for other analysis
are inherited from this class.
"""
def __init__(self, resolution, genome, readChrms=["#", "X"]):
"""
self.__init__ - initializes an empty dataset.
This method sets up a Genome object and resolution.
Genome object specifies genome version and inclusion/exclusion
of sex chromosomes.
Parameters
----------
resolution : int
Resolution of all datasets
genome : genome Folder or Genome object
"""
if type(genome) == str:
self.genome = Genome(genomePath=genome, readChrms=readChrms)
else:
self.genome = genome
assert hasattr(self.genome, "chrmCount")
if resolution is not None:
self.resolution = resolution
self.chromosomes = self.genome.chrmLens
self.genome.setResolution(self.resolution)
self._initChromosomes()
self.dataDict = {}
self.biasDict = {}
self.trackDict = {}
self.singlesDict = {}
self.fragsDict = {}
self.PCDict = {}
self.EigDict = {}
self.eigEigenvalueDict = {}
self.PCAEigenvalueDict = {}
self.dicts = [self.trackDict, self.biasDict, self.singlesDict,
self.fragsDict]
self.eigDicts = [self.PCDict, self.EigDict]
self._loadGC()
self.appliedOperations = {}
def _initChromosomes(self):
"internal: loads mappings from the genome class based on resolution"
self.chromosomeStarts = self.genome.chrmStartsBinCont
self.centromerePositions = self.genome.cntrMidsBinCont
self.chromosomeEnds = self.genome.chrmEndsBinCont
self.trackLength = self.genome.numBins
self.chromosomeCount = self.genome.chrmCount
self.chromosomeIndex = self.genome.chrmIdxBinCont
self.positionIndex = self.genome.posBinCont
self.armIndex = self.chromosomeIndex * 2 + \
np.array(self.positionIndex > self.genome.cntrMids
[self.chromosomeIndex], int)
def _giveMask(self):
"Returns index of all bins with non-zero read counts"
self.mask = np.ones(len(self.dataDict.values()[0]), np.bool)
for data in self.dataDict.values():
datasum = np.sum(data, axis=0)
datamask = datasum > 0
self.mask *= datamask
return self.mask
def _giveMask2D(self):
"""Returns outer product of _giveMask with itself,
i.e. bins with possibly non-zero counts"""
self._giveMask()
self.mask2D = self.mask[:, None] * self.mask[None, :]
return self.mask2D
def _loadGC(self):
"loads GC content at given resolution"
self.trackDict["GC"] = np.concatenate(self.genome.GCBin)
def _checkItertiveCorrectionError(self):
"""internal method for checking if iterative correction
might be bad to apply"""
for value in self.dataDict.values():
if isInteger(value) == True:
s = np.sum(value, axis=0)
sums = np.sort(s[s != 0])
if sums[0] < 100:
error = int(100. / np.sqrt(sums[0]))
message1 = "Lowest 5 sums of an array rows are: " + \
str(sums[:5])
warnings.warn("\n%s\nIterative correction will lead to \
about %d %% relative error for certain columns" %
(message1, error))
if sums[0] < 5:
raise StandardError("Iterative correction is \
very dangerous. Use force=true to override.")
else:
s = np.sum(value > 0, axis=0)
sums = np.sort(s[s != 0])
if sums[0] < min(100, len(value) / 2):
error = int(100. / np.sqrt(sums[0]))
print "Got floating-point array for correction. Rows with \
5 smallest entries are:", sums[:5]
warnings.warn("\nIterative correction might lead to about\
%d %% relative error for certain columns" % error)
if sums[0] < 4:
raise StandardError("Iterative correction is \
very dangerous. Use force=true to override.")
def _checkAppliedOperations(self, neededKeys=[],
advicedKeys=[],
excludedKeys=[]):
"Internal method to check if all needed operations were applied"
if (True in [i in self.appliedOperations for i in excludedKeys]):
print "Operations that are not allowed:", excludedKeys
print "applied operations: ", self.appliedOperations
print "use 'force = True' to override this message"
raise StandardError("Prohibited filter was applied")
if (False in [i in self.appliedOperations for i in neededKeys]):
print "needed operations:", neededKeys
print "applied operations:", self.appliedOperations
print "use 'force = True' to override this message"
raise StandardError("Critical filter not applied")
if (False in [i in self.appliedOperations for i in advicedKeys]):
print "Adviced operations:", advicedKeys
print "Applied operations:", self.appliedOperations
warnings.warn("\nNot all adviced filters applied")
def _recoverOriginalReads(self, key):
"""Attempts to recover original read counts from the data
If data is integer, returns data.
If not, attempts to revert iterative correction
and return original copy.
This method does not modify the dataset!
"""
data = self.dataDict[key]
if "Corrected" not in self.appliedOperations:
if isInteger(data):
return data
else:
warnings.warn("Data was not corrected, but is not integer")
return None
else:
if key not in self.biasDict:
warnings.warn("Correction was applied, "
"but bias information is missing!")
return None
bias = self.biasDict[key]
data1 = data * bias[:, None]
data1 *= bias[None, :]
if isInteger(data1):
return data1
else:
warnings.warn("Attempted recovery of reads, but "
"data is not integer")
return None
def simpleLoad(self, in_data, name, chromosomeOrder=None):
"""Loads data from h5dict file or dict-like object
Parameters
----------
in_data : str or dict-like
h5dict filename or dictionary-like object with input data,
stored under the key "heatmap", and a vector of SS reads,
stored under the key "singles".
name : str
Key under which to store dataset in self.dataDict
chromosomeOrder : None or list
If file to load is a byChromosome map, use this to define chromosome order
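Example
-------
A sketch; the file name, the in-memory dictionary and the keys are
illustrative (heatmapArray is assumed to be an NxN numpy array):
>>> BD.simpleLoad("GM-HindIII-1M.hm", "HindIII")
>>> BD.simpleLoad({"heatmap": heatmapArray, "resolution": 1000000}, "inMemory")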
"""
if type(in_data) == str:
path = os.path.abspath(os.path.expanduser(in_data))
if os.path.exists(path) == False:
raise IOError("HDF5 dict do not exist, %s" % path)
alldata = h5dict(path, mode="r")
else:
alldata = in_data
if type(alldata) == h5dict:
if ("0 0" in alldata.keys()) and ("heatmap" not in alldata.keys()):
if chromosomeOrder != None:
chromosomes = chromosomeOrder
else:
chromosomes = xrange(self.chromosomeCount)
datas = []
for i in chromosomes:
datas.append(np.concatenate([alldata["{0} {1}".format(i, j)] for j in chromosomes], axis=1))
newdata = {"heatmap": np.concatenate(datas)}
for i in alldata.keys():
newdata[i] = alldata[i]
alldata = newdata
self.dataDict[name] = np.asarray(alldata["heatmap"], dtype=np.double)
try:
self.singlesDict[name] = alldata["singles"]
except:
print "No SS reads found"
try:
if len(alldata["frags"]) == self.genome.numBins:
self.fragsDict[name] = alldata["frags"]
else:
print "Different bin number in frag dict"
except:
pass
if "resolution" in alldata:
if self.resolution != alldata["resolution"]:
print "resolution mismatch!!!"
print "--------------> Bye <-------------"
raise StandardError("Resolution mismatch! ")
if self.genome.numBins != len(alldata["heatmap"]):
print "Genome length mismatch!!!"
print "source genome", len(alldata["heatmap"])
print "our genome", self.genome.numBins
print "Check for readChrms parameter when you identify the genome"
raise StandardError("Genome size mismatch! ")
def export(self, name, outFilename, byChromosome=False, **kwargs):
"""
Exports current heatmaps and SS files to an h5dict.
Parameters
----------
name : str
Key for the dataset to export
outFilename : str
Where to export
byChromosome : bool or "cis" or "all"
save by chromosome heatmaps.
Ignore SS reads.
True means "all"
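Example
-------
A sketch; the dataset key and output file name are illustrative:
>>> BD.export("HindIII", "HindIII-1M-export.hm", byChromosome="cis")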
"""
if "out_filename" in kwargs.keys():
raise ValueError("out_filename replaced with outFilename!")
if name not in self.dataDict:
raise ValueError("No data {name}".format(name=name))
toexport = {}
if byChromosome is False:
toexport["heatmap"] = self.dataDict[name]
if name in self.singlesDict:
toexport["singles"] = self.singlesDict[name]
if name in self.fragsDict:
toexport["frags"] = self.fragsDict[name]
else:
hm = self.dataDict[name]
for i in xrange(self.genome.chrmCount):
for j in xrange(self.genome.chrmCount):
if (byChromosome == "cis") and (i != j):
continue
st1 = self.chromosomeStarts[i]
end1 = self.chromosomeEnds[i]
st2 = self.chromosomeStarts[j]
end2 = self.chromosomeEnds[j]
toexport["{0} {1}".format(i, j)] = hm[st1:end1,
st2:end2]
toexport["resolution"] = self.resolution
toexport["genome"] = self.genome.folderName
toexport["binNumber"] = len(self.chromosomeIndex)
toexport["genomeIdxToLabel"] = self.genome.idx2label
toexport["chromosomeStarts"] = self.chromosomeStarts
toexport["chromosomeIndex"] = self.chromosomeIndex
toexport["positionIndex"] = self.positionIndex
myh5dict = h5dict(outFilename, mode="w")
myh5dict.update(toexport)
def removeDiagonal(self, m=1):
"""Removes all bins on a diagonal, and bins that are up to m away
from the diagonal, including m.
By default, removes all bins touching the diagonal.
Parameters
----------
m : int, optional
Number of bins to remove
"""
for i in self.dataDict.keys():
self.dataDict[i] = np.asarray(
self.dataDict[i], dtype=np.double, order="C")
removeDiagonals(self.dataDict[i], m)
self.appliedOperations["RemovedDiagonal"] = True
self.removedDiagonalValue = m
def removeStandalone(self, offset=3):
"""removes standalone groups of bins
(groups of less-than-offset bins)
Parameters
----------
offset : int
Maximum length of group of bins to be removed
"""
diffs = np.diff(np.array(np.r_[False, self._giveMask(), False], int))
begins = np.nonzero(diffs == 1)[0]
ends = np.nonzero(diffs == -1)[0]
beginsmask = (ends - begins) <= offset
newbegins = begins[beginsmask]
newends = ends[beginsmask]
print "removing %d standalone bins" % np.sum(newends - newbegins)
mask = self._giveMask()
for i in xrange(len(newbegins)):
mask[newbegins[i]:newends[i]] = False
mask2D = mask[:, None] * mask[None, :]
antimask = np.nonzero(mask2D.flat == False)[0]
for i in self.dataDict.values():
i.flat[antimask] = 0
self.appliedOperations["RemovedStandalone"] = True
def removeBySequencedCount(self, sequencedFraction=0.5):
"""
Removes bins that have less than sequencedFraction*resolution
sequenced counts.
This filters bins by percent of sequenced counts,
and also removes the last bin if it's very short.
.. note:: this is not equivalent to mappability
Parameters
----------
sequencedFraction: float, optional, 0<x<1
Fraction of the bin that needs to be sequenced in order
to keep the bin
"""
self._checkAppliedOperations(excludedKeys="RemovedZeros")
binCutoff = int(self.resolution * sequencedFraction)
sequenced = np.concatenate(self.genome.mappedBasesBin)
mask = sequenced < binCutoff
nzmask = np.zeros(
len(mask), bool) # mask of regions with non-zero counts
for i in self.dataDict.values():
sumData = np.sum(i[mask], axis=1) > 0
nzmask[mask] = nzmask[mask] + sumData
i[mask, :] = 0
i[:, mask] = 0
print "Removing %d bins with <%lf %% coverage by sequenced reads" % \
((nzmask > 0).sum(), 100 * sequencedFraction)
self.appliedOperations["RemovedUnsequenced"] = True
pass
def removePoorRegions(self, names=None, cutoff=2, coverage=False, trans=False):
"""Removes "cutoff" percent of bins with least counts
Parameters
----------
names : list of str
List of datasets to perform the filter. All by default.
cutoff : int, 0<cutoff<100
Percent of lowest-counts bins to be removed
"""
statmask = np.zeros(len(self.dataDict.values()[0]), np.bool)
mask = np.ones(len(self.dataDict.values()[0]), np.bool)
if names is None:
names = self.dataDict.keys()
for i in names:
data = self.dataDict[i]
if trans:
data = data.copy()
data[self.chromosomeIndex[:, None] == self.chromosomeIndex[None, :]] = 0
datasum = np.sum(data, axis=0)
datamask = datasum > 0
mask *= datamask
if coverage == False:
countsum = np.sum(data, axis=0)
elif coverage == True:
countsum = np.sum(data > 0, axis=0)
else:
raise ValueError("coverage is true or false!")
newmask = countsum >= np.percentile(countsum[datamask], cutoff)
mask *= newmask
statmask[(newmask == False) * (datamask == True)] = True
print "removed {0} poor bins".format(statmask.sum())
inds = np.nonzero(mask == False)
for i in self.dataDict.values():
i[inds, :] = 0
i[:, inds] = 0
self.appliedOperations["RemovedPoor"] = True
def truncTrans(self, high=0.0005):
"""Truncates trans contacts to remove blowouts
Parameters
----------
high : float, 0<high<1, optional
Fraction of top trans interactions to be removed
"""
for i in self.dataDict.keys():
data = self.dataDict[i]
transmask = self.chromosomeIndex[:,
None] != self.chromosomeIndex[None, :]
lim = np.percentile(data[transmask], 100. * (1 - high))
print "dataset %s truncated at %lf" % (i, lim)
tdata = data[transmask]
tdata[tdata > lim] = lim
self.dataDict[i][transmask] = tdata
self.appliedOperations["TruncedTrans"] = True
def removeCis(self):
"sets to zero all cis contacts"
mask = self.chromosomeIndex[:, None] == self.chromosomeIndex[None, :]
for i in self.dataDict.keys():
self.dataDict[i][mask] = 0
self.appliedOperations["RemovedCis"] = True
print("All cis counts set to zero")
def fakeCisOnce(self, mask="CisCounts", silent=False):
"""Used to fake cis counts or any other region
with random trans counts.
If extra mask is supplied, it is used instead of cis counts.
This method draws fake contacts once.
Use fakeCis() for iterative self-consistent faking of cis.
Parameters
----------
mask : NxN boolean array or "CisCounts"
Mask of elements to be faked.
If set to "CisCounts", cis counts will be faked
When mask is used, cis elements are NOT faked.
silent : bool
Do not print anything
"""
#TODO (MIU): check this method!
if silent == False:
print("All cis counts are substituted with matching trans count")
for key in self.dataDict.keys():
data = np.asarray(self.dataDict[key], order="C", dtype=float)
if mask == "CisCounts":
_mask = np.array(self.chromosomeIndex[:, None] ==
self.chromosomeIndex[None, :], int, order="C")
else:
assert mask.shape == self.dataDict.values()[0].shape
_mask = np.array(mask, dtype=int, order="C")
_mask[self.chromosomeIndex[:, None] ==
self.chromosomeIndex[None, :]] = 2
s = np.abs(np.sum(data, axis=0)) <= 1e-10
_mask[:, s] = 2
_mask[s, :] = 2
_mask = np.asarray(_mask, dtype=np.int64)
fakeCisImpl(data, _mask)
self.dataDict[key] = data
self.appliedOperations["RemovedCis"] = True
self.appliedOperations["FakedCis"] = True
def fakeCis(self, force=False, mask="CisCounts"):
"""This method fakes cis contacts in an interative way
It is done to achieve faking cis contacts that is
independent of normalization of the data.
Parameters
----------
force : bool (optional)
Set this to avoid checks for iterative correction
mask : see fakeCisOnce
"""
self.removeCis()
self.iterativeCorrectWithoutSS(force=force)
self.fakeCisOnce(silent=True, mask=mask)
self.iterativeCorrectWithoutSS(force=force)
self.fakeCisOnce(silent=True, mask=mask)
self.iterativeCorrectWithoutSS(force=force)
print("All cis counts are substituted with faked counts")
print("Data is iteratively corrected as a part of faking cis counts")
def fakeTranslocations(self, translocationRegions):
"""
This method fakes reads corresponding to a translocation.
Parameters
----------
translocationRegions: list of tuples
List of tuples (chr1,start1,end1,chr2,start2,end2),
masking a high-count region around visible translocation.
If end1/end2 is None, it is treated as length of chromosome.
So, use (chr1,0,None,chr2,0,None) to remove inter-chromosomal
interaction entirely.
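Example
-------
A sketch; the chromosome indices and coordinates below are made up:
>>> BD.fakeTranslocations([(1, 0, None, 5, 0, None),
... (2, 10000000, 20000000, 7, 0, 15000000)])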
"""
self._checkAppliedOperations(excludedKeys="RemovedZeros")
mask = np.zeros((self.genome.numBins, self.genome.numBins), int)
resolution = self.genome.resolution
for i in translocationRegions:
st1 = self.genome.chrmStartsBinCont[i[0]]
st2 = self.genome.chrmStartsBinCont[i[3]]
beg1 = st1 + i[1] / resolution
if i[2] is not None:
end1 = st1 + i[2] / resolution + 1
else:
end1 = self.genome.chrmEndsBinCont[i[0]]
beg2 = st2 + i[4] / resolution
if i[5] is not None:
end2 = st2 + i[5] / resolution + 1
else:
end2 = self.genome.chrmEndsBinCont[i[3]]
mask[beg1:end1, beg2:end2] = 1
mask[beg2:end2, beg1:end1] = 1
self.fakeCisOnce(mask)
def correct(self, names=None):
"""performs single correction without SS
Parameters
----------
names : list of str or None
Keys of datasets to be corrected. If none, all are corrected.
"""
self.iterativeCorrectWithoutSS(names, M=1)
def iterativeCorrectWithoutSS(self, names=None, M=None, force=False,
tolerance=1e-5):
"""performs iterative correction without SS
Parameters
----------
names : list of str or None, optional
Keys of datasets to be corrected. By default, all are corrected.
M : int, optional
Number of iterations to perform.
force : bool, optional
Ignore warnings and pre-requisite filters
"""
if force == False:
self._checkItertiveCorrectionError()
self._checkAppliedOperations(advicedKeys=[
"RemovedDiagonal", "RemovedPoor"])
if names is None:
names = self.dataDict.keys()
for i in names:
data, dummy, bias = ultracorrectSymmetricWithVector(
self.dataDict[i], M=M, tolerance=tolerance)
self.dataDict[i] = data
self.biasDict[i] = bias
if i in self.singlesDict:
self.singlesDict[i] = self.singlesDict[i] / bias.astype(float)
self.appliedOperations["Corrected"] = True
def adaptiveSmoothing(self, smoothness, useOriginalReads="try",
names=None, rawReadDict=None):
"""
Performs adaptive smoothing of Hi-C datasets.
Adaptive smoothing attempts to smooth the low-count, "sparse" part
of a Hi-C matrix, while keeping the contrast in a high-count
"diagonal" part of the matrix.
It does it by blurring each bin pair value into a gaussian, which
should encompass at least **smoothness** raw reads. However, only
half of the reads from each bin pair are counted into this gaussian, while
full reads from neighboring bin pairs are counted.
To summarize:
If a bin pair contains #>2*smoothness reads, it is kept intact.
If a bin pair contains #<2*smoothness reads, reads around bin pair
are counted, and a bin pair is smoothed to a circle (gaussian),
containing smoothness - (#/2) reads.
A standalone read in a sparse part of a matrix is smoothed to a
circle (gaussian) that encompasses smoothness reads.
.. note::
This algorithm can smooth any heatmap, e.g. corrected one.
However, ideally it needs to know raw reads to correctly leverage
the contribution from different bins.
By default, it attempts to recover raw reads. However, it
can do so only after single iterative correction.
If used after fakeCis method, it won't use raw reads, unless
provided externally.
.. warning::
Note that if you provide raw reads externally, you would need
to make a copy of dataDict prior to filtering the data,
not just a reference to it. Like
>>>for i in keys: dataCopy[i] = self.dataDict[i].copy()
Parameters
----------
smoothness : float, positive. Often >1.
Parameter of smoothness as described above
useOriginalReads : bool or "try"
If True, requires to recover original reads for smoothness
If False, treats heatmap data as reads
If "try", attempts to recover original reads;
otherwise proceeds with heatmap data.
rawReadDict : dict
A copy of self.dataDict with raw reads
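Example
-------
A sketch assuming raw reads were copied right after loading, before any
filtering or correction (smoothness=10 is an arbitrary illustrative value):
>>> rawReads = {}
>>> for key in BD.dataDict.keys(): rawReads[key] = BD.dataDict[key].copy()
>>> # ... apply filters and iterative correction here ...
>>> BD.adaptiveSmoothing(smoothness=10, rawReadDict=rawReads)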
"""
if names is None:
names = self.dataDict.keys()
mask2D = self._giveMask2D()
#If diagonal was removed, we should remember about it!
if hasattr(self, "removedDiagonalValue"):
removeDiagonals(mask2D, self.removedDiagonalValue)
for name in names:
data = self.dataDict[name]
if useOriginalReads is not False:
if rawReadDict is not None:
#raw reads provided externally
reads = rawReadDict[name]
else:
#recovering raw reads
reads = self._recoverOriginalReads(name)
if reads is None:
#failed to recover reads
if useOriginalReads == True:
raise RuntimeError("Cannot recover original reads!")
else:
#raw reads were not requested
reads = None
if reads is None:
reads = data # Feed this to adaptive smoothing
smoothed = np.zeros_like(data, dtype=float)
N = self.chromosomeCount
for i in xrange(N):
for j in xrange(N):
st1 = self.chromosomeStarts[i]
st2 = self.chromosomeStarts[j]
end1 = self.chromosomeEnds[i]
end2 = self.chromosomeEnds[j]
cur = data[st1:end1, st2:end2]
curReads = reads[st1:end1, st2:end2]
curMask = mask2D[st1:end1, st2:end2]
s = adaptiveSmoothing(matrix=cur,
cutoff=smoothness,
alpha=0.5,
mask=curMask,
originalCounts=curReads)
smoothed[st1:end1, st2:end2] = s
self.dataDict[name] = smoothed
self.appliedOperations["Smoothed"] = True
def removeChromosome(self, chromNum):
"""removes certain chromosome from all tracks and heatmaps,
setting all values to zero
Parameters
----------
chromNum : int
Number of chromosome to be removed
"""
beg = self.genome.chrmStartsBinCont[chromNum]
end = self.genome.chrmEndsBinCont[chromNum]
for i in self.dataDict.values():
i[beg:end] = 0
i[:, beg:end] = 0
for mydict in self.dicts:
for value in mydict.values():
value[beg:end] = 0
for mydict in self.eigDicts:
for value in mydict.values():
value[beg:end] = 0
def removeZeros(self, zerosMask=None):
"""removes bins with zero counts
keeps chromosome starts, ends, etc. consistent
Parameters
----------
zerosMask : length N array or None, optional
If provided, this method removes a defined set of bins
By default, it removes bins with zero counts.
"""
if zerosMask is not None:
s = zerosMask
else:
s = np.sum(self._giveMask2D(), axis=0) > 0
for i in self.dataDict.values():
s *= (np.sum(i, axis=0) > 0)
indices = np.zeros(len(s), int)
count = 0
for i in xrange(len(indices)):
if s[i] == True:
indices[i] = count
count += 1
else:
indices[i] = count
indices = np.r_[indices, indices[-1] + 1]
N = len(self.positionIndex)
for i in self.dataDict.keys():
a = self.dataDict[i]
if len(a) != N:
raise ValueError("Wrong dimensions of data %i: \
%d instead of %d" % (i, len(a), N))
b = a[:, s]
c = b[s, :]
self.dataDict[i] = c
for mydict in self.dicts:
for key in mydict.keys():
if len(mydict[key]) != N:
raise ValueError("Wrong dimensions of data {0}: {1} instead of {2}".format(key, len(mydict[key]), N))
mydict[key] = mydict[key][s]
for mydict in self.eigDicts:
for key in mydict.keys():
mydict[key] = mydict[key][:, s]
if len(mydict[key][0]) != N:
raise ValueError("Wrong dimensions of data %i: \
%d instead of %d" % (key, len(mydict[key][0]), N))
self.chromosomeIndex = self.chromosomeIndex[s]
self.positionIndex = self.positionIndex[s]
self.armIndex = self.armIndex[s]
self.chromosomeEnds = indices[self.chromosomeEnds]
self.chromosomeStarts = indices[self.chromosomeStarts]
self.centromerePositions = indices[self.centromerePositions]
self.removeZerosMask = s
if self.appliedOperations.get("RemovedZeros", False) == True:
warnings.warn("\nYou're removing zeros twice. \
You can't restore zeros now!")
self.appliedOperations["RemovedZeros"] = True
self.genome.setResolution(-1)
return s
def restoreZeros(self, value=np.NAN):
"""Restores zeros that were removed by removeZeros command.
.. warning:: You can restore zeros only if you used removeZeros once.
Parameters
----------
value : number-like, optional.
Value to fill in missing regions. By default, NAN.
"""
if not hasattr(self, "removeZerosMask"):
raise StandardError("Zeros have not been removed!")
s = self.removeZerosMask
N = len(s)
for i in self.dataDict.keys():
a = self.dataDict[i]
self.dataDict[i] = np.zeros((N, N), dtype=a.dtype) * value
tmp = np.zeros((N, len(a)), dtype=a.dtype) * value
tmp[s, :] = a
self.dataDict[i][:, s] = tmp
for mydict in self.dicts:
for key in mydict.keys():
a = mydict[key]
mydict[key] = np.zeros(N, dtype=a.dtype) * value
mydict[key][s] = a
for mydict in self.eigDicts:
#print mydict
for key in mydict.keys():
a = mydict[key]
mydict[key] = np.zeros((len(a), N), dtype=a.dtype) * value
mydict[key][:, s] = a
self.genome.setResolution(self.resolution)
self._initChromosomes()
self.appliedOperations["RemovedZeros"] = False
def doPCA(self, force=False):
"""performs PCA on the data
creates dictionary self.PCDict with results
Last column of PC matrix is first PC, second to last - second, etc.
Returns
-------
Dictionary of principal component matrices for different datasets
"""
neededKeys = ["RemovedZeros", "Corrected", "FakedCis"]
advicedKeys = ["TruncedTrans", "RemovedPoor"]
if force == False:
self._checkAppliedOperations(neededKeys, advicedKeys)
for i in self.dataDict.keys():
currentPCA, eigenvalues = PCA(self.dataDict[i])
self.PCAEigenvalueDict[i] = eigenvalues
for j in xrange(len(currentPCA)):
if spearmanr(currentPCA[j], self.trackDict["GC"])[0] < 0:
currentPCA[j] = -currentPCA[j]
self.PCDict[i] = currentPCA
return self.PCDict
def doEig(self, numPCs=3, force=False):
"""performs eigenvector expansion on the data
creates dictionary self.EigDict with results
Last row of the eigenvector matrix is the largest eigenvector, etc.
Returns
-------
Dictionary of eigenvector matrices for different datasets
"""
neededKeys = ["RemovedZeros", "Corrected", "FakedCis"]
advicedKeys = ["TruncedTrans", "RemovedPoor"]
if force == False:
self._checkAppliedOperations(neededKeys, advicedKeys)
for i in self.dataDict.keys():
currentEIG, eigenvalues = EIG(self.dataDict[i], numPCs=numPCs)
self.eigEigenvalueDict[i] = eigenvalues
for j in xrange(len(currentEIG)):
if spearmanr(currentEIG[j], self.trackDict["GC"])[0] < 0:
currentEIG[j] = -currentEIG[j]
self.EigDict[i] = currentEIG
return self.EigDict
def doCisPCADomains(
self, numPCs=3, swapFirstTwoPCs=False, useArms=True,
corrFunction=lambda x, y: spearmanr(x, y)[0],
domainFunction="default"):
"""Calculates A-B compartments based on cis data.
All PCs are oriented to have positive correlation with GC.
Writes the main result (PCs) into the self.PCDict dictionary.
Additionally, returns correlation coefficients with GC; by chromosome.
Parameters
----------
numPCs : int, optional
Number of PCs to compute
swapFirstTwoPCs : bool, by default False
Swap first and second PC if second has higher correlation with GC
useArms : bool, by default True
Use individual arms, not chromosomes
corrFunction : function, default: spearmanr
Function to compute correlation with GC.
Accepts two arrays, returns correlation
domainFunction : function, optional
Function to calculate principal components of a square matrix.
Accepts: N by N matrix
returns: numPCs by N matrix
Default does iterative correction, then observed over expected.
Then IC
Then calculates correlation matrix.
Then calculates PCA of correlation matrix.
other options: metaphasePaper (like in Naumova, Science 2013)
.. note:: Main output of this function is written to self.PCDict
Returns
-------
corrdict,lengthdict
Dictionaries with keys for each dataset.
Values of corrdict contains an M x numPCs array with correlation
coefficient for each chromosome (or arm) with non-zero length.
Values of lengthdict contain lengthds of chromosomes/arms.
These dictionaries can be used to calculate average correlation
coefficient by chromosome (or by arm).
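Example
-------
A sketch; the dataset key "HindIII" is illustrative:
>>> corrdict, lengthdict = BD.doCisPCADomains(numPCs=3, useArms=True)
>>> firstPC = BD.PCDict["HindIII"][0]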
"""
corr = corrFunction
if (type(domainFunction) == str):
domainFunction = domainFunction.lower()
if domainFunction in ["metaphasepaper", "default", "lieberman",
"erez", "geoff", "lieberman+", "erez+"]:
fname = domainFunction
def domainFunction(chrom):
#orig = chrom.copy()
M = len(chrom.flat)
toclip = 100 * min(0.999, (M - 10.) / M)
removeDiagonals(chrom, 1)
chrom = ultracorrect(chrom)
chrom = observedOverExpected(chrom)
chrom = np.clip(chrom, -1e10, np.percentile(chrom, toclip))
for i in [-1, 0, 1]:
fillDiagonal(chrom, 1, i)
if fname in ["default", "lieberman+", "erez+"]:
#upgrade of (Lieberman 2009)
# does IC, then OoE, then IC, then corrcoef, then PCA
chrom = ultracorrect(chrom)
chrom = np.corrcoef(chrom)
PCs = PCA(chrom, numPCs)[0]
return PCs
elif fname in ["lieberman", "erez"]:
#slight upgrade of (Lieberman 2009)
# does IC, then OoE, then corrcoef, then PCA
chrom = np.corrcoef(chrom)
PCs = PCA(chrom, numPCs)[0]
return PCs
elif fname in ["metaphasepaper", "geoff"]:
chrom = ultracorrect(chrom)
PCs = EIG(chrom, numPCs)[0]
return PCs
else:
raise
if domainFunction in ["lieberman-", "erez-"]:
#simplest function presented in (Lieberman 2009)
#Closest to (Lieberman 2009) that we could do
def domainFunction(chrom):
removeDiagonals(chrom, 1)
chrom = observedOverExpected(chrom)
chrom = np.corrcoef(chrom)
PCs = PCA(chrom, numPCs)[0]
return PCs
corrdict, lengthdict = {}, {}
#dict of per-chromosome correlation coefficients
for key in self.dataDict.keys():
corrdict[key] = []
lengthdict[key] = []
dataset = self.dataDict[key]
N = len(dataset)
PCArray = np.zeros((3, N))
for chrom in xrange(len(self.chromosomeStarts)):
if useArms == False:
begs = (self.chromosomeStarts[chrom],)
ends = (self.chromosomeEnds[chrom],)
else:
begs = (self.chromosomeStarts[chrom],
self.centromerePositions[chrom])
ends = (self.centromerePositions[chrom],
self.chromosomeEnds[chrom])
for end, beg in map(None, ends, begs):
if end - beg < 5:
continue
chrom = dataset[beg:end, beg:end]
GC = self.trackDict["GC"][beg:end]
PCs = domainFunction(chrom)
for PC in PCs:
if corr(PC, GC) < 0:
PC *= -1
if swapFirstTwoPCs == True:
if corr(PCs[0], GC) < corr(PCs[1], GC):
p0, p1 = PCs[0].copy(), PCs[1].copy()
PCs[0], PCs[1] = p1, p0
corrdict[key].append(tuple([corr(i, GC) for i in PCs]))
lengthdict[key].append(end - beg)
PCArray[:, beg:end] = PCs
self.PCDict[key] = PCArray
return corrdict, lengthdict
def cisToTrans(self, mode="All", filename="GM-all"):
"""
Calculates cis-to-trans ratio.
"All" - treating SS as trans reads
"Dummy" - fake SS reads proportional to cis reads with the same
total sum
"Matrix" - use heatmap only
"""
data = self.dataDict[filename]
cismap = self.chromosomeIndex[:, None] == self.chromosomeIndex[None, :]
cissums = np.sum(cismap * data, axis=0)
allsums = np.sum(data, axis=0)
if mode.lower() == "all":
cissums += self.singlesDict[filename]
allsums += self.singlesDict[filename]
elif mode.lower() == "dummy":
sm = np.mean(self.singlesDict[filename])
fakesm = cissums * sm / np.mean(cissums)
cissums += fakesm
allsums += fakesm
elif mode.lower() == "matrix":
pass
else:
raise
return cissums / allsums
class binnedDataAnalysis(binnedData):
"""
Class containing experimental features and data analysis scripts
"""
def plotScaling(self, name, label="BLA", color=None, plotUnit=1000000):
"plots scaling of a heatmap,treating arms separately"
import matplotlib.pyplot as plt
data = self.dataDict[name]
bins = numutils.logbins(
2, self.genome.maxChrmArm / self.resolution, 1.17)
s = np.sum(data, axis=0) > 0
mask = s[:, None] * s[None, :]
chroms = []
masks = []
for i in xrange(self.chromosomeCount):
beg = self.chromosomeStarts[i]
end = self.centromerePositions[i]
chroms.append(data[beg:end, beg:end])
masks.append(mask[beg:end, beg:end])
beg = self.centromerePositions[i]
end = self.chromosomeEnds[i]
chroms.append(data[beg:end, beg:end])
masks.append(mask[beg:end, beg:end])
observed = []
expected = []
for i in xrange(len(bins) - 1):
low = bins[i]
high = bins[i + 1]
obs = 0
exp = 0
for j in xrange(len(chroms)):
if low > len(chroms[j]):
continue
high2 = min(high, len(chroms[j]))
for k in xrange(low, high2):
obs += np.sum(np.diag(chroms[j], k))
exp += np.sum(np.diag(masks[j], k))
observed.append(obs)
expected.append(exp)
observed = np.array(observed, float)
expected = np.array(expected, float)
values = observed / expected
bins = np.array(bins, float)
bins2 = 0.5 * (bins[:-1] + bins[1:])
norm = np.sum(values * (bins[1:] - bins[:-1]) * (
self.resolution / float(plotUnit)))
args = [self.resolution * bins2 / plotUnit, values / (1. * norm)]
if color is not None:
args.append(color)
plt.plot(*args, label=label, linewidth=2)
def averageTransMap(self, name, **kwargs):
"plots and returns average inter-chromosomal inter-arm map"
import matplotlib.pyplot as plt
from mirnylib.plotting import removeBorder
data = self.dataDict[name]
avarms = np.zeros((80, 80))
avmasks = np.zeros((80, 80))
discardCutoff = 10
for i in xrange(self.chromosomeCount):
print i
for j in xrange(self.chromosomeCount):
for k in [-1, 1]:
for l in [-1, 1]:
if i == j:
continue
cenbeg1 = self.chromosomeStarts[i] + \
self.genome.cntrStarts[i] / self.resolution
cenbeg2 = self.chromosomeStarts[j] + \
self.genome.cntrStarts[j] / self.resolution
cenend1 = self.chromosomeStarts[i] + \
self.genome.cntrEnds[i] / self.resolution
cenend2 = self.chromosomeStarts[j] + \
self.genome.cntrEnds[j] / self.resolution
beg1 = self.chromosomeStarts[i]
beg2 = self.chromosomeStarts[j]
end1 = self.chromosomeEnds[i]
end2 = self.chromosomeEnds[j]
if k == 1:
bx = cenbeg1
ex = beg1 - 1
dx = -1
else:
bx = cenend1
ex = end1
dx = 1
if l == 1:
by = cenbeg2
ey = beg2 - 1
dy = -1
else:
by = cenend2
ey = end2
dy = 1
if abs(bx - ex) < discardCutoff:
continue
if bx < 0:
bx = None
if ex < 0:
ex = None
if abs(by - ey) < discardCutoff:
continue
if by < 0:
by = None
if ey < 0:
ey = None
arms = data[bx:ex:dx, by:ey:dy]
assert max(arms.shape) <= self.genome.maxChrmArm / \
self.genome.resolution + 2
mx = np.sum(arms, axis=0)
my = np.sum(arms, axis=1)
maskx = mx == 0
masky = my == 0
mask = (maskx[None, :] + masky[:, None]) == False
maskf = np.array(mask, float)
mlenx = (np.abs(np.sum(mask, axis=0)) > 1e-20).sum()
mleny = (np.abs(np.sum(mask, axis=1)) > 1e-20).sum()
if min(mlenx, mleny) < discardCutoff:
continue
add = numutils.zoomOut(arms, avarms.shape)
assert np.abs((arms.sum() - add.sum(
)) / arms.sum()) < 0.02
addmask = numutils.zoomOut(maskf, avarms.shape)
avarms += add
avmasks += addmask
avarms /= np.mean(avarms)
data = avarms / avmasks
data /= np.mean(data)
plt.imshow(np.log(numutils.trunc(
data)), cmap="jet", interpolation="nearest", **kwargs)
removeBorder()
return np.log(numutils.trunc(data))
def perArmCorrelation(self, data1, data2, doByArms=[]):
"""does inter-chromosomal spearman correlation
of two vectors for each chromosome separately.
Averages over chromosomes with weight of chromosomal length
For chromosomes in "doByArms" treats arms as separatre chromosomes
returns average Spearman r correlation
"""
cr = 0
ln = 0
for i in xrange(self.chromosomeCount):
if i in doByArms:
beg = self.chromosomeStarts[i]
end = self.centromerePositions[i]
if end > beg:
cr += (abs(spearmanr(data1[beg:end], data2[beg:end]
)[0])) * (end - beg)
ln += (end - beg)
print spearmanr(data1[beg:end], data2[beg:end])[0]
beg = self.centromerePositions[i]
end = self.chromosomeEnds[i]
if end > beg:
cr += (abs(spearmanr(data1[beg:end], data2[beg:end]
)[0])) * (end - beg)
ln += (end - beg)
print spearmanr(data1[beg:end], data2[beg:end])[0]
else:
beg = self.chromosomeStarts[i]
end = self.chromosomeEnds[i]
if end > beg:
cr += (abs(spearmanr(data1[beg:end], data2[beg:end]
)[0])) * (end - beg)
ln += (end - beg)
return cr / ln
def divideOutAveragesPerChromosome(self):
"divides each interchromosomal map by it's mean value"
mask2D = self._giveMask2D()
for chrom1 in xrange(self.chromosomeCount):
for chrom2 in xrange(self.chromosomeCount):
for i in self.dataDict.keys():
value = self.dataDict[i]
submatrix = value[self.chromosomeStarts[chrom1]:
self.chromosomeEnds[chrom1],
self.chromosomeStarts[chrom2]:
self.chromosomeEnds[chrom2]]
masksum = np.sum(
mask2D[self.chromosomeStarts[chrom1]:
self.chromosomeEnds[chrom1],
self.chromosomeStarts[chrom2]:
self.chromosomeEnds[chrom2]])
valuesum = np.sum(submatrix)
mean = valuesum / masksum
submatrix /= mean
def interchromosomalValues(self, filename="GM-all", returnAll=False):
"""returns average inter-chromosome-interaction values,
ordered always the same way"""
values = self.chromosomeIndex[:, None] + \
self.chromosomeCount * self.chromosomeIndex[None, :]
values[self.chromosomeIndex[:, None] == self.chromosomeIndex[None,
:]] = self.chromosomeCount * self.chromosomeCount - 1
#mat_img(values)
uv = np.sort(np.unique(values))[1:-1]
probs = np.bincount(
values.ravel(), weights=self.dataDict[filename].ravel())
counts = np.bincount(values.ravel())
if returnAll == False:
return probs[uv] / counts[uv]
else:
probs[self.chromosomeCount * self.chromosomeCount - 1] = 0
values = probs / counts
values[counts == 0] = 0
#mat_img(values.reshape((22,22)))
return values.reshape((self.chromosomeCount, self.chromosomeCount))
class experimentalBinnedData(binnedData):
"Contains some poorly-implemented new features"
def projectOnEigenvalues(self, eigenvectors=[0]):
"""
Calculates projection of the data on a set of eigenvectors.
This is used to calculate heatmaps, reconstructed from eigenvectors.
Parameters
----------
eigenvectors : list of non-negative ints, optional
Zero-based indices of eigenvectors, to project onto
By default projects on the first eigenvector
Returns
-------
Puts resulting data in dataDict under DATANAME_projected key
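Example
-------
A sketch assuming doEig() has already been run and "HindIII" is a dataset key:
>>> BD.projectOnEigenvalues(eigenvectors=[0, 1])
>>> reconstructed = BD.dataDict["HindIII_projected"]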
"""
for name in self.dataDict.keys():
if name not in self.EigDict:
raise RuntimeError("Calculate eigenvectors first!")
PCs = self.EigDict[name]
if max(eigenvectors) >= len(PCs):
raise RuntimeError("Not enough eigenvectors."
"Increase numPCs in doEig()")
PCs = PCs[eigenvectors]
eigenvalues = self.eigEigenvalueDict[name][eigenvectors]
proj = reduce(lambda x, y: x + y,
[PCs[i][:, None] * PCs[i][None, :] * \
eigenvalues[i] for i in xrange(len(PCs))])
mask = PCs[0] != 0
mask = mask[:, None] * mask[None, :] # mask of non-zero elements
data = self.dataDict[name]
datamean = np.mean(data[mask])
proj[mask] += datamean
self.dataDict[name + "_projected"] = proj
def emulateCis(self):
"""if you want to have fun creating syntetic data,
this emulates cis contacts. adjust cis/trans ratio in the C code"""
from scipy import weave
transmap = self.chromosomeIndex[:,
None] == self.chromosomeIndex[None, :]
len(transmap)
for i in self.dataDict.keys():
data = self.dataDict[i] * 1.
N = len(data)
N
code = r"""
#line 1427 "binnedData.py"
using namespace std;
for (int i = 0; i < N; i++)
{
for (int j = 0; j<N; j++)
{
if (transmap[N * i + j] == 1)
{
data[N * i + j] = data[N * i +j] * 300 /(abs(i-j) + \
0.5);
}
}
}
"""
support = """
#include <math.h>
"""
weave.inline(code, ['transmap', 'data', "N"],
extra_compile_args=['-march=native -malign-double'],
support_code=support)
self.dataDict[i] = data
self.removedCis = False
self.fakedCis = False
def fakeMissing(self):
"""fakes megabases that have no reads. For cis reads fakes with cis
reads at the same distance. For trans fakes with random trans read
at the same diagonal.
"""
from scipy import weave
for i in self.dataDict.keys():
data = self.dataDict[i] * 1.
sm = np.sum(data, axis=0) > 0
mask = sm[:, None] * sm[None, :]
transmask = np.array(self.chromosomeIndex[:, None]
== self.chromosomeIndex[None, :], int)
#mat_img(transmask)
N = len(data)
N, transmask, mask # to remove warning
code = r"""
#line 1467 "binnedData.py"
using namespace std;
for (int i = 0; i < N; i++)
{
for (int j = i; j<N; j++)
{
if ((MASK2(i,j) == 0) )
{
for (int ss = 0; ss < 401; ss++)
{
int k = 0;
int s = rand() % (N - (j-i));
if ((mask[s * N + s + j - i] == 1) &&\
((transmask[s * N + s + j - i] ==\
transmask[i * N + j]) || (ss > 200)) )
{
data[i * N + j] = data[s * N + s + j - i];
data[j * N + i] = data[s * N + s + j - i];
break;
}
if (ss == 400) {printf("Cannot fake one point... \
skipping %d %d \n",i,j);}
}
}
}
}
"""
support = """
#include <math.h>
"""
for _ in xrange(5):
weave.inline(code, ['transmask', 'mask', 'data', "N"],
extra_compile_args=['-march=native'
' -malign-double -O3'],
support_code=support)
data = correct(data)
self.dataDict[i] = data
#mat_img(self.dataDict[i]>0)
def iterativeCorrectByTrans(self, names=None):
"""performs iterative correction by trans data only, corrects cis also
Parameters
----------
names : list of str or None, optional
Keys of datasets to be corrected. By default, all are corrected.
"""
self.appliedOperations["Corrected"] = True
if names is None:
names = self.dataDict.keys()
self.transmap = self.chromosomeIndex[:,
None] != self.chromosomeIndex[None, :]
#mat_img(self.transmap)
for i in names:
data = self.dataDict[i]
self.dataDict[i], self.biasDict[i] = \
numutils.ultracorrectSymmetricByMask(data, self.transmap, M=None)
try:
self.singlesDict[i] /= self.biasDict[i]
except:
print "bla"
def loadWigFile(self, filenames, label, control=None,
wigFileType="Auto", functionToAverage=np.log, internalResolution=1000):
byChromosome = self.genome.parseAnyWigFile(filenames=filenames,
control=control,
wigFileType=wigFileType,
functionToAverage=functionToAverage,
internalResolution=internalResolution)
self.trackDict[label] = np.concatenate(byChromosome)
def loadErezEigenvector1MB(self, erezFolder):
"Loads Erez chromatin domain eigenvector for HindIII"
if self.resolution != 1000000:
raise StandardError("Erez eigenvector is only at 1MB resolution")
if self.genome.folderName != "hg18":
raise StandardError("Erez eigenvector is for hg18 only!")
folder = os.path.join(erezFolder, "GM-combined.ctgDATA1.ctgDATA1."
"1000000bp.hm.eigenvector.tab")
folder2 = os.path.join(erezFolder, "GM-combined.ctgDATA1.ctgDATA1."
"1000000bp.hm.eigenvector2.tab")
eigenvector = np.zeros(self.genome.numBins, float)
for chrom in range(1, 24):
filename = folder.replace("DATA1", str(chrom))
if chrom in [4, 5]:
filename = folder2.replace("DATA1", str(chrom))
mydata = np.array([[float(j) for j in i.split(
)] for i in open(filename).readlines()])
eigenvector[self.genome.chrmStartsBinCont[chrom -
1] + np.array(mydata[:, 1], int)] = mydata[:, 2]
self.trackDict["Erez"] = eigenvector
def loadTanayDomains(self):
"domains, extracted from Tanay paper image"
if self.genome.folderName != "hg18":
raise StandardError("Tanay domains work only with hg18")
data = """0 - 17, 1 - 13.5, 2 - 6.5, 0 - 2, 2 - 2; x - 6.5, 0 - 6,\
1 - 13.5, 0 - 1.5, 1 - 14.5
1 - 8.5, 0 - 2.5, 1 - 14, 2 - 6; 0 - 1.5, 2 - 11.5, 1 - 35
1 - 14, 0-6, 2 - 11; 2 - 4.5, 1 - 5, 0 - 4, 1 -20.5, 0 - 2
0 - 3, 2 - 14; 2 - 5, 1 - 42
2 - 16; 2 - 7, 0 - 3, 1 - 18.5, 0 - 1, 1 - 13, 0 - 2.5
0 - 2, 1 - 6.5, 0 - 7.5, 2 - 4; 2 - 6, 1 - 31
0 - 2, 1 - 11, 2 - 7; 2 - 7.5, 1 - 5, 0 - 3, 1 - 19
2 - 9.5, 0 - 1, 2 - 5; 2 - 4, 1 - 27.5, 0 - 2.5
2 - 11.5, 0 - 2.5, x - 2.5; x - 5, 2 - 8, 0 - 3.5, 1 - 9, 0 - 6
2 - 13.5; 2 - 9, 0 - 3, 1 - 6, 0 - 3.5, 1 - 10.5
0 - 3.5, 2 - 15; 2 - 1, 0 - 7.5, 1 - 13, 0 - 1.5, 1 - 4
0 - 4, 2 - 8; 2 - 2, 0 - 5, 2 - 2.5, 1 - 13, 0 - 6.5, 1 - 3.5
x - 5.5; 2 - 8.5, 0 - 1, 2 - 7, 1 - 16
x - 5.5; 2 - 14.5, 0 - 6, 2 - 3, 1 - 2.5, 2 - 1, 0 - 3
x - 5.5; 2 - 6, 0 - 3.5, 2 - 1.5, 0 - 11.5, 2 - 5.5
0 - 11, 2 - 1; x - 2.5, 2 - 6.5, 0 - 3, 2 - 2, 0 - 3.5
0 - 4, 2 - 1.5, 0 - 1.5; 0 - 19
2 - 5; 2 - 20
0 - 9.5, x - 1.5; x - 1, 2 - 2, 0 - 8.5
0 - 2, 2 - 7; 0 - 8, 2 - 2, 0 - 1
x - 0.5; 2 - 8.5, 0 - 3
x - 4; 0 -12
x - 1.5, 1 - 13, 2 - 5.5; 2 - 2, 1 - 29"""
chroms = [i.split(";") for i in data.split("\n")]
result = []
for chrom in chroms:
result.append([])
cur = result[-1]
for arm in chrom:
for enrty in arm.split(","):
spentry = enrty.split("-")
if "x" in spentry[0]:
value = -1
else:
value = int(spentry[0])
cur += ([value] * int(2 * float(spentry[1])))
cur += [-1] * 2
#lenses = [len(i) for i in result]
domains = np.zeros(self.genome.numBins, int)
for i in xrange(self.genome.chrmCount):
for j in xrange((self.genome.chrmLens[i] / self.resolution)):
domains[self.genome.chrmStartsBinCont[i] + j] = \
result[i][(j * len(result[i]) / ((self.genome.chrmLens[i] /
self.resolution)))]
self.trackDict['TanayDomains'] = domains
| bsd-3-clause |
CDSFinance/zipline | zipline/examples/dual_ema_talib.py | 12 | 4122 | #!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dual Moving Average Crossover algorithm.
This algorithm buys Apple (AAPL) once its short moving average crosses
its long moving average (indicating upwards momentum) and sells
its shares once the averages cross again (indicating downwards
momentum).
"""
from zipline.api import order, record, symbol
# Import exponential moving average from talib wrapper
from zipline.transforms.ta import EMA
def initialize(context):
context.asset = symbol('AAPL')
# Add 2 mavg transforms, one with a long window, one with a short window.
context.short_ema_trans = EMA(timeperiod=20)
context.long_ema_trans = EMA(timeperiod=40)
# To keep track of whether we invested in the stock or not
context.invested = False
def handle_data(context, data):
short_ema = context.short_ema_trans.handle_data(data)
long_ema = context.long_ema_trans.handle_data(data)
if short_ema is None or long_ema is None:
return
buy = False
sell = False
if (short_ema > long_ema).all() and not context.invested:
order(context.asset, 100)
context.invested = True
buy = True
elif (short_ema < long_ema).all() and context.invested:
order(context.asset, -100)
context.invested = False
sell = True
record(AAPL=data[context.asset].price,
short_ema=short_ema[context.asset],
long_ema=long_ema[context.asset],
buy=buy,
sell=sell)
# Note: this function can be removed if running
# this algorithm on quantopian.com
def analyze(context=None, results=None):
import matplotlib.pyplot as plt
import logbook
logbook.StderrHandler().push_application()
log = logbook.Logger('Algorithm')
fig = plt.figure()
ax1 = fig.add_subplot(211)
results.portfolio_value.plot(ax=ax1)
ax1.set_ylabel('Portfolio value (USD)')
ax2 = fig.add_subplot(212)
ax2.set_ylabel('Price (USD)')
# If data has been record()ed, then plot it.
# Otherwise, log the fact that no data has been recorded.
if 'AAPL' in results and 'short_ema' in results and 'long_ema' in results:
results[['AAPL', 'short_ema', 'long_ema']].plot(ax=ax2)
ax2.plot(results.ix[results.buy].index, results.short_ema[results.buy],
'^', markersize=10, color='m')
ax2.plot(results.ix[results.sell].index,
results.short_ema[results.sell],
'v', markersize=10, color='k')
plt.legend(loc=0)
plt.gcf().set_size_inches(18, 8)
else:
msg = 'AAPL, short_ema and long_ema data not captured using record().'
ax2.annotate(msg, xy=(0.1, 0.5))
log.info(msg)
plt.show()
# Note: this if-block should be removed if running
# this algorithm on quantopian.com
if __name__ == '__main__':
from datetime import datetime
import pytz
from zipline.algorithm import TradingAlgorithm
from zipline.utils.factory import load_from_yahoo
# Set the simulation start and end dates.
start = datetime(2014, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(2014, 11, 1, 0, 0, 0, 0, pytz.utc)
# Load price data from yahoo.
data = load_from_yahoo(stocks=['AAPL'], indexes={}, start=start,
end=end)
# Create and run the algorithm.
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data,
identifiers=['AAPL'])
results = algo.run(data).dropna()
# Plot the portfolio and asset data.
analyze(results=results)
| apache-2.0 |
StrellaGroup/erpnext | erpnext/selling/page/sales_funnel/sales_funnel.py | 13 | 4035 | # Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext.accounts.report.utils import convert
import pandas as pd
@frappe.whitelist()
def get_funnel_data(from_date, to_date, company):
active_leads = frappe.db.sql("""select count(*) from `tabLead`
where (date(`modified`) between %s and %s)
and status != "Do Not Contact" and company=%s""", (from_date, to_date, company))[0][0]
active_leads += frappe.db.sql("""select count(distinct contact.name) from `tabContact` contact
left join `tabDynamic Link` dl on (dl.parent=contact.name) where dl.link_doctype='Customer'
and (date(contact.modified) between %s and %s) and status != "Passive" """, (from_date, to_date))[0][0]
opportunities = frappe.db.sql("""select count(*) from `tabOpportunity`
where (date(`creation`) between %s and %s)
and status != "Lost" and company=%s""", (from_date, to_date, company))[0][0]
quotations = frappe.db.sql("""select count(*) from `tabQuotation`
where docstatus = 1 and (date(`creation`) between %s and %s)
and status != "Lost" and company=%s""", (from_date, to_date, company))[0][0]
sales_orders = frappe.db.sql("""select count(*) from `tabSales Order`
where docstatus = 1 and (date(`creation`) between %s and %s) and company=%s""", (from_date, to_date, company))[0][0]
return [
{ "title": _("Active Leads / Customers"), "value": active_leads, "color": "#B03B46" },
{ "title": _("Opportunities"), "value": opportunities, "color": "#F09C00" },
{ "title": _("Quotations"), "value": quotations, "color": "#006685" },
{ "title": _("Sales Orders"), "value": sales_orders, "color": "#00AD65" }
]
@frappe.whitelist()
def get_opp_by_lead_source(from_date, to_date, company):
opportunities = frappe.get_all("Opportunity", filters=[['status', 'in', ['Open', 'Quotation', 'Replied']], ['company', '=', company], ['transaction_date', 'Between', [from_date, to_date]]], fields=['currency', 'sales_stage', 'opportunity_amount', 'probability', 'source'])
if opportunities:
default_currency = frappe.get_cached_value('Global Defaults', 'None', 'default_currency')
cp_opportunities = [dict(x, **{'compound_amount': (convert(x['opportunity_amount'], x['currency'], default_currency, to_date) * x['probability']/100)}) for x in opportunities]
df = pd.DataFrame(cp_opportunities).groupby(['source', 'sales_stage'], as_index=False).agg({'compound_amount': 'sum'})
result = {}
result['labels'] = list(set(df.source.values))
result['datasets'] = []
for s in set(df.sales_stage.values):
result['datasets'].append({'name': s, 'values': [0]*len(result['labels']), 'chartType': 'bar'})
for row in df.itertuples():
source_index = result['labels'].index(row.source)
for dataset in result['datasets']:
if dataset['name'] == row.sales_stage:
dataset['values'][source_index] = row.compound_amount
return result
else:
return 'empty'
@frappe.whitelist()
def get_pipeline_data(from_date, to_date, company):
opportunities = frappe.get_all("Opportunity", filters=[['status', 'in', ['Open', 'Quotation', 'Replied']], ['company', '=', company], ['transaction_date', 'Between', [from_date, to_date]]], fields=['currency', 'sales_stage', 'opportunity_amount', 'probability'])
if opportunities:
default_currency = frappe.get_cached_value('Global Defaults', 'None', 'default_currency')
cp_opportunities = [dict(x, **{'compound_amount': (convert(x['opportunity_amount'], x['currency'], default_currency, to_date) * x['probability']/100)}) for x in opportunities]
df = pd.DataFrame(cp_opportunities).groupby(['sales_stage'], as_index=True).agg({'compound_amount': 'sum'}).to_dict()
result = {}
result['labels'] = df['compound_amount'].keys()
result['datasets'] = []
result['datasets'].append({'name': _("Total Amount"), 'values': df['compound_amount'].values(), 'chartType': 'bar'})
return result
else:
return 'empty' | gpl-3.0 |