repo_name | path | copies | size | content | license
---|---|---|---|---|---|
dmitry-r/incubator-airflow | airflow/hooks/hive_hooks.py | 7 | 28649 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from builtins import zip
from past.builtins import basestring
import collections
import unicodecsv as csv
import itertools
import logging
import re
import subprocess
import time
from tempfile import NamedTemporaryFile
import hive_metastore
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.helpers import as_flattened_list
from airflow.utils.file import TemporaryDirectory
from airflow import configuration
import airflow.security.utils as utils
HIVE_QUEUE_PRIORITIES = ['VERY_HIGH', 'HIGH', 'NORMAL', 'LOW', 'VERY_LOW']
class HiveCliHook(BaseHook):
"""Simple wrapper around the hive CLI.
It also supports ``beeline``, a lighter CLI that runs over JDBC and is
replacing the heavier traditional CLI. To enable ``beeline``, set the
``use_beeline`` param in the extra field of your connection as in
``{ "use_beeline": true }``.
Note that you can also set default hive CLI parameters by putting
``hive_cli_params`` in the extra field of your connection, as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``.
Parameters passed here can be overridden by run_cli's hive_conf param.
The extra connection parameter ``auth`` gets passed as-is in the ``jdbc``
connection string.
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:type mapred_queue: string
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:type mapred_queue_priority: string
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:type mapred_job_name: string
"""
def __init__(
self,
hive_cli_conn_id="hive_cli_default",
run_as=None,
mapred_queue=None,
mapred_queue_priority=None,
mapred_job_name=None):
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline = conn.extra_dejson.get('use_beeline', False)
self.auth = conn.extra_dejson.get('auth', 'noSasl')
self.conn = conn
self.run_as = run_as
if mapred_queue_priority:
mapred_queue_priority = mapred_queue_priority.upper()
if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
raise AirflowException(
"Invalid Mapred Queue Priority. Valid values are: "
"{}".format(', '.join(HIVE_QUEUE_PRIORITIES)))
self.mapred_queue = mapred_queue
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
def _prepare_cli_cmd(self):
"""
This function creates the command list from available information
"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{conn.host}:{conn.port}/{conn.schema}"
if configuration.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/[email protected]")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = "" # noqa
if conn.extra_dejson.get('proxy_user') == "login" and conn.login:
proxy_user = "hive.server2.proxy.user={0}".format(conn.login)
elif conn.extra_dejson.get('proxy_user') == "owner" and self.run_as:
proxy_user = "hive.server2.proxy.user={0}".format(self.run_as)
jdbc_url += ";principal={template};{proxy_user}"
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = jdbc_url.format(**locals())
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
def _prepare_hiveconf(self, d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d: dict of key/value pairs to pass as ``-hiveconf`` settings
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
zip(
["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()]
)
)
def run_cli(self, hql, schema=None, verbose=True, hive_conf=None):
"""
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(**locals())
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(hql.encode('UTF-8'))
f.flush()
hive_cmd = self._prepare_cli_cmd()
hive_conf_params = self._prepare_hiveconf(hive_conf)
if self.mapred_queue:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.queuename={}'
.format(self.mapred_queue)])
if self.mapred_queue_priority:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.priority={}'
.format(self.mapred_queue_priority)])
if self.mapred_job_name:
hive_conf_params.extend(
['-hiveconf',
'mapred.job.name={}'
.format(self.mapred_job_name)])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(['-f', f.name])
if verbose:
logging.info(" ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir)
self.sp = sp
stdout = ''
while True:
line = sp.stdout.readline()
if not line:
break
stdout += line.decode('UTF-8')
if verbose:
logging.info(line.decode('UTF-8').strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
def test_hql(self, hql):
"""
Test an hql statement using the hive cli and EXPLAIN
"""
create, insert, other = [], [], []
for query in hql.split(';'): # naive
query_original = query
query = query.lower().strip()
if query.startswith('create table'):
create.append(query_original)
elif query.startswith(('set ',
'add jar ',
'create temporary function')):
other.append(query_original)
elif query.startswith('insert'):
insert.append(query_original)
other = ';'.join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = ' '.join(query.split())[:50]
logging.info("Testing HQL [{0} (...)]".format(query_preview))
if query_set == insert:
query = other + '; explain ' + query
else:
query = 'explain ' + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split('\n')[-2]
logging.info(message)
error_loc = re.search(r'(\d+):(\d+)', message)
if error_loc and error_loc.group(1).isdigit():
l = int(error_loc.group(1))
begin = max(l-2, 0)
end = min(l+3, len(query.split('\n')))
context = '\n'.join(query.split('\n')[begin:end])
logging.info("Context :\n {0}".format(context))
else:
logging.info("SUCCESS")
def load_df(
self,
df,
table,
create=True,
recreate=False,
field_dict=None,
delimiter=',',
encoding='utf8',
pandas_kwargs=None, **kwargs):
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param field_dict: mapping from column name to hive data type
:type field_dict: dict
:param encoding: string encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
"""
def _infer_field_types_from_df(df):
DTYPE_KIND_HIVE_TYPE = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING' # void
}
return dict((col, DTYPE_KIND_HIVE_TYPE[dtype.kind]) for col, dtype in df.dtypes.iteritems())
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
if field_dict is None and (create or recreate):
field_dict = _infer_field_types_from_df(df)
df.to_csv(f, sep=delimiter, **pandas_kwargs)
return self.load_file(filepath=f.name,
table=table,
delimiter=delimiter,
field_dict=field_dict,
**kwargs)
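# A minimal usage sketch for load_df (the table name below is a hypothetical
# placeholder; assumes pandas is installed and a working hive CLI connection):
#
#     import pandas as pd
#     df = pd.DataFrame({"name": ["a", "b"], "num": [1, 2]})
#     HiveCliHook().load_df(df, table="default.tmp_names", recreate=True)
#
# Hive column types are inferred from the DataFrame dtypes via
# _infer_field_types_from_df unless an explicit field_dict is given.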
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False,
tblproperties=None):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param filepath: local filepath of the file to load
:type filepath: str
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param delimiter: field delimiter in the file
:type delimiter: str
:param field_dict: A dictionary with the field names in the file
as keys and their Hive types as values
:type field_dict: dict
:param create: whether to create the table if it doesn't exist
:type create: bool
:param overwrite: whether to overwrite the data in table or partition
:type overwrite: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n"
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile\n"
if tblproperties is not None:
tprops = ", ".join(
["'{0}'='{1}'".format(k, v) for k, v in tblproperties.items()])
hql += "TBLPROPERTIES({tprops})\n"
hql += ";"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} "
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals});"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.terminate()
time.sleep(60)
self.sp.kill()
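# A minimal usage sketch for HiveCliHook (the connection id, table name and
# file path below are hypothetical placeholders):
#
#     hook = HiveCliHook(hive_cli_conn_id="hive_cli_default")
#     hook.run_cli("SHOW TABLES;", schema="default")
#     hook.load_file(filepath="/tmp/babynames.csv",
#                    table="default.babynames",
#                    field_dict={"name": "STRING", "num": "INT"})
#
# run_cli writes the HQL to a temporary file and shells out to ``hive -f``
# (or ``beeline -u <jdbc_url>`` when use_beeline is set in the connection extras).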
class HiveMetastoreHook(BaseHook):
""" Wrapper to interact with the Hive Metastore"""
def __init__(self, metastore_conn_id='metastore_default'):
self.metastore_conn = self.get_connection(metastore_conn_id)
self.metastore = self.get_metastore_client()
def __getstate__(self):
# This is for pickling to work despite the thrift hive client not
# being picklable
d = dict(self.__dict__)
del d['metastore']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self):
"""
Returns a Hive thrift client.
"""
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hive_service import ThriftHive
ms = self.metastore_conn
auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
socket = TSocket.TSocket(ms.host, ms.port)
if configuration.get('core', 'security') == 'kerberos' and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", ms.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return ThriftHive.Client(protocol)
def get_conn(self):
return self.metastore
def check_for_partition(self, schema, table, partition):
"""
Checks whether a partition exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
:param table: Name of hive table @partition belongs to
:type table: string
:param partition: Expression that matches the partitions to check for
(eg `a = 'b' AND c = 'd'`)
:type partition: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
self.metastore._oprot.trans.open()
partitions = self.metastore.get_partitions_by_filter(
schema, table, partition, 1)
self.metastore._oprot.trans.close()
if partitions:
return True
else:
return False
def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
:param table: Name of hive table @partition belongs to
:type table: string
:param partition_name: Name of the partition to check for (eg `a=b/c=d`)
:type partition_name: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
self.metastore._oprot.trans.open()
try:
self.metastore.get_partition_by_name(
schema, table, partition_name)
return True
except hive_metastore.ttypes.NoSuchObjectException:
return False
finally:
self.metastore._oprot.trans.close()
def get_table(self, table_name, db='default'):
"""Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
self.metastore._oprot.trans.open()
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
table = self.metastore.get_table(dbname=db, tbl_name=table_name)
self.metastore._oprot.trans.close()
return table
def get_tables(self, db, pattern='*'):
"""
Get metastore table objects for tables matching the pattern
"""
self.metastore._oprot.trans.open()
tables = self.metastore.get_tables(db_name=db, pattern=pattern)
objs = self.metastore.get_table_objects_by_name(db, tables)
self.metastore._oprot.trans.close()
return objs
def get_databases(self, pattern='*'):
"""
Get metastore databases matching the pattern
"""
self.metastore._oprot.trans.open()
dbs = self.metastore.get_databases(pattern)
self.metastore._oprot.trans.close()
return dbs
def get_partitions(
self, schema, table_name, filter=None):
"""
Returns a list of all partitions in a table. Works only
for tables with fewer than 32767 partitions (java short max val).
For subpartitioned tables, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
self.metastore._oprot.trans.open()
table = self.metastore.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = self.metastore.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=32767)
else:
parts = self.metastore.get_partitions(
db_name=schema, tbl_name=table_name, max_parts=32767)
self.metastore._oprot.trans.close()
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
def max_partition(self, schema, table_name, field=None, filter=None):
"""
Returns the maximum value for all partitions in a table. Works only
for tables that have a single partition key. For subpartitioned
tables, we recommend using signal tables.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow', table_name=t)
'2015-01-01'
"""
parts = self.get_partitions(schema, table_name, filter)
if not parts:
return None
elif len(parts[0]) == 1:
field = list(parts[0].keys())[0]
elif not field:
raise AirflowException(
"Please specify the field you want the max "
"value for")
return max([p[field] for p in parts])
def table_exists(self, table_name, db='default'):
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
self.get_table(table_name, db)
return True
except Exception:
return False
class HiveServer2Hook(BaseHook):
"""
Wrapper around the impyla library.
Note that the default authMechanism is PLAIN; to override it you
can specify it in the ``extra`` of your connection in the UI as in
``{"authMechanism": "GSSAPI"}``.
"""
def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
self.hiveserver2_conn_id = hiveserver2_conn_id
def get_conn(self, schema=None):
db = self.get_connection(self.hiveserver2_conn_id)
auth_mechanism = db.extra_dejson.get('authMechanism', 'PLAIN')
kerberos_service_name = None
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
# impyla uses GSSAPI instead of KERBEROS as an auth_mechanism identifier
if auth_mechanism == 'KERBEROS':
logging.warning("Detected deprecated 'KERBEROS' for authMechanism for %s. Please use 'GSSAPI' instead",
self.hiveserver2_conn_id)
auth_mechanism = 'GSSAPI'
from impala.dbapi import connect
return connect(
host=db.host,
port=db.port,
auth_mechanism=auth_mechanism,
kerberos_service_name=kerberos_service_name,
user=db.login,
database=schema or db.schema or 'default')
def get_results(self, hql, schema='default', arraysize=1000):
from impala.error import ProgrammingError
with self.get_conn(schema) as conn:
if isinstance(hql, basestring):
hql = [hql]
results = {
'data': [],
'header': [],
}
cur = conn.cursor()
for statement in hql:
cur.execute(statement)
records = []
try:
# impala Lib raises when no results are returned
# we're silencing here as some statements in the list
# may be `SET` or DDL
records = cur.fetchall()
except ProgrammingError:
logging.debug("get_results returned no records")
if records:
results = {
'data': records,
'header': cur.description,
}
return results
def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000):
schema = schema or 'default'
with self.get_conn(schema) as conn:
with conn.cursor() as cur:
logging.info("Running query: " + hql)
cur.execute(hql)
schema = cur.description
with open(csv_filepath, 'wb') as f:
writer = csv.writer(f,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
if output_header:
writer.writerow([c[0] for c in cur.description])
i = 0
while True:
rows = [row for row in cur.fetchmany(fetch_size) if row]
if not rows:
break
writer.writerows(rows)
i += len(rows)
logging.info("Written {0} rows so far.".format(i))
logging.info("Done. Loaded a total of {0} rows.".format(i))
def get_records(self, hql, schema='default'):
"""
Get a set of records from a Hive query.
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema)['data']
def get_pandas_df(self, hql, schema='default'):
"""
Get a pandas dataframe from a Hive query
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
"""
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c[0] for c in res['header']]
return df
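# A minimal usage sketch for HiveServer2Hook (the connection id and queries
# below are hypothetical placeholders):
#
#     hook = HiveServer2Hook(hiveserver2_conn_id="hiveserver2_default")
#     records = hook.get_records("SELECT * FROM some_db.some_table LIMIT 10")
#     df = hook.get_pandas_df("SELECT col, COUNT(*) FROM some_db.some_table GROUP BY col")
#     hook.to_csv("SELECT * FROM some_db.some_table", csv_filepath="/tmp/export.csv")
#
# All of these helpers open a connection via get_conn(), so the authMechanism
# and kerberos settings described in the class docstring apply to each of them.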
| apache-2.0 |
spectralDNS/shenfun | demo/Stokes.py | 1 | 3177 | r"""Solve Stokes equations using a coupled formulation
The Stokes equations are in strong form
.. math::
-\nabla^2 u - \nabla p &= f \\
\nabla \cdot u &= h \\
u(x, y=\pm 1) &= 0 \\
u(x=2\pi, y) &= u(x=0, y) \\
p(x=2\pi, y) &= p(x=0, y)
where :math:`f` and :math:`h` are given functions of space.
In addition we require :math:`\int p dx = 0`, which is achieved by
fixing the coefficient :math:`\hat{p}_{0, 0} = 0`.
We use a tensorproductspace with Fourier expansions in the x-direction and
a composite Chebyshev or Legendre basis in the y-direction for ``u`` and
a regular (no boundary conditions) Chebyshev or Legendre basis for ``p``.
For the zeroth Fourier wavenumber the assembled coefficient matrix has
two nullspaces. One of these is removed by enforcing the global constraint
on the pressure. The second is removed by fixing :math:`\hat{p}_{0, N-1} = 0`.
"""
import os
import sys
import numpy as np
from sympy import symbols, sin
from shenfun import *
x, y = symbols("x,y", real=True)
# Some right hand side (manufactured solution)
uex = sin(2*y)*(1-y**2)
uey = sin(2*x)*(1-y**2)
pe = -0.1*sin(2*x)
fx = -uex.diff(x, 2) - uex.diff(y, 2) - pe.diff(x, 1)
fy = -uey.diff(x, 2) - uey.diff(y, 2) - pe.diff(y, 1)
h = uex.diff(x, 1) + uey.diff(y, 1)
N = (20, 20)
family = sys.argv[-1] if len(sys.argv) == 2 else 'Legendre'
K0 = FunctionSpace(N[0], 'Fourier', dtype='d', domain=(0, 2*np.pi))
SD = FunctionSpace(N[1], family, bc=(0, 0))
ST = FunctionSpace(N[1], family)
TD = TensorProductSpace(comm, (K0, SD), axes=(1, 0))
Q = TensorProductSpace(comm, (K0, ST), axes=(1, 0))
V = VectorSpace(TD)
VQ = CompositeSpace([V, Q])
up = TrialFunction(VQ)
vq = TestFunction(VQ)
u, p = up
v, q = vq
# Assemble blocks of complete matrix
if family.lower() == 'chebyshev':
A00 = inner(v, -div(grad(u)))
A01 = inner(v, -grad(p))
else:
A00 = inner(grad(v), grad(u))
A01 = inner(div(v), p)
A10 = inner(q, div(u))
# Create block matrix
M = BlockMatrix(A00+A01+A10)
# Get f and h on quad points
fh = Array(VQ, buffer=(fx, fy, h))
f_, h_ = fh
fh_hat = Function(VQ)
f_hat, h_hat = fh_hat
f_hat = inner(v, f_, output_array=f_hat)
h_hat = inner(q, h_, output_array=h_hat)
fh_hat.mask_nyquist()
# Solve problem using integral constraint on pressure
up_hat = M.solve(fh_hat, constraints=((2, 0, 0), (2, N[1]-1, 0)))
up_ = up_hat.backward()
u_, p_ = up_
# Exact solution
ux, uy = Array(V, buffer=(uex, uey))
pe = Array(Q, buffer=pe)
error = [comm.reduce(np.linalg.norm(ux-u_[0])),
comm.reduce(np.linalg.norm(uy-u_[1])),
comm.reduce(np.linalg.norm(pe-p_))]
if comm.Get_rank() == 0:
print('Error u v p')
print(' %2.4e %2.4e %2.4e' %(error[0], error[1], error[2]))
assert np.all(abs(np.array(error)) < 1e-8), error
if 'pytest' not in os.environ:
import matplotlib.pyplot as plt
plt.figure()
X = TD.local_mesh(True)
plt.contourf(X[0], X[1], p_, 100)
plt.figure()
plt.quiver(X[0], X[1], u_[0], u_[1])
plt.figure()
plt.spy(M.diags((0, 0)).toarray()) # The matrix for Fourier given wavenumber
plt.figure()
plt.contourf(X[0], X[1], u_[0], 100)
#plt.show()
| bsd-2-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/tests/plotting/test_groupby.py | 7 | 2591 | #!/usr/bin/env python
# coding: utf-8
import nose
from pandas import Series, DataFrame
import pandas.util.testing as tm
import numpy as np
from pandas.tests.plotting.common import TestPlotBase
""" Test cases for GroupBy.plot """
@tm.mplskip
class TestDataFrameGroupByPlots(TestPlotBase):
def test_series_groupby_plotting_nominally_works(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
with tm.RNGContext(42):
gender = np.random.choice(['male', 'female'], size=n)
weight.groupby(gender).plot()
tm.close()
height.groupby(gender).hist()
tm.close()
# Regression test for GH8733
height.groupby(gender).plot(alpha=0.5)
tm.close()
def test_plotting_with_float_index_works(self):
# GH 7025
df = DataFrame({'def': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'val': np.random.randn(9)},
index=[1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0])
df.groupby('def')['val'].plot()
tm.close()
df.groupby('def')['val'].apply(lambda x: x.plot())
tm.close()
def test_hist_single_row(self):
# GH10214
bins = np.arange(80, 100 + 2, 1)
df = DataFrame({"Name": ["AAA", "BBB"],
"ByCol": [1, 2],
"Mark": [85, 89]})
df["Mark"].hist(by=df["ByCol"], bins=bins)
df = DataFrame({"Name": ["AAA"], "ByCol": [1], "Mark": [85]})
df["Mark"].hist(by=df["ByCol"], bins=bins)
def test_plot_submethod_works(self):
df = DataFrame({'x': [1, 2, 3, 4, 5],
'y': [1, 2, 3, 2, 1],
'z': list('ababa')})
df.groupby('z').plot.scatter('x', 'y')
tm.close()
df.groupby('z')['x'].plot.line()
tm.close()
def test_plot_kwargs(self):
df = DataFrame({'x': [1, 2, 3, 4, 5],
'y': [1, 2, 3, 2, 1],
'z': list('ababa')})
res = df.groupby('z').plot(kind='scatter', x='x', y='y')
# check that a scatter plot is effectively plotted: the axes should
# contain a PathCollection from the scatter plot (GH11805)
self.assertEqual(len(res['a'].collections), 1)
res = df.groupby('z').plot.scatter(x='x', y='y')
self.assertEqual(len(res['a'].collections), 1)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
Mageluer/computational_physics_N2014301040052 | final/code/sk_learn/decision_forest.py | 1 | 1999 | import numpy as np
from sklearn import datasets
from sklearn.cross_validation import train_test_split
import matplotlib.pyplot as pl
from matplotlib.colors import ListedColormap
from sklearn.ensemble import RandomForestClassifier
iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
forest = RandomForestClassifier(criterion='entropy', n_estimators=10, random_state=1, n_jobs=4)
forest.fit(X_train, y_train)
y_pred = forest.predict(X_test)
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
pl.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
pl.xlim(xx1.min(), xx1.max())
pl.ylim(xx2.min(), xx2.max())
# plot all samples
X_test, y_test = X[test_idx, :], y[test_idx]
for idx, cl in enumerate(np.unique(y)):
pl.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl)
# highlight test samples
if test_idx:
X_test, y_test = X[test_idx, :], y[test_idx]
pl.scatter(X_test[:, 0], X_test[:, 1], c='', alpha=1.0, linewidth=1, marker='o', s=55, label='test set')
X_combined = np.vstack((X_train, X_test))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X=X_combined, y=y_combined, classifier=forest, test_idx=range(105,150))
pl.xlabel('petal length ')
pl.ylabel('petal width ')
pl.legend(loc='upper left')
pl.show()
| mit |
gabrielkirsten/cnn_keras | src/main.py | 1 | 9719 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Generic classifier with multiple models
Models -> (Xception, VGG16, VGG19, ResNet50, InceptionV3, MobileNet)
Name: train.py
Author: Gabriel Kirsten Menezes ([email protected])
GitHub: https://github.com/gabrielkirsten/cnn_keras
"""
import time
import os
import argparse
import itertools
import matplotlib.pyplot as plt
import numpy as np
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Model
from keras.layers import Dropout, Flatten, Dense
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import confusion_matrix
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress warnings
# =========================================================
# Constants and hyperparameters
# =========================================================
START_TIME = time.time()
IMG_WIDTH, IMG_HEIGHT = 256, 256
TRAIN_DATA_DIR = "../data/train"
VALIDATION_DATA_DIR = "../data/validation"
BATCH_SIZE = 16
EPOCHS = 50
LEARNING_RATE = 0.0001
CLASS_NAMES = ['ferrugemAsiatica', 'folhaSaudavel', 'fundo', 'manchaAlvo', 'mildio', 'oidio']
# =========================================================
# End of constants and hyperparameters
# =========================================================
def get_args():
"""Read the arguments of the program."""
arg_parse = argparse.ArgumentParser()
arg_parse.add_argument("-a", "--architecture", required=True,
help="Select architecture(Xception, VGG16, VGG19, ResNet50" +
", InceptionV3, MobileNet)",
default=None, type=str)
arg_parse.add_argument("-f", "--fineTuningRate", required=True,
help="Fine tunning rate", default=None, type=int)
return vars(arg_parse.parse_args())
def plot_confusion_matrix(confusion_matrix_to_print, classes,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(confusion_matrix_to_print, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
thresh = confusion_matrix_to_print.max() / 2.
for i, j in itertools.product(range(confusion_matrix_to_print.shape[0]),
range(confusion_matrix_to_print.shape[1])):
plt.text(j, i, format(confusion_matrix_to_print[i, j], 'd'),
horizontalalignment="center",
color="white" if confusion_matrix_to_print[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def make_confusion_matrix_and_plot(validation_generator, file_name, model_final):
"""Predict and plot confusion matrix"""
validation_features = model_final.predict_generator(validation_generator,
validation_generator.samples,
verbose=1)
plt.figure()
plot_confusion_matrix(confusion_matrix(np.argmax(validation_features, axis=1),
validation_generator.classes),
classes=CLASS_NAMES,
title='Confusion matrix - ' + file_name)
plt.savefig('../output_images/' + file_name + '.png')
print("Total time after generate confusion matrix: %s" %
(time.time() - START_TIME))
def main():
"""The main function"""
args = get_args() # read args
if args["fineTuningRate"] != -1:
if args["architecture"] == "Xception":
model = applications.Xception(
weights="imagenet", include_top=False, input_shape=(IMG_WIDTH, IMG_HEIGHT, 3))
elif args["architecture"] == "VGG16":
model = applications.VGG16(
weights="imagenet", include_top=False, input_shape=(IMG_WIDTH, IMG_HEIGHT, 3))
elif args["architecture"] == "VGG19":
model = applications.VGG19(
weights="imagenet", include_top=False, input_shape=(IMG_WIDTH, IMG_HEIGHT, 3))
elif args["architecture"] == "ResNet50":
model = applications.ResNet50(
weights="imagenet", include_top=False, input_shape=(IMG_WIDTH, IMG_HEIGHT, 3))
elif args["architecture"] == "InceptionV3":
model = applications.InceptionV3(
weights="imagenet", include_top=False, input_shape=(IMG_WIDTH, IMG_HEIGHT, 3))
elif args["architecture"] == "MobileNet":
model = applications.MobileNet(
weights="imagenet", include_top=False, input_shape=(IMG_WIDTH, IMG_HEIGHT, 3))
# calculate how many layers won't be retrained according to the fineTuningRate parameter
n_layers = len(model.layers)
last_layers = n_layers - int(n_layers * (args["fineTuningRate"] / 100.))
for layer in model.layers[:last_layers]:
layer.trainable = False
else: # without transfer learning
if args["architecture"] == "Xception":
model = applications.Xception(
weights=None, include_top=False, input_shape=(IMG_WIDTH, IMG_HEIGHT, 3))
elif args["architecture"] == "VGG16":
model = applications.VGG16(
weights=None, include_top=False, input_shape=(IMG_WIDTH, IMG_HEIGHT, 3))
elif args["architecture"] == "VGG19":
model = applications.VGG19(
weights=None, include_top=False, input_shape=(IMG_WIDTH, IMG_HEIGHT, 3))
elif args["architecture"] == "ResNet50":
model = applications.ResNet50(
weights=None, include_top=False, input_shape=(IMG_WIDTH, IMG_HEIGHT, 3))
elif args["architecture"] == "InceptionV3":
model = applications.InceptionV3(
weights=None, include_top=False, input_shape=(IMG_WIDTH, IMG_HEIGHT, 3))
elif args["architecture"] == "MobileNet":
model = applications.MobileNet(
weights=None, include_top=False, input_shape=(IMG_WIDTH, IMG_HEIGHT, 3))
for layer in model.layers:
layer.trainable = True
# Initiate the train and test generators with data augmentation
train_datagen = ImageDataGenerator(
rescale=1. / 255,
horizontal_flip=True,
fill_mode="nearest",
zoom_range=0.3,
width_shift_range=0.3,
height_shift_range=0.3,
rotation_range=30)
train_generator = train_datagen.flow_from_directory(
TRAIN_DATA_DIR,
target_size=(IMG_HEIGHT, IMG_WIDTH),
batch_size=BATCH_SIZE,
shuffle=True,
class_mode="categorical")
test_datagen = ImageDataGenerator(
rescale=1. / 255,
horizontal_flip=True,
fill_mode="nearest",
zoom_range=0.3,
width_shift_range=0.3,
height_shift_range=0.3,
rotation_range=30)
validation_generator = test_datagen.flow_from_directory(
VALIDATION_DATA_DIR,
target_size=(IMG_HEIGHT, IMG_WIDTH),
batch_size=BATCH_SIZE,
shuffle=True,
class_mode="categorical")
# Adding custom Layers
new_custom_layers = model.output
new_custom_layers = Flatten()(new_custom_layers)
new_custom_layers = Dense(1024, activation="relu")(new_custom_layers)
new_custom_layers = Dropout(0.5)(new_custom_layers)
new_custom_layers = Dense(1024, activation="relu")(new_custom_layers)
try:
num_classes = train_generator.num_class
except AttributeError:
# the attribute name differs across Keras versions
num_classes = train_generator.num_classes
predictions = Dense(num_classes, activation="softmax")(new_custom_layers)
# creating the final model
model_final = Model(inputs=model.input, outputs=predictions)
# compile the model
model_final.compile(loss="categorical_crossentropy",
optimizer=optimizers.SGD(lr=LEARNING_RATE, momentum=0.9),
metrics=["accuracy"])
# select .h5 filename
if args["fineTuningRate"] == 0:
file_name = args["architecture"] + \
'_transfer_learning'
elif args["fineTuningRate"] == -1:
file_name = args["architecture"] + \
'_without_transfer_learning'
else:
file_name = args["architecture"] + \
'_fine_tunning_' + str(args["fineTuningRate"])
# Save the model according to the conditions
checkpoint = ModelCheckpoint("../models_checkpoints/" + file_name + ".h5", monitor='val_acc',
verbose=1, save_best_only=True, save_weights_only=False,
mode='auto', period=1)
# Train the model
model_final.fit_generator(
train_generator,
steps_per_epoch=train_generator.samples // BATCH_SIZE,
epochs=EPOCHS,
callbacks=[checkpoint],
validation_data=validation_generator,
validation_steps=validation_generator.samples // BATCH_SIZE)
print "Total time to train: %s" % (time.time() - START_TIME)
validation_generator = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
VALIDATION_DATA_DIR,
batch_size=1,
shuffle=False,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode="categorical")
make_confusion_matrix_and_plot(
validation_generator, file_name, model_final)
if __name__ == '__main__':
main()
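# Example invocation (a sketch; assumes the ../data/train and ../data/validation
# directories configured above exist and contain one sub-folder per class):
#
#     python main.py --architecture VGG16 --fineTuningRate 50
#
# Use --fineTuningRate -1 to train from scratch and 0 for pure transfer
# learning, matching the file-name selection logic in main() above.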
| mit |
bloyl/mne-python | tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py | 10 | 5666 | """
===============================================================
Non-parametric 1 sample cluster statistic on single trial power
===============================================================
This script shows how to estimate significant clusters
in time-frequency power estimates. It uses a non-parametric
statistical procedure based on permutations and cluster
level statistics.
The procedure consists of:
- extracting epochs
- computing single trial power estimates
- baseline correcting the power estimates (power ratios)
- computing stats to see if the ratio deviates from 1.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import permutation_cluster_1samp_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
tmin, tmax, event_id = -0.3, 0.6, 1
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
# Load condition 1
event_id = 1
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
# just use right temporal sensors for speed
epochs.pick_channels(mne.read_vectorview_selection('Right-temporal'))
evoked = epochs.average()
# Factor to down-sample the temporal dimension of the TFR computed by
# tfr_morlet. Decimation occurs after frequency decomposition and can
# be used to reduce memory usage (and possibly computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 5
freqs = np.arange(8, 40, 2) # define frequencies of interest
sfreq = raw.info['sfreq'] # sampling in Hz
tfr_epochs = tfr_morlet(epochs, freqs, n_cycles=4., decim=decim,
average=False, return_itc=False, n_jobs=1)
# Baseline power
tfr_epochs.apply_baseline(mode='logratio', baseline=(-.100, 0))
# Crop in time to keep only what is between 0 and 400 ms
evoked.crop(-0.1, 0.4)
tfr_epochs.crop(-0.1, 0.4)
epochs_power = tfr_epochs.data
###############################################################################
# Define adjacency for statistics
# -------------------------------
# To compute a cluster-corrected value, we need a suitable definition
# for the adjacency/neighborhood of our values. So we first compute the
# sensor adjacency, then combine that with a grid/lattice adjacency
# assumption for the time-frequency plane:
sensor_adjacency, ch_names = mne.channels.find_ch_adjacency(
tfr_epochs.info, 'grad')
# Subselect the channels we are actually using
use_idx = [ch_names.index(ch_name.replace(' ', ''))
for ch_name in tfr_epochs.ch_names]
sensor_adjacency = sensor_adjacency[use_idx][:, use_idx]
assert sensor_adjacency.shape == \
(len(tfr_epochs.ch_names), len(tfr_epochs.ch_names))
assert epochs_power.data.shape == (
len(epochs), len(tfr_epochs.ch_names),
len(tfr_epochs.freqs), len(tfr_epochs.times))
adjacency = mne.stats.combine_adjacency(
sensor_adjacency, len(tfr_epochs.freqs), len(tfr_epochs.times))
# our adjacency is square with each dim matching the data size
assert adjacency.shape[0] == adjacency.shape[1] == \
len(tfr_epochs.ch_names) * len(tfr_epochs.freqs) * len(tfr_epochs.times)
###############################################################################
# Compute statistic
# -----------------
threshold = 3.
n_permutations = 50 # Warning: 50 is way too small for real-world analysis.
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_1samp_test(epochs_power, n_permutations=n_permutations,
threshold=threshold, tail=0,
adjacency=adjacency,
out_type='mask', verbose=True)
###############################################################################
# View time-frequency plots
# -------------------------
evoked_data = evoked.data
times = 1e3 * evoked.times
plt.figure()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
if p_val <= 0.05:
T_obs_plot[c] = T_obs[c]
# Just plot one channel's data
ch_idx, f_idx, t_idx = np.unravel_index(
np.nanargmax(np.abs(T_obs_plot)), epochs_power.shape[1:])
# ch_idx = tfr_epochs.ch_names.index('MEG 1332') # to show a specific one
vmax = np.max(np.abs(T_obs))
vmin = -vmax
plt.subplot(2, 1, 1)
plt.imshow(T_obs[ch_idx], cmap=plt.cm.gray,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', vmin=vmin, vmax=vmax)
plt.imshow(T_obs_plot[ch_idx], cmap=plt.cm.RdBu_r,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', vmin=vmin, vmax=vmax)
plt.colorbar()
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title(f'Induced power ({tfr_epochs.ch_names[ch_idx]})')
ax2 = plt.subplot(2, 1, 2)
evoked.plot(axes=[ax2], time_unit='s')
plt.show()
| bsd-3-clause |
b-carter/numpy | doc/source/conf.py | 3 | 10081 | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys, os, re
# Check Sphinx version
import sphinx
if sphinx.__version__ < "1.2.1":
raise RuntimeError("Sphinx 1.2.1 or newer required")
needs_sphinx = '1.0'
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('../sphinxext'))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.autosummary',
'sphinx.ext.graphviz',
'matplotlib.sphinxext.plot_directive']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# General substitutions.
project = 'NumPy'
copyright = '2008-2017, The SciPy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
import numpy
# The short X.Y version (including .devXXXX, rcX, b1 suffixes if present)
version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', numpy.__version__)
version = re.sub(r'(\.dev\d+).*?$', r'\1', version)
# The full version, including alpha/beta/rc tags.
release = numpy.__version__
print("%s %s" % (version, release))
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
if not os.path.isdir(themedir):
raise RuntimeError("Get the scipy-sphinx-theme first, "
"via git submodule init && git submodule update")
html_theme = 'scipy'
html_theme_path = [themedir]
if 'scipyorg' in tags:
# Build for the scipy.org website
html_theme_options = {
"edit_link": True,
"sidebar": "right",
"scipy_org_logo": True,
"rootlinks": [("http://scipy.org/", "Scipy.org"),
("http://docs.scipy.org/", "Docs")]
}
else:
# Default build
html_theme_options = {
"edit_link": False,
"sidebar": "left",
"scipy_org_logo": False,
"rootlinks": []
}
html_sidebars = {'index': 'indexsidebar.html'}
html_additional_pages = {
'index': 'indexcontent.html',
}
html_title = "%s v%s Manual" % (project, version)
html_static_path = ['_static']
html_last_updated_fmt = '%b %d, %Y'
html_use_modindex = True
html_copy_source = False
html_domain_indices = False
html_file_suffix = '.html'
htmlhelp_basename = 'numpy'
pngmath_use_preview = True
pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
plot_html_show_formats = False
plot_html_show_source_link = False
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Written by the NumPy community'
latex_documents = [
('reference/index', 'numpy-ref.tex', 'NumPy Reference',
_stdauthor, 'manual'),
('user/index', 'numpy-user.tex', 'NumPy User Guide',
_stdauthor, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters
% header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Texinfo output
# -----------------------------------------------------------------------------
texinfo_documents = [
("contents", 'numpy', 'NumPy Documentation', _stdauthor, 'NumPy',
"NumPy: array processing for numbers, strings, records, and objects.",
'Programming',
1),
]
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {
'python': ('https://docs.python.org/dev', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('http://matplotlib.org', None)
}
# -----------------------------------------------------------------------------
# NumPy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Make numpydoc to generate plots for example sections
numpydoc_use_plots = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
import glob
autosummary_generate = glob.glob("reference/*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
# -----------------------------------------------------------------------------
# Plots
# -----------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
np.random.seed(0)
"""
plot_include_source = True
plot_formats = [('png', 100), 'pdf']
import math
phi = (math.sqrt(5) + 1)/2
plot_rcparams = {
'font.size': 8,
'axes.titlesize': 8,
'axes.labelsize': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 8,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
}
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
for name in ['sphinx.ext.linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print("NOTE: linkcode extension not found -- no links to source generated")
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except Exception:
return None
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except Exception:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(numpy.__file__))
if 'dev' in numpy.__version__:
return "http://github.com/numpy/numpy/blob/master/numpy/%s%s" % (
fn, linespec)
else:
return "http://github.com/numpy/numpy/blob/v%s/numpy/%s%s" % (
numpy.__version__, fn, linespec)
| bsd-3-clause |
danstowell/autoencoder-specgram | util.py | 1 | 2533 |
# utility functions
import numpy as np
from numpy import float32
import os, errno
from scikits.audiolab import Sndfile
from scikits.audiolab import Format
from matplotlib.mlab import specgram
from userconfig import *
########################################################
def standard_specgram(signal):
"Return specgram matrix, made using the audio-layer config"
return np.array(specgram(signal, NFFT=audioframe_len, noverlap=audioframe_len-audioframe_stride, window=np.hamming(audioframe_len))[0][specbinlow:specbinlow+specbinnum,:], dtype=float32)
def load_soundfile(inwavpath, startpossecs, maxdursecs=None):
"""Loads audio data, optionally limiting to a specified start position and duration.
Must be SINGLE-CHANNEL and matching our desired sample-rate."""
framelen = 4096
hopspls = framelen
unhopspls = framelen - hopspls
if (framelen % wavdownsample) != 0: raise ValueError("framelen needs to be a multiple of wavdownsample: %i, %i" % (framelen, wavdownsample))
if (hopspls % wavdownsample) != 0: raise ValueError("hopspls needs to be a multiple of wavdownsample: %i, %i" % (hopspls , wavdownsample))
if maxdursecs==None:
maxdursecs = 9999
sf = Sndfile(inwavpath, "r")
splsread = 0
framesread = 0
if sf.channels != 1: raise ValueError("Sound file %s has multiple channels (%i) - mono required." % (inwavpath, sf.channels))
timemax_spls = int(maxdursecs * sf.samplerate)
if sf.samplerate != (srate * wavdownsample):
raise ValueError("Sample rate mismatch: we expect %g, file has %g" % (srate, sf.samplerate))
if startpossecs > 0:
sf.seek(startpossecs * sf.samplerate) # note: returns IOError if beyond the end
audiodata = np.array([], dtype=np.float32)
while(True):
try:
if splsread==0:
chunk = sf.read_frames(framelen)[::wavdownsample]
splsread += framelen
else:
chunk = np.hstack((chunk[:unhopspls], sf.read_frames(hopspls)[::wavdownsample] ))
splsread += hopspls
framesread += 1
if framesread % 25000 == 0:
print("Read %i frames" % framesread)
if len(chunk) != (framelen / wavdownsample):
print("Not read sufficient samples - returning")
break
chunk = np.array(chunk, dtype=np.float32)
audiodata = np.hstack((audiodata, chunk))
if splsread >= timemax_spls:
break
except RuntimeError:
break
sf.close()
return audiodata
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
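# A minimal usage sketch (the wav path is a hypothetical placeholder; the file
# must be mono and recorded at srate * wavdownsample, as enforced above):
#
#     audio = load_soundfile('example_mono.wav', startpossecs=0, maxdursecs=10)
#     spec = standard_specgram(audio)
#     print(spec.shape)   # (specbinnum, number_of_frames)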
| mit |
DGrady/pandas | pandas/tests/io/msgpack/test_extension.py | 14 | 2204 | from __future__ import print_function
import array
import pandas.io.msgpack as msgpack
from pandas.io.msgpack import ExtType
from .common import frombytes, tobytes
def test_pack_ext_type():
def p(s):
packer = msgpack.Packer()
packer.pack_ext_type(0x42, s)
return packer.bytes()
assert p(b'A') == b'\xd4\x42A' # fixext 1
assert p(b'AB') == b'\xd5\x42AB' # fixext 2
assert p(b'ABCD') == b'\xd6\x42ABCD' # fixext 4
assert p(b'ABCDEFGH') == b'\xd7\x42ABCDEFGH' # fixext 8
assert p(b'A' * 16) == b'\xd8\x42' + b'A' * 16 # fixext 16
assert p(b'ABC') == b'\xc7\x03\x42ABC' # ext 8
assert p(b'A' * 0x0123) == b'\xc8\x01\x23\x42' + b'A' * 0x0123 # ext 16
assert (p(b'A' * 0x00012345) ==
b'\xc9\x00\x01\x23\x45\x42' + b'A' * 0x00012345) # ext 32
def test_unpack_ext_type():
def check(b, expected):
assert msgpack.unpackb(b) == expected
check(b'\xd4\x42A', ExtType(0x42, b'A')) # fixext 1
check(b'\xd5\x42AB', ExtType(0x42, b'AB')) # fixext 2
check(b'\xd6\x42ABCD', ExtType(0x42, b'ABCD')) # fixext 4
check(b'\xd7\x42ABCDEFGH', ExtType(0x42, b'ABCDEFGH')) # fixext 8
check(b'\xd8\x42' + b'A' * 16, ExtType(0x42, b'A' * 16)) # fixext 16
check(b'\xc7\x03\x42ABC', ExtType(0x42, b'ABC')) # ext 8
check(b'\xc8\x01\x23\x42' + b'A' * 0x0123,
ExtType(0x42, b'A' * 0x0123)) # ext 16
check(b'\xc9\x00\x01\x23\x45\x42' + b'A' * 0x00012345,
ExtType(0x42, b'A' * 0x00012345)) # ext 32
def test_extension_type():
def default(obj):
print('default called', obj)
if isinstance(obj, array.array):
typecode = 123 # application specific typecode
data = tobytes(obj)
return ExtType(typecode, data)
raise TypeError("Unknwon type object %r" % (obj, ))
def ext_hook(code, data):
print('ext_hook called', code, data)
assert code == 123
obj = array.array('d')
frombytes(obj, data)
return obj
obj = [42, b'hello', array.array('d', [1.1, 2.2, 3.3])]
s = msgpack.packb(obj, default=default)
obj2 = msgpack.unpackb(s, ext_hook=ext_hook)
assert obj == obj2
| bsd-3-clause |
lappsgrid-incubator/GalaxyMods | tools/plotting/plotter.py | 4 | 2247 | #!/usr/bin/env python
# python plotter.py input_file column bins output_file style
import sys, os
import matplotlib; matplotlib.use('Agg')
from pylab import *
assert sys.version_info[:2] >= ( 2, 4 )
def stop_err(msg):
sys.stderr.write(msg)
sys.exit()
if __name__ == '__main__':
# parse the arguments
if len(sys.argv) != 6:
stop_err('Usage: python histogram.py input_file column bins output_file style')
sys.exit()
mode = sys.argv[5]
HIST = mode == 'hist'
try:
col = int(float(sys.argv[2]))
if HIST:
bin = int(float(sys.argv[3]))
else:
# hack, this parameter is the plotting style for scatter plots
if sys.argv[3] == 'P':
style = 'o'
elif sys.argv[3] == 'LP':
style = 'o-'
else:
style = '-'
except:
        msg = 'Parameters were not numbers %s, %s' % (sys.argv[2], sys.argv[3])
stop_err(msg)
# validate arguments
inp_file = sys.argv[1]
out_file = sys.argv[4]
if HIST:
print "Histogram on column %s (%s bins)" % (col, bin)
else:
print "Scatterplot on column %s" % (col)
xcol= col -1
# read the file
values = []
try:
count = 0
for line in file(inp_file):
count += 1
line = line.strip()
if line and line[0] != '#':
values.append(float(line.split()[xcol]))
    except Exception, e:
        stop_err("Non numerical data at line %d, column %d: %s" % (count, col, e))
# plot the data
if HIST:
n, bins, patches = hist(values, bins=bin, normed=0)
else:
plot(values, style)
xlabel('values')
ylabel('counts')
if HIST:
title('Histogram of values over column %s (%s bins)' % (col, len(bins)) )
else:
title('Scatterplot over column %s' % col )
grid(True)
# the plotter detects types by file extension
png_out = out_file + '.png' # force it to png
savefig(png_out)
# shuffle it back and clean up
data = file(png_out, 'rb').read()
fp = open(out_file, 'wb')
fp.write(data)
fp.close()
os.remove(png_out)
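# Hypothetical example invocations (file names are placeholders), matching the
# argument order parsed above: input_file column bins output_file style
#
#   python plotter.py input.tab 2 30 out.png hist     # histogram of column 2 with 30 bins
#   python plotter.py input.tab 2 P  out.png scatter  # scatterplot of column 2, points only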
| apache-2.0 |
slinderman/theano_pyglm | pyglm/plotting/roc.py | 1 | 2752 | """
Plotting for ROC curves and link prediction tests
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.spatial
def plot_roc_curve(tprs, fprs, color='k', ax=None, subsample=1, label=None):
""" Plot an ROC curve for the given true and false positive rates.
If multiple rates are given, e.g. corresponding to multiple
networks inferred using the same procedure, compute error bars
(both horizontal and vertical) for the ROC curve.
Plot in specified color, default black.
    Plot on the specified axes, or create a new axis if necessary.
    Subsample allows you to subsample the error bars.
"""
if ax is None:
plt.figure(frameon=False)
ax = plt.subplot(111)
if not isinstance(tprs, list):
tprs = [tprs]
if not isinstance(fprs, list):
fprs = [fprs]
# Make sure all tprs and fprs are the same length
N = tprs[0].size
for (i,tpr) in enumerate(tprs):
if not tpr.size == N:
raise Exception("All TPRs must be vectors of length %d." % N)
tprs[i] = tpr.reshape((N,1))
for (i,fpr) in enumerate(fprs):
if not fpr.size == N:
raise Exception("All FPRs must be vectors of length %d." % N)
fprs[i] = fpr.reshape((N,1))
# Stack tprs and fprs to make matrices
tprs = np.concatenate(tprs, axis=1)
fprs = np.concatenate(fprs, axis=1)
# Compute error bars (for both tpr and fpr)
mean_tprs = np.mean(tprs, axis=1)
std_tprs = np.std(tprs, axis=1)
mean_fprs = np.mean(fprs, axis=1)
std_fprs = np.std(fprs, axis=1)
# Plot the error bars
# plt.errorbar(mean_fprs, mean_tprs,
# xerr=std_fprs, yerr=std_tprs,
# ecolor=color, color=color,
# axes=ax)
err = np.concatenate([np.array([mean_fprs-std_fprs, mean_tprs+std_tprs]).T,
np.flipud(np.array([mean_fprs+std_fprs, mean_tprs-std_tprs]).T)])
from matplotlib.patches import PathPatch
from matplotlib.path import Path
plt.gca().add_patch(PathPatch(Path(err),
facecolor=color,
alpha=0.5,
edgecolor='none',
linewidth=0))
# plt.plot(err[:,0], err[:, 1],
# linestyle='--',
# color=color)
plt.plot(mean_fprs, mean_tprs,
linestyle='-',
color=color,
linewidth=2,
label=label)
# Plot the random guessing line
plt.plot([0,1],[0,1], '--k')
#plt.legend(loc='lower right')
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.xlim((-0.01,1))
plt.ylim((0.0,1))
return ax
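if __name__ == "__main__":
    # Hypothetical demo (not part of the original module): three noisy ROC
    # curves sampled at a shared grid of false positive rates, so that the
    # mean curve and the shaded error band above are both exercised.
    npts = 50
    fprs = [np.linspace(0, 1, npts) for _ in range(3)]
    tprs = [np.clip(np.sqrt(f) + 0.05 * np.random.randn(npts), 0, 1)
            for f in fprs]
    plot_roc_curve(tprs, fprs, color='b', label='demo')
    plt.show()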
| mit |
mjirik/imtools | setup.py | 1 | 3615 | # Fallowing command is used to upload to pipy
# bumpversion patch
# python setup.py register sdist upload
# Always prefer setuptools over distutils
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
setup(
name='imtools',
description='3D data processing toolbox',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='2.1.0',
url='https://github.com/mjirik/imtools',
author='Miroslav Jirik and Tomas Ryba',
author_email='[email protected]',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Bio-Informatics',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
# 'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.2',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.6',
],
# What does your project relate to?
keywords='dicom, 3D, tools, image processing',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['dist', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
# install_requires=['numpy', 'scipy', 'matplotlib'],
# removed due to problems with conda build - helped a lot
install_requires=[],
    # 'SimpleITK'], # Removed because of errors when pip is installing
dependency_links=[],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
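# Hypothetical local workflow sketch (not part of the package metadata):
#
#     pip install -e .           # editable install for development
#     python setup.py sdist      # build the source distribution before uploading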
| mit |
alonsopg/AuthorProfiling | src/ef_sentiword_it.py | 1 | 4044 | #!/usr/bin/env python
# -*- coding: utf-8
from __future__ import print_function
import argparse
import codecs
import cPickle as pickle
import numpy as np
import csv
import os
import pandas as pd
import re
from collections import Counter
from load_tweets import load_tweets
NAME='ef_sentiword'
prefix='sentiword'
if __name__ == "__main__":
    # Command-line options
p = argparse.ArgumentParser(NAME)
p.add_argument("DIR",default=None,
action="store", help="Directory with corpus")
p.add_argument("LIST",default=None,
action="store", help="File with list of words")
p.add_argument("-d", "--dir",
action="store", dest="dir",default="feats",
help="Default directory for features [feats]")
p.add_argument("-p", "--pref",
action="store", dest="pref",default=prefix,
help="Prefix to save the file of features %s"%prefix)
p.add_argument("--mix",
action="store_true", dest="mix",default=True,
help="Mix tweets into pefiles")
p.add_argument("--format",
action="store_true", dest="format",default="pan15",
help="Change to pan14 to use format from 2015 [feats]")
p.add_argument("-v", "--verbose",
action="store_true", dest="verbose",
help="Verbose mode [Off]")
opts = p.parse_args()
if opts.verbose:
def verbose(*args):
print(*args)
else:
verbose = lambda *a: None
    # Collect the tweets and their identifiers (tweet id and user id)
tweets,ids=load_tweets(opts.DIR,opts.format,mix=opts.mix)
    # Print some information about the tweets
if opts.verbose:
for i,tweet in enumerate(tweets[:10]):
verbose('Tweet example',i+1,tweet[:100])
verbose("Total tweets : ",len(tweets))
try:
verbose("Total usuarios : ",len(set([id for x,id in ids])))
except ValueError:
verbose("Total usuarios : ",len(ids))
    # Compute the features
    # - Load the word list
df = pd.read_csv(opts.LIST, sep = '\t')
pos_score = df[df.PosScore > 0].SynsetTerms.unique()
words_pos = set(term.split("#", 1)[0].replace("_", " ") for term in pos_score)
_regex_1 = re.compile(r"(\b{}\b)".format(r"\b|\b".join(words_pos)))
neg_score = df[df.NegScore > 0].SynsetTerms.unique()
words_neg = set(term.split("#", 1)[0].replace("_", " ") for term in neg_score)
_regex_2 = re.compile(r"(\b{}\b)".format(r"\b|\b".join(words_neg)))
#print(tweets)
count_words_pos = [
_regex_1.findall(sublista)
for sublista in tweets
]
#print('pos count')
pos_count = [
sum(1 for ocurrencia in _regex_1.findall(sublista))
for sublista in tweets
]
#print('.......')
count_words_neg = [
_regex_2.findall(sublista_2)
for sublista_2 in tweets
]
#print('neg count')
neg_count = [
sum(1 for ocurrencia_2 in _regex_2.findall(sublista_2))
for sublista_2 in tweets
]
    #print ('\nThis is the positive count: ',pos_count)
    #print ('Recognized words: ', count_words_pos)
    # print '\n********************************************************\n'
    #print ('This is the negative count',neg_count)
    #print ('Recognized words in the negative:', count_words_neg)
feats = np.vstack((np.asarray([neg_count]),np.asarray([pos_count])))
feats = feats.T
print(feats)
    # Save the feature matrix
with open(os.path.join(opts.dir,opts.pref+'.dat'),'wb') as idxf:
pickle.dump(feats, idxf, pickle.HIGHEST_PROTOCOL)
    # Print information about the matrix
verbose("Total de features :",feats.shape[1])
verbose("Total de renglones:",feats.shape[0])
    # Save the row indices of the matrix (user or tweet, user)
with open(os.path.join(opts.dir,opts.pref+'.idx'),'wb') as idxf:
pickle.dump(ids, idxf, pickle.HIGHEST_PROTOCOL)
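# Hypothetical example invocation (paths are placeholders; the word list is
# expected to be a tab-separated SentiWordNet-style file with PosScore,
# NegScore and SynsetTerms columns):
#
#   python ef_sentiword_it.py corpus_dir/ sentiword_list.txt -d feats -p sentiword -v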
| gpl-2.0 |
3manuek/scikit-learn | sklearn/linear_model/omp.py | 127 | 30417 | """Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
# check_finite=False is an optimization available only in scipy >=0.12
solve_triangular_args = {'check_finite': False}
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=X.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
    This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=Gram.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = np.sqrt(1 - v)
Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
indices[n_active], indices[lam] = indices[lam], indices[n_active]
Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
copy_X=True, return_path=False,
return_n_iter=False):
"""Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems.
An instance of the problem has the form:
When parametrized by the number of non-zero coefficients using
`n_nonzero_coefs`:
argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
When parametrized by error using the parameter `tol`:
argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
X : array, shape (n_samples, n_features)
Input data. Columns are assumed to have unit norm.
y : array, shape (n_samples,) or (n_samples, n_targets)
Input targets
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
precompute : {True, False, 'auto'},
Whether to perform precomputations. Improves performance when n_targets
or n_samples is very large.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp_gram
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
X = check_array(X, order='F', copy=copy_X)
copy_X = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1: # subsequent targets will be affected
copy_X = True
if n_nonzero_coefs is None and tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > X.shape[1]:
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if precompute == 'auto':
precompute = X.shape[0] > X.shape[1]
if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if tol is not None:
norms_squared = np.sum((y ** 2), axis=0)
else:
norms_squared = None
return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
copy_Gram=copy_X, copy_Xy=False,
return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(
X, y[:, k], n_nonzero_coefs, tol,
copy_X=copy_X, return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
norms_squared=None, copy_Gram=True,
copy_Xy=True, return_path=False,
return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data: X.T * X
Xy : array, shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like, shape (n_targets,)
Squared L2 norms of the lines of y. Required if tol is not None.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
# or subsequent target will be affected
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order '
'to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
else:
coef = np.zeros((len(Gram), Xy.shape[1]))
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(
Gram, Xy[:, k], n_nonzero_coefs,
norms_squared[k] if tol is not None else None, tol,
copy_Gram=copy_Gram, copy_Xy=copy_Xy,
return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
"""Orthogonal Matching Pursuit model (OMP)
Parameters
----------
n_nonzero_coefs : int, optional
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, optional
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
precompute : {True, False, 'auto'}, default 'auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when `n_targets` or `n_samples` is
very large. Note that if you already have such matrices, you can pass
them directly to the fit method.
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
coef_ : array, shape (n_features,) or (n_features, n_targets)
parameter vector (w in the formula)
intercept_ : float or array, shape (n_targets,)
independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
decomposition.sparse_encode
"""
def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
normalize=True, precompute='auto'):
self.n_nonzero_coefs = n_nonzero_coefs
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std, Gram, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
coef_, self.n_iter_ = orthogonal_mp(
X, y, self.n_nonzero_coefs_, self.tol,
precompute=False, copy_X=True,
return_n_iter=True)
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
coef_, self.n_iter_ = orthogonal_mp_gram(
Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol, norms_squared=norms_sq,
copy_Gram=True, copy_Xy=True,
return_n_iter=True)
self.coef_ = coef_.T
self._set_intercept(X_mean, y_mean, X_std)
return self
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
fit_intercept=True, normalize=True, max_iter=100):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied. If
False, they may be overwritten.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
        Maximum number of iterations to perform, therefore maximum features
to include. 100 by default.
Returns
-------
residues: array, shape (n_samples, max_features)
Residues of the prediction on the test data
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
precompute=False, copy_X=False,
return_path=True)
if coefs.ndim == 1:
coefs = coefs[:, np.newaxis]
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
"""Cross-validated Orthogonal Matching Pursuit model (OMP)
Parameters
----------
copy : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
max_iter : integer, optional
        Maximum number of iterations to perform, therefore maximum features
to include. 10% of ``n_features`` but at least 5 if available.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
intercept_ : float or array, shape (n_targets,)
Independent term in decision function.
coef_ : array, shape (n_features,) or (n_features, n_targets)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
Number of active features across every target for the model refit with
the best hyperparameters got by cross-validating across all folds.
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
OrthogonalMatchingPursuit
LarsCV
LassoLarsCV
decomposition.sparse_encode
"""
def __init__(self, copy=True, fit_intercept=True, normalize=True,
max_iter=None, cv=None, n_jobs=1, verbose=False):
self.copy = copy
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
y : array-like, shape [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True)
X = as_float_array(X, copy=False, force_all_finite=False)
cv = check_cv(self.cv, X, y, classifier=False)
max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
if not self.max_iter
else self.max_iter)
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_omp_path_residues)(
X[train], y[train], X[test], y[test], self.copy,
self.fit_intercept, self.normalize, max_iter)
for train, test in cv)
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
for fold in cv_paths])
best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
self.n_nonzero_coefs_ = best_n_nonzero_coefs
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
fit_intercept=self.fit_intercept,
normalize=self.normalize)
omp.fit(X, y)
self.coef_ = omp.coef_
self.intercept_ = omp.intercept_
self.n_iter_ = omp.n_iter_
return self
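if __name__ == "__main__":
    # Hypothetical usage sketch (not part of scikit-learn): recover a 5-sparse
    # code over a random unit-norm dictionary with both the functional and the
    # estimator interfaces defined above.
    rng = np.random.RandomState(0)
    X = rng.randn(60, 100)
    X /= np.sqrt(np.sum(X ** 2, axis=0))  # OMP assumes unit-norm columns
    true_coef = np.zeros(100)
    support = rng.permutation(100)[:5]
    true_coef[support] = rng.randn(5)
    y = np.dot(X, true_coef)
    coef = orthogonal_mp(X, y, n_nonzero_coefs=5)
    print("functional support: %s" % np.flatnonzero(coef))
    omp_est = OrthogonalMatchingPursuit(n_nonzero_coefs=5).fit(X, y)
    print("estimator support:  %s" % np.flatnonzero(omp_est.coef_))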
| bsd-3-clause |
awsteiner/nstar-plot | crust_plot.py | 1 | 4466 | """
-------------------------------------------------------------------
Copyright (C) 2015-2020, Andrew W. Steiner
This neutron star plot is free software; you can redistribute it
and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 3 of
the License, or (at your option) any later version.
This neutron star plot is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this neutron star plot. If not, see
<http://www.gnu.org/licenses/>.
-------------------------------------------------------------------
"""
import matplotlib.pyplot as plot
import numpy
import urllib.request
import matplotlib.gridspec as gridspec
import o2sclpy
from load_crust import load_crust
lc=load_crust()
lc.load()
def latex_float(f):
float_str = "{0:.2g}".format(f)
if "e" in float_str:
base, exponent = float_str.split("e")
return r"${0} \times 10^{{{1}}}$".format(base, int(exponent))
else:
return float_str
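# Hypothetical examples of the formatter above (values are illustrative only):
#   latex_float(3.2e11) -> '$3.2 \times 10^{11}$'
#   latex_float(0.5)    -> '0.5'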
plot.rc('text',usetex=True)
plot.rc('font',family='serif')
plot.rcParams['lines.linewidth']=0.5
fig=plot.figure(figsize=(6.0,6.0))
fig.set_facecolor('white')
gs1=gridspec.GridSpec(2,1)
ax1=fig.add_subplot(gs1[0])
ax2=fig.add_subplot(gs1[1])
gs1.update(hspace=0.4,left=0.01,bottom=0.1,right=0.94,top=0.95)
ax1.minorticks_on()
ax1.tick_params('both',length=10,width=1,which='major')
ax1.tick_params('both',length=5,width=1,which='minor')
ax1.tick_params('y',length=0,which='both')
ax2.minorticks_on()
ax2.tick_params('both',length=10,width=1,which='major')
ax2.tick_params('both',length=5,width=1,which='minor')
ax2.tick_params('y',length=0,which='both')
ax1.set_xlim([numpy.max(lc.r_nnuc),numpy.min(lc.r_nnuc)])
ax1.set_ylim([0,1])
ax1.plot(lc.r_nn,lc.w_nn,marker='o',lw=0,mfc=(0.9,0.9,1.0),
mec=(0.9,0.9,1.0),mew=0.0,ms=2.0)
for i in range(0,len(lc.r_nnuc)):
ax1.plot(lc.r_nnuc[i],lc.w_nnuc[i],
marker='.',lw=0,mfc=(0.75,0.75,1.0),mec=(0.75,0.75,1.0),
ms=lc.Rn_nnuc[i])
ax1.text(11.05,0.95,r'$\rho~(\mathrm{g}/\mathrm{cm}^3)$',fontsize=16,
va='center',ha='center',
bbox=dict(facecolor='white',lw=0))
ax1.text(10.8,1.05,latex_float(lc.rho_108),fontsize=12,
va='center',ha='center')
ax1.text(10.9,1.05,latex_float(lc.rho_109),fontsize=12,
va='center',ha='center')
ax1.text(11.0,1.05,latex_float(lc.rho_110),fontsize=12,
va='center',ha='center')
ax1.text(11.1,1.05,latex_float(lc.rho_111),fontsize=12,
va='center',ha='center')
ax1.text(11.2,1.05,latex_float(lc.rho_112),fontsize=12,
va='center',ha='center')
ax1.text(11.3,1.05,latex_float(lc.rho_113),fontsize=12,
va='center',ha='center')
ax1.text(10.83,0.5,'pasta',fontsize=16,
rotation=90,va='center',ha='center')
ax1.text(11.32,0.5,'neutron drip',fontsize=16,
rotation=90,va='center',ha='center')
ax1.text(11.08,0.5,'inner crust',fontsize=16,
va='center',ha='center')
for label in ax1.get_xticklabels():
label.set_fontsize(16)
ax1.text(11.08,-0.22,r'$\mathrm{R~(km)}$',fontsize=16,
va='center',ha='center')
ax2.set_xlim([numpy.max(lc.r_nnuc_outer),numpy.min(lc.r_nnuc_outer)])
ax2.set_ylim([0,1])
for i in range(0,len(lc.r_nnuc_outer)):
ax2.plot(lc.r_nnuc_outer[i],lc.w_nnuc_outer[i],
marker='.',lw=0,mfc=(0.75,0.75,1.0),mec=(0.75,0.75,1.0),
ms=lc.Rn_nnuc_outer[i])
for label in ax2.get_xticklabels():
label.set_fontsize(16)
ax2.text(11.4,1.05,latex_float(lc.rho_114),fontsize=16,
va='center',ha='center')
ax2.text(11.5,1.05,latex_float(lc.rho_115),fontsize=16,
va='center',ha='center')
ax2.text(11.6,1.05,latex_float(lc.rho_116),fontsize=16,
va='center',ha='center')
ax2.text(11.7,1.05,latex_float(lc.rho_117),fontsize=16,
va='center',ha='center')
ax2.text(11.55,0.5,'outer crust',fontsize=16,
va='center',ha='center')
ax2.text(11.55,-0.22,r'$\mathrm{R~(km)}$',fontsize=16,
va='center',ha='center')
ax2.text(11.55,0.91,r'$\rho~(\mathrm{g}/\mathrm{cm}^3)$',fontsize=16,
va='center',ha='center',
bbox=dict(facecolor='white',lw=0))
plot.savefig('crust_plot.png')
plot.show()
| gpl-3.0 |
LSSTDESC/Monitor | python/desc/monitor/truth.py | 2 | 19716 | """
A module to download the truth parameters of astrophysical objects on fatboy and
build light curves from them.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import lsst.sims.catUtils.baseCatalogModels as bcm
from lsst.sims.catUtils.supernovae import SNObject
import pymssql
import os
from lsst.utils import getPackageDir
from lsst.daf.persistence import DbAuth
from lsst.sims.photUtils import BandpassDict
__all__ = ['RefLightCurves']
class RefLightCurves(object):
"""
Class to connect to the database tables on fatboy and obtain truth
light curves
Parameters
----------
tableName : string, mandatory
case insensitive string name of table on database to connect to
for model parameters of astrophysical objects
idCol : string, optional, defaults to 'id'
column name of Index on the table
observations : `pd.DataFrame`, optional, defaults to None
if None, some information may need to be supplied in using certain
methods like lightCurve. The dataframe must have the following columns
'index' : obsHistID, 'expMJD': MJD of the time of exposure,
'fiveSigmaDepth': magnitude of a point source for which the signal to
noise ratio in that exposure would be expected to be 5.0. Such
        information would be accessible from an OpSim output.
bandPassDict : instance of `lsst.sims.bandPassDict`, optional,
defaults to None
columns : tuple of strings, optional, defaults to values for SN
tuple of strings that completely specify the truth values for
the astrophysical object
dbConnection : `pymssql.connection` instance, mandatory
connection to the database where the relevant tables of catsim
objects are stored
    dbCursor : `pymssql.connection.cursor` instance, optional, defaults to None
cursor to the DataBase Connection. If None, a new cursor is obtained
from self.dbConnection
idSequence : sequence of one dimension, optional, defaults to None
sequence of unique ids in the catsim universe indexing the
astrophysical objects in the database.
dbHostName : string, optional, defaults to None
force the class to use this hostname. If not provided, the class will
set this to localhost, which is the desired hostname when using an ssh
tunnel. This parameter is useful when working from whitelisted
computers
Attributes
----------
columns : The columns that will be obtained from the table on the catsim
database
idCol : The column name on the table of the astrophysical object which
indexes the objects
tableName : Name of the table on the catsim database containing the
parameters of the astrophysical object
objectTypeID : a unique integer associated with each class of astrophysical
objects in catsim.
dbHostName : a name for hostname of the database
bandPassDict : bandpasses for the observations
observations : a set of observations incorporating information about time
        of observations and the fiveSigmaDepth values of the observations.
Methods
-------
Examples
--------
>>> reflc = RefLightCurves(idSequence=(6001163623700, 6000324908000),
tableName='TwinkSN',
dbConnection=DBConnection,
dbCursor=db) # doctest: +SKIP
"""
def __init__(self,
tableName,
objectTypeID=42,
idCol='id',
columns=('id', 'redshift', 'snra', 'sndec', 't0', 'x0',
'x1', 'c'),
observations=None,
bandPassDict=None,
dbConnection=None,
dbCursor=None,
dbHostName=None,
idSequence=None):
self.columns = columns
self._dbConnection = dbConnection
self._dbCursor = dbCursor
self._idSequence = idSequence
self.columns = columns
self.idCol = idCol
self.tableName = tableName
self._idvals = None
self.objectID = objectTypeID
self.dbHostName = dbHostName
self.bandPassDict = bandPassDict
self.observations = observations
@classmethod
def fromTwinklesData(cls,
tableName,
objectTypeID=42,
dbHostName=None,
idCol='id',
columns=('id', 'redshift', 'snra', 'sndec', 't0',
'x0', 'x1', 'c'),
idSequence=None):
"""
Simplified classmethod to construct this class from the Twinkles Run 1
perspective.
Parameters
----------
tableName : string, mandatory
case insensitive string name of table on database to connect to
for model parameters of astrophysical objects
idCol : string, optional, defaults to 'id'
column name of Index on the table
columns : tuple of strings, optional, defaults to values for SN
            tuple of strings that completely specify the truth values for the astrophysical object
idSequence : sequence of one dimension, optional, defaults to None
sequence of unique ids in the catsim universe indexing the
astrophysical objects in the database.
dbHostName : string, optional, defaults to None
force the class to use this hostname. If not provided, the class
will set this to localhost, which is the desired hostname when
using an ssh tunnel. This parameter is useful when working from
whitelisted computers.
Returns
        -------
        An instance of the RefLightCurves class where the other parameters
have been defaulted to sensible values for Twinkles Run1 Analysis.
Examples
--------
"""
data_dir = os.path.join(os.environ['MONITOR_DIR'], 'data')
opsimCsv = os.path.join(data_dir, 'SelectedKrakenVisits.csv')
opsimdf = pd.read_csv(opsimCsv, index_col='obsHistID')
observations = opsimdf[['expMJD', 'filter', 'fiveSigmaDepth']].copy()
del opsimdf
# Obtain the tuple of total, HardWare bandPassDict and keep the total
lsstBP = BandpassDict.loadBandpassesFromFiles()[0]
cls = RefLightCurves(tableName=tableName,
objectTypeID=objectTypeID,
idCol=idCol,
dbHostName=dbHostName,
columns=columns,
observations=observations,
bandPassDict=lsstBP,
idSequence=idSequence)
return cls
@property
def dbConnection(self):
"""
The pymssql connection to the catsim database used to query refrence
objects
"""
if self._dbConnection is None:
config = bcm.BaseCatalogConfig()
config.load(os.path.join(getPackageDir("sims_catUtils"), "config",
"db.py"))
username = DbAuth.username(config.host, config.port)
password = DbAuth.password(config.host, config.port)
hostname = config.host
if self.dbHostName is not None:
hostname = self.dbHostName
DBConnection = pymssql.connect(user=username,
password=password,
host=hostname,
database=config.database,
port=config.port)
return DBConnection
else:
return self._dbConnection
@property
def dbCursor(self):
"""
Cursor to the catsim database connection. This is not reset if one
exists.
"""
if self._dbCursor is None:
self._dbCursor = self.dbConnection.cursor()
return self._dbCursor
@staticmethod
def uniqueIDtoTableId(uniqueID, objTypeID, nshift=10):
"""
Given a sequence of catsim uniqueIDs, convert it to a numpy
array of IDs in the table of the object (called refIDCol) using
objTypeID.
Parameters
----------
uniqueID : 1D sequence of unique IDs as found in catsim/phosim Instance
catalogs.
objTypeID : A unique ID assigned to each class of object in the catsim
database.
nshift : integer, optional, defaults to 10
Number of bit shifts, exactly the same as in catsim.
Returns
-------
`numpy.ndarray` of IDs indexing the table of the particular object.
.. note:: This is an inverse of the catsim function
`lsst.sims.catalogs_measures.Instance.get_uniqueId`. Later on I
hope this code will be moved to a similar location.
"""
id = np.asarray(uniqueID) - objTypeID
return np.right_shift(id, nshift)
@property
def idSequence(self):
"""
An `numpy.ndarray` of IDs indexing the astrophysical objects on the
catsim database
"""
if self._idSequence is None:
return None
x = np.asarray(self._idSequence)
return self.uniqueIDtoTableId(x, objTypeID=self.objectID, nshift=10)
def allIdinTable(self, sqlconstraint='', chunksize=None):
"""
return a `pd.Series`of all IDs in the table with an optional
constraint specified as sqlconstraint. If chunkSize is not
None, but set to an integer, it returns a generator to the
series returning chunkSize values at a time.
Parameters
----------
sqlconstraint : string, optional, defaults to ''
sql constraint specified through a WHERE clause
chunksize : integer, optional, defaults to None
if not None, the return value is a generator to
a series getting chunkSize values at a time
Returns
-------
`pd.Series` of snids or a generator to it returning chunkSize values
        at a time
Examples
--------
>>> ids = reflc.allIdinTable(chunksize=None) # doctest: +SKIP
>>> ids.astype(int).values.flatten() # doctest: +SKIP
>>> # `numpy.ndarray` of dtype int, having all SNIDs
>>> ids = reflc.allIdinTable(chunksize=50) # doctest: +SKIP
        >>> next(ids).astype(int).values.flatten() # doctest: +SKIP
>>> # `numpy.ndarray` of dtype int, having 50 SNIDs, repeat
"""
query = """SELECT {0} FROM {1}""".format(self.idCol, self.tableName)
query += sqlconstraint
x = pd.read_sql_query(query, con=self.dbConnection,
chunksize=chunksize)
return x
def get_numObjects(self, sqlconstraint=''):
"""
return the number of objects in self.table
Parameters
----------
sqlconstraint : string, optional, defaults to ''
sql constraint specified through a WHERE clause
Returns
-------
integer number of objects in table (satisfying legal sqlconstraints if
specified)
Examples
--------
>>> reflc.get_numObjects() #doctest: +SKIP
>>> 776620
"""
query = """SELECT COUNT(*) FROM {} """.format(self.tableName)
query += sqlconstraint
self.dbCursor.execute(query)
n = self.dbCursor.fetchone()[0]
return n
def buildquery(self, idValue=None, columns=None):
"""
Return the query statement to be used to obtain model
parameters of the set of astrophysical objects
Parameters
----------
idValue : integer, optional, defaults to None
            unique ID of the astrophysical object. If None, then the query
            is built for all objects in idSequence. If idSequence is also
            None, then the query is built for all objects in the table
columns : tuple of strings, optional, defaults to None
Columns that will be queried, if None, defaults to
self.columns
Returns
-------
String with a query statement to use to obtain parameters
Examples
--------
>>> q = reflc.buildquery(idValue=6144030054709290)
"""
if columns is None:
columns = self.columns
query = """SELECT """
query += ", ".join(xx.strip() for xx in columns)
query += " FROM {} ".format(self.tableName)
# if query is for a single idvalue, construct the query stmt
if idValue is not None:
tableIdValue = self.uniqueIDtoTableId(idValue, objTypeID=42,
nshift=10)
query += "WHERE {0} = {1}".format(self.idCol, tableIdValue)
# if idValue is not supplied, but an idSequence is supplied
elif self.idSequence is not None:
query += "WHERE {0} in {1}".format(self.idCol,
tuple(self.idSequence))
# Else get the entire table, no WHERE clause
else:
pass
return query
def get_params(self, idValue=None):
"""
return parameters of the objects with the desired columns as a
`pandas.DataFrame`
Parameters
----------
idValue : int, optional, defaults to None
indexes of the astrophysical objects whose parameters are to be
obtained. If idValue is None, then all astrophysical objects with
ids in `self.idSequence` are used. If `self.idSequence` is None,
then all astrophysical objects are used.
Returns
-------
A DataFrame with a single row if idValue is supplied, or a DataFrame
with all objects in the table with properties in self.columns
"""
df = pd.read_sql_query(self.buildquery(idValue=idValue),
self.dbConnection,
index_col=self.idCol,
coerce_float=False)
return df
def astro_object(self, idValue, mjdOffset=59580.):
"""
instance of the catsim representation of the astrophysical object.
Parameters
----------
idValue : int, mandatory
index of the astro_object
mjdOffset : float, optional, defaults to 59580.
offset in time parameters in the database for the transient objects
Returns
-------
Instance of astro_object with parameters from the database
Examples
--------
>>> sn = reflc.astro_object(idValue=6001163623700)
"""
df = self.get_params(idValue)
sn = SNObject(ra=df.snra.values[0], dec=df.sndec.values[0])
paramDict = dict()
for param in ['t0', 'x0', 'x1', 'c']:
paramDict[param] = df[param].values
paramDict['t0'] += mjdOffset
paramDict['z'] = df.redshift.values[0]
sn.set(**paramDict)
return sn
def lightCurve(self, idValue,
bandName=None,
observations=None,
bandPassDict=None,
format='dataframe',
keys=['expMJD', 'filter', 'fiveSigmaDepth'],
photParams=None):
"""
return the light curve of the object for observations as a
`pandas.Dataframe`.
Parameters
----------
idValue : integer, mandatory
index of astrophysical object
bandName : string, optional, defaults to None
key for bandpassDict, so that bandpassDict[key] is an instance
of `lsst.sims.photUtils.Bandpass`. If provided, only the light
            curve in that band is returned
observations : optional, allowed formats decided by format variable,
defaults to None
            maximal set of observations corresponding to which the light curve
            is obtained. If None, observations default to self.observations. If
both observations and self.observations are None, an exception
will be raised.
bandPassDict : instance of `lsst.sims.photUtils.BandPassDict`, optional
defaults to None
dictionary of total (system + atmospheric) bandpasses as a
dictionary of bands. If None, defaults to self.bandPassDict. If
self.bandPassDict is None, an exception is raised
format: string, optional, defaults to dataframe
format in which observations are available
keys: aliases for columns time (in MJD), bandpass Name, fivesigmaDepth
(mags)
photParams: instance of `sims.photUtils.PhotParams`, optional,
defaults to None
Describes the observing conditions and telescope for the
observation. The default value of None instantiates this for the
LSST site and telescope.
Returns
-------
`pd.DataFrame` containing the following columns at the minimum
['time', 'flux', 'fluxerr', 'band']. time is in modified Julian Days,
        flux and fluxerr are in `maggies`. The rows of the output carry the
        same indexes as the supplied observations (eg. obsHistID).
"""
format = format.lower()
if format not in ['dataframe']:
raise NotImplementedError(
"Unavailable input format {0}".format(format))
sn = self.astro_object(idValue)
timeMin = sn.mintime()
timeMax = sn.maxtime()
if bandPassDict is None:
bandPassDict = self.bandPassDict
if bandPassDict is None:
raise ValueError('The method parameter bandPassDict, and '
'the attribute bandPassDict cannot simultaneously'
' be None\n')
if observations is None:
observations = self.observations
if observations is None:
raise ValueError('The method parameter observations, and '
'the attribute observations cannot simultaneously'
' be None\n')
df = observations.query('expMJD < @timeMax and '
'expMJD > @timeMin').copy()
times = []
bands = []
fluxs = []
fluxerrs = []
m5vals = []
for ind in df.index.values:
time = df.ix[ind, 'expMJD']
band = df.ix[ind, 'filter']
flux = sn.catsimBandFlux(time=time,
bandpassobject=bandPassDict[band])
m5val = df.ix[ind, 'fiveSigmaDepth']
fluxerr = sn.catsimBandFluxError(time=time,
bandpassobject=bandPassDict[band],
m5=m5val, fluxinMaggies=flux)
times.append(time)
bands.append(band)
m5vals.append(m5val)
fluxs.append(flux)
fluxerrs.append(fluxerr)
mydict = dict()
mydict['time'] = times
mydict['flux'] = fluxs
mydict['fluxerr'] = fluxerrs
mydict['band'] = bands
mydict['m5'] = m5vals
output = pd.DataFrame(mydict, index=df.index)
if bandName is not None:
output = output.query('band == @bandName')
return output
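if __name__ == "__main__":
    # Hypothetical usage sketch: this needs access to the fatboy database
    # (e.g. through an ssh tunnel) and the MONITOR_DIR data files; the table
    # name and ids below are the ones quoted in the docstring examples.
    reflc = RefLightCurves.fromTwinklesData(
        tableName='TwinkSN',
        idSequence=(6001163623700, 6000324908000))
    lc = reflc.lightCurve(idValue=6001163623700, bandName='r')
    print(lc.head())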
| bsd-3-clause |
q1ang/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 251 | 2022 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
biorack/metatlas | metatlas/plots/chromatograms_mp_plots.py | 1 | 5507 | from __future__ import absolute_import
import matplotlib
#matplotlib.use('Agg')
import sys
#import yaml
import os
#import multiprocessing as mp
from matplotlib import pyplot as plt
import numpy as np
import warnings
from textwrap import wrap
#warnings.filterwarnings("ignore")
from metatlas.io import metatlas_get_data_helper_fun as ma_data
def plot_chromatogram(d,file_name, ax=None):
"""
"""
if ax is None:
ax = plt.gca()
rt_min = d['identification'].rt_references[0].rt_min
rt_max = d['identification'].rt_references[0].rt_max
rt_peak = d['identification'].rt_references[0].rt_peak
if len(d['data']['eic']['rt']) > 1:
x = np.asarray(d['data']['eic']['rt'])
y = np.asarray(d['data']['eic']['intensity'])
ax.plot(x,y,'k-',linewidth=2.0,alpha=1.0)
myWhere = np.logical_and(x>=rt_min, x<=rt_max )
ax.fill_between(x,0,y,myWhere, facecolor='c', alpha=0.3)
ax.axvline(rt_min, color='k',linewidth=2.0)
ax.axvline(rt_max, color='k',linewidth=2.0)
ax.axvline(rt_peak, color='r',linewidth=2.0)
ax.set_title("\n".join(wrap(file_name,54)),fontsize=12,weight='bold')
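# Hedged usage sketch (not in the original module): plot_chromatogram only
# needs one compound-in-one-file dict ``d`` (with the 'identification' and
# 'data'->'eic' entries accessed above) plus a title string, so a single
# panel can be drawn ad hoc; the ``data[file_idx][compound_idx]`` indexing
# is assumed from the helper functions below.
# fig, single_ax = plt.subplots()
# plot_chromatogram(data[file_idx][compound_idx], 'example run', ax=single_ax)
# fig.savefig('single_chromatogram.pdf')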
def plot_compounds_and_files_mp(kwargs):
#print(mp.current_process())
my_data= kwargs['data'] # data for all compounds for one file
file_name = kwargs['file_name'] # full path of output file name
nRows, nCols = kwargs['rowscols']
names = kwargs['names']
share_y = kwargs['share_y']
# plt.ioff()
f,ax = plt.subplots(nRows, nCols, sharey=share_y,figsize=(8*nCols,nRows*6))
ax = ax.flatten()
plt.rcParams['pdf.fonttype']=42
plt.rcParams['pdf.use14corefonts'] = True
# matplotlib.rc('font', family='sans-serif')
# matplotlib.rc('font', serif='Helvetica')
plt.rcParams['text.usetex'] = False
plt.rcParams.update({'font.size': 12})
plt.rcParams.update({'font.weight': 'bold'})
plt.rcParams['axes.linewidth'] = 2 # set the value globally
for i,name in enumerate(names):
plot_chromatogram(my_data[i], name, ax=ax[i])
f.savefig(file_name)
plt.close(f)
def plot_compounds_and_files(output_dir,
data,
nCols = 8,
share_y = False,
pool=None,
plot_types='both'):
'''
Parameters
----------
    output_dir   location of saved pdf plots
    nCols        number of columns per pdf file
    share_y      whether or not the subplots share the y axis
    pool         multiprocessing.Pool used to run the plotting jobs
    plot_types   'files' (compounds per file), 'compounds' (files per
                 compound) or 'both'
Returns
-------
nothing
'''
file_names = ma_data.get_file_names(data)
compound_names = ma_data.get_compound_names(data)[0]
# create directory if necessary
if not os.path.exists(output_dir):
os.makedirs(output_dir)
    # set up the plotting jobs according to the request
    # ('files', 'compounds' or 'both'); args_list accumulates the jobs from
    # the two branches below
    args_list = []
    if plot_types.lower() in ('files', 'both'):
        nRows = int(np.ceil(len(compound_names)/float(nCols)))
for file_idx, my_file in enumerate(file_names):
kwargs = {'data': data[file_idx],
'file_name': os.path.join(output_dir, my_file +'.pdf'),
'rowscols': (nRows, nCols),
'share_y': share_y,
'names': compound_names}
args_list.append(kwargs)
    if plot_types.lower() in ('compounds', 'both'):
        nRows = int(np.ceil(len(file_names)/float(nCols)))
for compound_idx, my_compound in enumerate(compound_names):
my_data = list()
for file_idx, my_file in enumerate(file_names):
my_data.append(data[file_idx][compound_idx])
kwargs = {'data': my_data,
'file_name': os.path.join(output_dir, my_compound+'.pdf'),
'rowscols': (nRows, nCols),
'share_y': share_y,
'names': file_names}
args_list.append(kwargs)
pool.map(plot_compounds_and_files_mp, args_list)
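# Hedged usage sketch (not in the original module): the caller is expected to
# supply the multiprocessing pool; the output directory and worker count
# below are placeholders.
# import multiprocessing as mp
# pool = mp.Pool(processes=4)
# plot_compounds_and_files('output_pdfs', data, nCols=8, share_y=False,
#                          pool=pool, plot_types='both')
# pool.close(); pool.join()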
#if __name__ == '__main__':
# #sys.path.insert(0, '/global/homes/j/jtouma/metatlas')
# import pickle
#
# # load pickled data
# info = pickle.load(open(sys.argv[1], "rb"))
# sys.path.insert(info['path_idx'], info['path'])
# from metatlas.helpers import metatlas_get_data_helper_fun as ma_data
# data = ma_data.get_dill_data(info['pkl_file'])
# file_names = ma_data.get_file_names(data)
# compound_names = ma_data.get_compound_names(data)[0]
#
# print("\n")
# print(50*'-')
# print("Number of file: " + str(len(file_names)))
# print("Number of compounds: " + str(len(compound_names)))
# if info['plot_types'].lower() == 'both':
# print("Processing both files and compounds")
# else:
# print("processing " + info['plot_types'].lower() + " only")
# print("Using " + str(info['processes']) + " out of " + str(mp.cpu_count()) + " available cores")
# print(50*'-')
# print("\n")
# plot_compounds_and_files(output_dir=info['output_dir'],
# data=data,
# compound_names=compound_names,
# file_names=file_names,
# nCols=info['nCols'],
# share_y=info['share_y'],
# processes=info['processes'],
# plot_types=info['plot_types'])
#
| bsd-3-clause |
chetan51/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/__init__.py | 72 | 2225 |
import matplotlib
import inspect
import warnings
# ipython relies on interactive_bk being defined here
from matplotlib.rcsetup import interactive_bk
__all__ = ['backend','show','draw_if_interactive',
'new_figure_manager', 'backend_version']
backend = matplotlib.get_backend() # validates, to match all_backends
def pylab_setup():
'return new_figure_manager, draw_if_interactive and show for pylab'
# Import the requested backend into a generic module object
if backend.startswith('module://'):
backend_name = backend[9:]
else:
backend_name = 'backend_'+backend
backend_name = backend_name.lower() # until we banish mixed case
backend_name = 'matplotlib.backends.%s'%backend_name.lower()
backend_mod = __import__(backend_name,
globals(),locals(),[backend_name])
# Things we pull in from all backends
new_figure_manager = backend_mod.new_figure_manager
# image backends like pdf, agg or svg do not need to do anything
# for "show" or "draw_if_interactive", so if they are not defined
# by the backend, just do nothing
def do_nothing_show(*args, **kwargs):
frame = inspect.currentframe()
fname = frame.f_back.f_code.co_filename
if fname in ('<stdin>', '<ipython console>'):
warnings.warn("""
Your currently selected backend, '%s' does not support show().
Please select a GUI backend in your matplotlibrc file ('%s')
or with matplotlib.use()""" %
(backend, matplotlib.matplotlib_fname()))
def do_nothing(*args, **kwargs): pass
backend_version = getattr(backend_mod,'backend_version', 'unknown')
show = getattr(backend_mod, 'show', do_nothing_show)
draw_if_interactive = getattr(backend_mod, 'draw_if_interactive', do_nothing)
# Additional imports which only happen for certain backends. This section
# should probably disappear once all backends are uniform.
if backend.lower() in ['wx','wxagg']:
Toolbar = backend_mod.Toolbar
__all__.append('Toolbar')
matplotlib.verbose.report('backend %s version %s' % (backend,backend_version))
return new_figure_manager, draw_if_interactive, show
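# Hedged usage note (not part of the original module): pyplot/pylab call
# pylab_setup() once at import time, roughly
#     new_figure_manager, draw_if_interactive, show = pylab_setup()
# so user code normally never calls it directly; the backend is chosen
# beforehand via matplotlib.use(), the matplotlibrc file, etc.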
| gpl-3.0 |
brandonrobertz/BitcoinTradingAlgorithmToolkit | src/data.py | 2 | 43175 | import processlogs2 as pl2
import indicators as ind
import dtools as dts
import datetime
import pandas as pd
import numpy as np
import cPickle
import re
import os
######################################################################
# #
# #
# #
# C O I N #
# #
# #
# #
######################################################################
class Coin:
""" This holds prices for each and any coin we want. It automatically
updates a series of averages and other statistical manipulations
of price movements.
"""
def __init__( self, **kwargs):
""" setup our basic dataframes ... raw prices, 5min rolling avg, etc
Paramz:
debug : (default False) print debugging output?
time_str : (default "1min") period to analyze ... follows
pandas notation "10s", "5min", "1h", "1d" etc
calc_rolling : (default False) calculate our rolling data?
calc_mid : (default False) standard behavior is to record
the lastprice as last tick. Changing this to True
makes the lastprice become the average of bid
vs. ask
calc_ohlc : (default False) calculate our OHLC data?
calc_indicators : (default False) controls whether or not
to create our indicators. Most rely on
OHLC and last price, so those are necessary
calc_crt : (default False) controls whether we wanna
calculate our compound returns based on
past price actionz
rolling : a custom dict specifying what averages to
compute (optional) ... should be in the following
format:
{ "time" : { N : pd.DataFrame(), ... } }
i.e.,
{ "30s" : { 12: pd.DataFrame(),
24: pd.DataFrame(),
50: pd.DataFrame() } }
std : similar to rolling. a custom dict telling Coin how
to compute our n-period averages.
i.e., a 13 and 21 period std-dev
{ 13: pd.DataFrame(), 21: pd.DataFrame() }
crt : similar to above ... but for n-period compount return
{ 1: pd.DataFrame(), 5: pd.DataFrame() }
indicators : a dict containing the indicators we want to calculate
with their parameters. i.e.,
{ "RSI" : { "data": pd.DataFrame(), "n": 14 },
"ROC" : { "data": pd.DataFrame(), "n": 20 },
"AMA" : { "data": pd.DataFrame(), "n": 10, "fn": 2.5,
"sn":30 },
"CCI" : { "data": pd.DataFrame(), "n": 20 },
"FRAMA": { "data": pd.DataFrame(), "n": 10 },
"RVI2" : { "data": pd.DataFrame(), "n": 14, "s": 10 },
"MACD" : { "data": pd.DataFrame(), "f": 12, "s": 26,
"m": 9 },
"ADX" : { "data": pd.DataFrame(), "n": 14 } }
instant : (default False) setting this to a dataframe of last
prices will trigger it to automatically calculate all
indicators and averages for the whole set in one pass
"""
self._debug = kwargs.get( "debug", False)
self.relative = kwargs.get( "relative", False)
self._calc_rolling = kwargs.get("calc_rolling", False)
self.rolling = kwargs.get( "rolling", False)
self._calc_mid = kwargs.get("calc_mid", False)
self._calc_ohlc = kwargs.get("calc_ohlc", False)
self.ohlc = kwargs.get("ohlc", False)
self._calc_indicators = kwargs.get("calc_indicators", False)
self.ti = kwargs.get( "indicators", False)
self._calc_std = kwargs.get("calc_std", False)
self.std = kwargs.get( "std", False)
self._calc_crt = kwargs.get( "calc_crt", False)
self.crt = kwargs.get( "crt", False)
self.instant = kwargs.get( "instant", False)
self.time_str = kwargs.get( "time_str", "5min")
self.verbose = kwargs.get( "verbose", False)
# this will hold moving averages, first by frequency, then by window
if self._calc_rolling:
# did we get rolling parameters sent to us?
if type( self.rolling) != dict:
# no, so set defaults
self.rolling = { self.time_str : { 12: pd.DataFrame(),
24: pd.DataFrame(),
50: pd.DataFrame() } }
# this will hold OHLC data
if self._calc_ohlc:
# some defaults if nothing provided
if type( self.ohlc) != dict:
self.ohlc = { self.time_str : pd.DataFrame() }
# here's where our tecnical indicators go
if self._calc_indicators:
if type( self.ti) != dict:
self.ti = { "RSI" : { "data": pd.DataFrame(), "n":14 },
"ROC" : { "data": pd.DataFrame(), "n":20 },
"AMA" : { "data": pd.DataFrame(), "n":10, "fn":2.5,
"sn":30 },
"CCI" : { "data": pd.DataFrame(), "n":20 },
"FRAMA": { "data": pd.DataFrame(), "n":10 },
"RVI2" : { "data": pd.DataFrame(), "n":14, "s":10 },
"MACD" : { "data": pd.DataFrame(), "f":12, "s":26,
"m":9 },
"ADX" : { "data": pd.DataFrame(), "n":14 },
"ELI" : { "data": pd.DataFrame(), "n":14 },
"TMI" : { "data": pd.DataFrame(), "nb":10, "nf":5 }
}
# running standard deviations
if self._calc_std:
# some defaults if nothing provided
if type( self.std) != dict:
self.std = { 13: pd.DataFrame(),
21: pd.DataFrame(),
34: pd.DataFrame() }
# get our n-period compound returns
if self._calc_crt:
# some defaults if nothing provided
if type( self.crt) != dict:
self.crt = { 1: pd.DataFrame() }
# iterative move ... start blank
if type(self.instant) != pd.DataFrame:
# this will hold our last prices
self.lastprice = pd.DataFrame()
# INSTANT MODE
else:
if self.verbose: print "[*] Entering one-pass 'instant' mode"
if type( self.instant) == pd.DataFrame:
# set lastprices as given price DF .. make sure its called lastprice
self.lastprice = self.instant.rename( columns={self.instant.columns[0]:"lastprice"})
# OHLC first
if self.ohlc:
for time_str in self.ohlc:
self.ohlc[time_str] = self.lastprice.lastprice.resample( time_str,
how="ohlc").ffill()
# run through all necessary rolling averages and compute them
if self._calc_rolling:
for time_str in self.rolling:
for window in self.rolling[time_str]:
# default EMA ... TODO: allow users to change this
self.rolling[time_str][window] = pd.ewma( self.lastprice.resample(
time_str, fill_method="ffill"),
span=window ,freq=time_str)
# calculate our technical indicators
if self._calc_indicators:
self._indicators()
# running standard deviations
if self._calc_std:
self._std()
# compound returns
if self._calc_crt:
self._compound()
else:
print "[!]","Error! Didn't pass instant a dataframe!"
###########################
# ADD
###########################
def add( self, price, t, **kwargs):
""" this is our main interface. w/ price & time it does the rest
PARAMZ:
price : last price from ticker
t : time of price
ba : bid/ask spread as tuple [bid, ask]
(optional if not in midprice mode)
"""
# make sure our t is a datetime
if type( t ) != datetime.datetime:
t = pd.to_datetime( t)
# get new lastprice
# if self._calc_mid = True then we're calculating the last price
# as the avg between bid/ask ... this can be a better estimate thn last
if self._calc_mid:
bid, ask = kwargs.get( "ba", [np.NaN, np.NaN])
self.lastprice = self._mid_df( bid, ask, t, "lastprice", self.lastprice)
# otherwise, we're just using lastprice
else:
self.lastprice = self._lastprice_df( price, t)
# calculate our OHLC data if needed
if self._calc_ohlc:
for time_str in self.ohlc:
self.ohlc[time_str] = self._ohlc_df( t, self.ohlc[time_str], time_str)
# run through all necessary rolling averages and compute them
if self._calc_rolling:
for time_str in self.rolling:
for window in self.rolling[time_str]:
self.rolling[time_str][window] = self._rolling( price,
t, self.rolling[time_str][window],
time_str, window)
# calculate our technical indicators
if self._calc_indicators:
# TODO: update this if we ever want to add multiple OHLC frames
self._indicators()
# running standard deviations
if self._calc_std:
self._std()
# compound returns
if self._calc_crt:
self._compound()
###########################
# COMBINE
###########################
def combine( self, name):
""" This will combine all statistical breakdowns in a coin into a
single DataFrame
name : a name to prepend all columns with, i.e., "LTC"
"""
all = pd.DataFrame()
# sort time_strs
if self._calc_rolling:
for time_str in self.rolling.keys():
for N in self.rolling[time_str]:
all = all.join( self.rolling[time_str][N].rename(
columns={"lastprice":name+"_"+"EMA_"+time_str+"_"+str(N)}),
how="outer")
# standard deviations
if self._calc_std:
for N in self.std.keys():
all = all.join( self.std[N].rename(
columns={self.std[N].columns[0]:name+"_"+self.std[N].columns[0]+"_"+str(N)}),
how="outer")
# technical indicators
if self._calc_indicators:
if type(self.ti) == dict:
for ind in self.ti.keys():
all = all.join( self.ti[ind]["data"], how="outer")
# compound returns
if self._calc_crt:
for N in self.crt.keys():
all = all.join( self.crt[N].rename(
columns={self.crt[N].columns[0]:name+"_"+self.crt[N].columns[0]}),
how="outer")
# OHLC
if self.ohlc:
for time_str in self.ohlc:
for col in self.ohlc[time_str]:
all = all.join( pd.DataFrame( { "%s_%s_%s"%(name,
self.ohlc[time_str][col].name, time_str): self.ohlc[time_str][col]},
index=[self.ohlc[time_str].index]), how="outer")
return all
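  # Hedged illustration (not from the original source) of what combine()
  # returns: with rolling={"30s": {12:..., 24:...}} and OHLC enabled, a call
  # like
  #     flat = coin.combine( "LTC")
  # yields a single joined DataFrame with columns such as
  #     LTC_EMA_30s_12, LTC_EMA_30s_24, LTC_open_30s, ..., LTC_close_30s
  # (the STD/CRT/indicator column names depend on what the indicators
  # module produces).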
###########################
# _COMPOUND (RETURN)
###########################
# TODO: update this if we ever want to add multiple OHLC frames
def _compound( self):
""" Once again, ugly ass hack, but fuck it. We're calculating the
compound returns over the past N periods as defined in our crt
dict.
"""
for time_str in self.ohlc:
for N in self.crt:
# define reutrn as return over open and close
self.crt[N] = ind.CRT( self.ohlc[time_str], N)
#self.crt[N] = ind.CRT( self.ohlc[time_str].close, N)
###########################
# _INDICATORS
###########################
def _indicators( self ):
""" This will calculate our technical indicators based on the
parameters in our ti dict ... this can be ran in one bang OR
iteratively. It goes directly to the indicator structs. Not pretty,
but what the fuck.
"""
# TODO: update this if we ever want to add multiple OHLC frames
for time_str in self.ohlc:
for indicator in self.ti:
if indicator == "RSI":
self.ti[indicator]["data"] = ind.RSI( self.ohlc[time_str],
self.ti[indicator]["n"] )
elif indicator == "ROC":
self.ti[indicator]["data"] = ind.ROC( self.ohlc[time_str],
self.ti[indicator]["n"] )
elif indicator == "AMA":
self.ti[indicator]["data"] = ind.AMA( self.ohlc[time_str].close,
self.ti[indicator]["n"],
self.ti[indicator]["fn"],
self.ti[indicator]["sn"] )
elif indicator == "CCI":
self.ti[indicator]["data"] = ind.CCI( self.ohlc[time_str],
self.ti[indicator]["n"] )
elif indicator == "FRAMA":
self.ti[indicator]["data"] = ind.FRAMA( self.ohlc[time_str],
self.ti[indicator]["n"] )
elif indicator == "RVI2":
self.ti[indicator]["data"] = ind.RVI2( self.ohlc[time_str],
self.ti[indicator]["n"],
self.ti[indicator]["s"] )
elif indicator == "MACD":
self.ti[indicator]["data"] = ind.MACD( self.ohlc[time_str],
self.ti[indicator]["f"],
self.ti[indicator]["s"],
self.ti[indicator]["m"] )
elif indicator == "ADX":
self.ti[indicator]["data"] = ind.ADX( self.ohlc[time_str],
self.ti[indicator]["n"] )
elif indicator == "ELI":
self.ti[indicator]["data"] = ind.ELI( self.ohlc[time_str],
self.ti[indicator]["n"] )
elif indicator == "TMI":
self.ti[indicator]["data"] = ind.TMI( self.ohlc[time_str],
self.ti[indicator]["nb"],
self.ti[indicator]["nf"])
###########################
# _LASTPRICE_DF
###########################
def _lastprice_df( self, price, t):
""" This will create a new DF with a price if our global lastprice
dataframe is empty, or it will append a new price to existing.
Returns: dataframe to replace global lastprice.
"""
# get our new data in a dataframe
new = pd.DataFrame( {"lastprice":price}, index=[t])
# do we have any data in our DF?
if len( self.lastprice) == 0:
# no. so start us off
return pd.DataFrame( new, columns=["lastprice"])
else:
# is our price the same as the old? and have we gone 30s without a value?
if ( ( price == self.lastprice.ix[-1][0]) and
(( t - self.lastprice.ix[-1].name).seconds < 30) ):
# same. return same ol' bullshit
return self.lastprice
else:
# no ... we got new shit, return updated DF
return self.lastprice.append( new)
###########################
# _LOOKBACK
###########################
def _lookback( self, t, time_str, window, buffer):
""" Calculate correct lookback for each time unit D, min, s, etc
lookback is the furthest date that we need to grab backwards to
supply rolling_mean with data to calculate everything.
"""
# get number supplied in time string
ns = re.sub("[^0-9]", "", time_str)
n = int(ns)
# get time unit as str
scale = time_str[ time_str.index( ns)+len(ns):]
if self._debug:
#print "scale, ns, n:", scale, ns, n
self.scale = scale; self.ns = ns
# figure out which scale we're in and calculare lookback properly
if scale == "D":
lookback = t - datetime.timedelta( days=(( n * window))*buffer)
elif scale == "min":
lookback = t - datetime.timedelta( minutes=(( n * window))*buffer)
elif scale == "s":
lookback = t - datetime.timedelta( seconds=(( n * window))*buffer)
if self._debug:
#print "lookback:", lookback
self.l = lookback
return lookback
###########################
# _MID_DF
###########################
def _mid_df( self, bid, ask, t, name, old):
""" Calculate price as average of bid and ask. We use this to
give us a more realistic expectation of what prices we could
actually get in the market than just last. I can't really decide
which price quote is better in BTC-e ... last or mid, since there
ain't no market makers/orders and ask can go up while bid stays
the same, which would give you negative profit if you sold. Use
both? Mid tend to be a little smoother with slightly diff highs and
lows.
Paramz:
bid : bid price
ask : ask price
t : time of price
name : name of column to append
old : old global dataframe
"""
# calc avg between bid and ask
price = (bid + ask) / 2.0
    # get our new data in a dataframe
new = pd.DataFrame( {name:price}, index=[t])
# do we have any data in our DF?
if len( old) == 0:
# no. so start us off
return pd.DataFrame( new, columns=[name])
else:
# is our price the same as the old? and have
# we gone less than 30s without a value?
if ( ( price == old.ix[-1][0]) and
(( t - old.ix[-1].name).seconds < 30) ):
# same. return same ol' bullshit
return old
else:
# no ... we got new shit, return updated DF
return old.append( new)
###########################
# _NEW_DF
###########################
def _new_df( self, lookback, t, window, time_str):
""" Return a new, trimmed set of recent prices for use
in rolling means.
"""
lookback2 = pl2.nearest_by_date( self.lastprice, lookback, True)
return pd.rolling_mean( self.lastprice.ix[lookback2.name:].resample( time_str,
fill_method="ffill"),
window,
freq=time_str)
###########################
# _NEW_OHLC_DF
###########################
def _new_ohlc_df( self, lookback, time_str):
""" Return a new, trimmed set of OHLC based on last prices
"""
# get nearest index behind lookback
lookback2 = pl2.nearest_by_date( self.lastprice, lookback, True)
return self.lastprice.lastprice.ix[lookback2.name:].resample( time_str,
how="ohlc")
###########################
# _OHLC_DF
###########################
def _ohlc_df( self, t, old, time_str):
lookback = self._lookback( t, time_str, 1, 3)
if self._debug:
print "OLD", old
#self.o = old
new = self._new_ohlc_df( lookback, time_str)
if self._debug:
print "new OHLC:", new
#self.new = new
# have we started it?
if len(old) == 0:
# no, so return started
return new
else:
# add extra values in new that are not in old
updated = old.combine_first( new)
# update values from new into old
updated.update( new)
if self._debug:
print "updated OHLC:", updated
#self.u = updated
return updated
###########################
# _ROLLING
###########################
def _rolling( self, price, t, old, time_str="5min", window=3, type="EMA"):
""" This will create an initial rolling average
dataframe or it will generate a new dataframe with an
updated last N min rolling value. The objective here
is to not be recalculating the *entire* rolling average
when we know we're only getting a few new values tacked
onto the end of our prices.
price : latest price to add
t : time of latest price
old : our old moving average to compare / append to
time_str : minutes to average, defaults to 5min
window : rolling window size (in multiples of time_str chunks),
defaults to 3x
type : do a Simple Moving Average (SMA) or Exponential
Moving Average (EMA), defaults to SMA
"""
if self._debug:
#print "\n_rolling"
print "old ROLLING", old
self.o = old
#print "price:", price
self.p = price
#print "t:", t
self.t = t
# buffer (extra time to look back just to compare un/changed vals
# we will multiply this times our window size to make sure we have
# everything we need in the case of a missing val, etc
buffer = 3
# get our lookback
lookback = self._lookback( t, time_str, window, buffer)
# choose an averaging scheme, then ...
# calculate our rolling average from the most recent data
    # fill in any holes in our prices down to the smallest interval we might need
if type == "SMA":
new = self._new_df( lookback, t, window, time_str)
elif type == "EMA":
new = pd.ewma( self.lastprice.ix[lookback:].resample( time_str,
fill_method="ffill"),
span=window,
freq=time_str)
if self._debug:
print "new ROLLING", new
self.n = new
# do we have anything in our rolling avg global?
if len( old) < window:
# return this as new
return new
# if we do, then we need to find where differences start,
# shave off those from old, append new, return as new global rolling
else:
# add extra values in new that are not in old
updated = old.combine_first( new)
# update values from new into old
updated.update( new)
if self._debug:
#print "updated:", updated
self.u = updated
return updated
###########################
# _STD
###########################
def _std( self):
""" Get our n-period standard deviations
"""
# TODO: update this if we ever want to add multiple OHLC frames
for time_str in self.ohlc:
for N in self.std:
self.std[N] = ind.STD( self.ohlc[time_str].close, N)
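## -- Hedged Coin-only sketch (not from the original source) -- ##
# Coin can be driven without the Data wrapper, either tick-by-tick as below
# or in one pass via the ``instant`` keyword; the prices/timestamps are
# made up, and mid-price mode would additionally need ba=[bid, ask].
# c = Coin( calc_ohlc=True, calc_crt=True, time_str="1min")
# c.add( 104.2, datetime.datetime(2013, 4, 1, 12, 0, 0))
# c.add( 104.5, datetime.datetime(2013, 4, 1, 12, 0, 30))
# flat = c.combine( "LTC")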
######################################################################
# #
# #
# #
# D A T A #
# #
# #
# #
######################################################################
class Data:
""" Transparently loads new data from exchanges, either live or from
disk so that we can simulate and trade IRL using the same
framework.
"""
def __init__( self, **kwargs):
""" Set up our data structures and determine whether we're in
live or simulated mode.
time_str : (default "5min") time-frame to analyze on ... this
controls the length of each "bar" or period, can be
any pandas-recognized string, (10s, 10min, 1h, 1d, etc)
live : live or simulated mode (whether or not to read from
filename or from the web), defaults to False (simulated)
filename : name of log file to read in simulated mode ... interpreted
as ./logs/filename ... file must be in this dir
warp : whether or not to use our timedelta or just next value
for each update() ... so we can do all calculations
as fast as possible, defaults to False ("realtime")
debug : whether or not to spit out debugging info
sample_secs : if in warp-mode, N-seconds to sample on (the shorter
N, the more often we are "checking" the price and
the more iterations it will take to complete a series)
instant : (default False) Setting this to true will make Data
send the lastprice series to the Coins to calculate all
in faster, one-pass mode
ltc_opts : dict structure on what to do with LTC data ... see coin for
options from kwargs (default is same as GOX...)
Here's an example of a fully loaded options dict
{ "debug": False,
"relative": False,
"calc_rolling": False,
"rolling": { self.time_str : { 5: pd.DataFrame(),
25: pd.DataFrame(),
50: pd.DataFrame() } },
"calc_mid": False,
"calc_ohlc": True,
"ohlc": { self.time_str : pd.DataFrame() },
"calc_indicators": True,
"indicators":{ "RSI" : { "data": pd.DataFrame(), "n":14 },
"ROC" : { "data": pd.DataFrame(), "n":20 },
"AMA" : { "data": pd.DataFrame(), "n":10, "fn":2.5, "sn":30 },
"CCI" : { "data": pd.DataFrame(), "n":20 },
"FRAMA": { "data": pd.DataFrame(), "n":10 },
"RVI2" : { "data": pd.DataFrame(), "n":14, "s":10 },
"MACD" : { "data": pd.DataFrame(), "f":12, "s":26, "m":9 },
"ADX" : { "data": pd.DataFrame(), "n":14 },
"ELI" : { "data": pd.DataFrame(), "n":14 },
"TMI" : { "data": pd.DataFrame(), "nb":10, "nf":5} }
"calc_std": True,
"std": { 10: pd.DataFrame(), 50: pd.DataFrame(), 100: pd.DataFrame() },
"calc_crt": True,
"crt": { 1: pd.DataFrame(), 2: pd.DataFrame(),
3: pd.DataFrame(), 5: pd.DataFrame(),
8: pd.DataFrame() },
"instant": False,
"time_str": self.time_str }
gox_opts : dict structure on what to do with GOX BTC data ... see coin for
options from kwargs (default: everything disabled but OHLC ... )
{ "debug": False,
"relative": False,
"calc_rolling": False,
"rolling": False,
"calc_mid": False,
"calc_ohlc": True,
"ohlc": { self.time_str : pd.DataFrame() },
"calc_indicators": False,
"calc_std": False,
"std": False,
"calc_crt": False,
"crt": False,
"instant": False,
"time_str": self.time_str }
pickled_data : (default False) if this is set to a data structure,
from pickle'd pandas csv data structure, it'll take
it from here instead of from disk. Faster on multiple
iterations.
verbose : (default False) whether or not to print out shit
"""
self.live = kwargs.get("live", False)
self.filename = kwargs.get("filename", "test.csv")
self.warp = kwargs.get( "warp", True)
self._debug = kwargs.get( "debug", False)
self.sample_secs = kwargs.get( "sample_secs", 5)
self.instant = kwargs.get( "instant", False)
self.time_str = kwargs.get( "time_str", "5min")
self.verbose = kwargs.get( "verbose", False)
# default LTC options
def_ltc = { "debug": False,
"relative": False,
"calc_rolling": False,
"rolling": False,
"calc_mid": False,
"calc_ohlc": True,
"ohlc": { self.time_str : pd.DataFrame() },
"calc_indicators": False,
"indicators": False,
"calc_std": False,
"std": False,
"calc_crt": False,
"crt": False,
"instant": False,
"time_str": self.time_str }
self.ltc_opts = kwargs.get( "ltc_opts", def_ltc)
# default gox options
def_gox = { "debug": False,
"relative": False,
"calc_rolling": False,
"rolling": False,
"calc_mid": False,
"calc_ohlc": True,
"ohlc": { self.time_str : pd.DataFrame() },
"calc_indicators": False,
"indicators": False,
"calc_std": False,
"std": False,
"calc_crt": False,
"crt": False,
"instant": False,
"time_str": self.time_str }
self.gox_opts = kwargs.get( "gox_opts", def_gox)
self.pickled_data = kwargs.get( "pickled_data", False)
if self.verbose:
print "[*]", "Online" if self.live else "Offline", "mode initiated"
print "[*]", "Simulated" if not self.warp else "Speed", "mode initiated"
# if we're running simulated, set up price logs so we can query them
# in realtime as if they were actual price changes
if self.live == False:
# did we supply a pre-parsed pandas CSV data struct?
if self.pickled_data != False:
if self.verbose:
print "[*]", "Loading supplied pickle!"
data = self.pickled_data
# nope ... load from disk!
else:
# loading from CSV takes a long time, lets prepare a pickle of the
# loaded CSV if we haven't already done so, if we have then load it
filename_pick = os.path.realpath( os.path.join( "logs", self.filename+".pickle"))
if os.path.exists( filename_pick):
if self.verbose:
print "[*]", "Loading csv pickle from %s" % filename_pick
f = open( filename_pick, "rb")
data = cPickle.load( f)
f.close()
else:
filename_csv = os.path.realpath( os.path.join( "logs", self.filename))
if self.verbose: print "[*] Loading %s" % filename_csv
data = pl2.load2( filename_csv)
if self.verbose: print "[*] Generating pickle for next time to %s" % filename_pick
f = open( filename_pick, "wb")
cPickle.dump( data, f)
f.close()
# load our time-series dataframe from csv using pandas library
self._gox_offline = data["gox"]
self._ltc_offline = data["ltc"]
self._ltc_depth_offline = data["ltc_depth"]
# if we're running in non-simulated offline mode, where we just
# want to run through our historical price data as quickly as
# possible, then we build a range of dates that we will walk through
if self.warp == True:
# get our start and end points in our timerange
start = max( [ self._gox_offline.index[0], self._ltc_offline.index[0]])
end = max( [ self._gox_offline.index[-1], self._ltc_offline.index[-1]])
# our list of total dates to run through
# jump to N-seconds intervals (self.sample_secs)
if self.verbose:
print "[*]","Building daterange"
self.logrange = self._daterange( start, end, self.sample_secs)
# we're going to need to iterate through this one at a time ...
# get new values, calculate indicators, train, repeat, so we'll
# need to keep track of where we are
self.logrange_n = 0
if self.verbose:
print "[*] Dates from", start, "to", end
# otherwise we pretend we're live (slow so we can watch it IRT)
else:
# find out which has the earliest starting date. We will use
# this to calculate our timedelta. In the future when we want
# to check the price, we will use this delta compared to current
# time to grab the proper simulated price
# (we use max here so we don't get any initial NaN prices if possible)
self.delta = datetime.datetime.today() - max( [ self._gox_offline.index[0],
self._ltc_offline.index[0]])
if self.verbose: print "[*] Timedelta: %s" % self.delta
#####################################
# #
# C O I N S #
# #
#####################################
# prepare instant if necessary
if self.instant:
# seed prices with midprice
if self.ltc_opts["calc_mid"]:
filename = os.path.realpath( os.path.join( "logs",
self.filename+".midprices.pickle"))
# if midprices pickle doesn't exist, we need to generate it ... this is slow as fuck
# so we really want to have this preloaded
if os.path.exists( filename):
if self.verbose: print "[*]", "Loading midprices from %s" % filename
f = open( filename, "rb")
bas = cPickle.load( f)
else:
if self.verbose: print "[*]","Calculating midprices ..."
bas = [ pl2.bid_ask(self._ltc_depth_offline.ix[i][0],
avg=True) for i in xrange( len( self._ltc_depth_offline))]
f = open( filename, "wb")
if self.verbose: print "[*]", "Saving midprices to %s" % filename
cPickle.dump( bas, f)
self.ltc_opts["instant"] = pd.DataFrame( {"lastprice":bas},
index=[self._ltc_depth_offline.index])
# otherwise hand it lastprice
else:
self.ltc_opts["instant"] = self._ltc_offline
self.ltc = Coin( debug=self.ltc_opts["debug"],
relative=self.ltc_opts["relative"],
calc_rolling=self.ltc_opts["calc_rolling"],
rolling=self.ltc_opts["rolling"],
calc_mid=self.ltc_opts["calc_mid"],
calc_ohlc=self.ltc_opts["calc_ohlc"],
ohlc=self.ltc_opts["ohlc"],
calc_indicators=self.ltc_opts["calc_indicators"],
indicators=self.ltc_opts["indicators"],
calc_std=self.ltc_opts["calc_std"],
std=self.ltc_opts["std"],
calc_crt=self.ltc_opts["calc_crt"],
crt=self.ltc_opts["crt"],
instant=self.ltc_opts["instant"],
time_str=self.ltc_opts["time_str"],
verbose=self.verbose)
# for gox, all I want to calculate is the EMA of the last prices ...
# I chose last price, not mid, because I think that a lot of people
# are trading based on the last price ticker, not where the market
# really is.
    # prepare instant if necessary
if self.instant:
# seed prices with midprice
if self.gox_opts["calc_mid"]:
if self.verbose: print "[*]","Calculating midprices ..."
bas = [ pl2.bid_ask(self._gox_depth_offline.ix[i][0], avg=True) for i in xrange( len( self._gox_depth_offline))]
self.gox_opts["instant"] = pd.DataFrame( {"lastprice":bas}, index=[self._gox_depth_offline.index])
# otherwise hand it lastprice
else:
self.gox_opts["instant"] = self._gox_offline
self.gox = Coin( debug=self.gox_opts["debug"],
relative=self.gox_opts["relative"],
calc_rolling=self.gox_opts["calc_rolling"],
rolling=self.gox_opts["rolling"],
calc_mid=self.gox_opts["calc_mid"],
calc_ohlc=self.gox_opts["calc_ohlc"],
ohlc=self.gox_opts["ohlc"],
calc_indicators=self.gox_opts["calc_indicators"],
indicators=self.gox_opts["indicators"],
calc_std=self.gox_opts["calc_std"],
std=self.gox_opts["std"],
calc_crt=self.gox_opts["calc_crt"],
crt=self.gox_opts["crt"],
instant=self.gox_opts["instant"],
time_str=self.gox_opts["time_str"],
verbose=self.verbose)
def update( self):
""" Grab most recent prices from on/offline and append them to
our exchange data structures.
"""
#######################################################
# -- SIMULATION MODE -- #
#######################################################
# simulation mode. pull most recent price from our logs and
# append if different
if self.live == False:
#######################################################
# -- REAL TIME SIMULATION MODE -- #
#######################################################
# if warp is false, we will pretend this is realtime and
# grab prices from our logs using our timedelta
if self.warp == False:
# calculate our timedelta from NOW!!
adjusted_t = datetime.datetime.today() - self.delta
# Get our last prices from the logs
last_gox , last_ltc , last_ltc_depth = self._offline_prices( adjusted_t)
# make sure we got a timeseries object back, otherwise we
# hit the end of the log
if( type(last_gox) != pd.Series or
type(last_ltc) != pd.Series or
type(last_ltc_depth) != pd.Series):
if self.verbose: print "[!]", "End of log."
return False
# we have values, so add them to each coin
else:
# give coins new price changes ... them bitches'll do the rest
self.gox.add( last_gox[0], last_gox.name)
# bid-ask avg for LTC only
ba = pl2.bid_ask( last_ltc_depth[0])
self.ltc.add( last_ltc[0], last_ltc.name, ba=ba)
return True
#######################################################
# -- FAST MODE -- #
#######################################################
# otherwise, we'll grab our next price from the index
else:
# r we about to do something stupid? (hit end of the fucking log)
if self.logrange_n >= len(self.logrange):
if self.verbose: print "[!]", "End of log."
return False
# NO!
else:
# get our next date in our time index & grab the prices
t = self.logrange[self.logrange_n]
if self._debug:
print "\n_update"
print "t:", t
print "logrange:", self.logrange_n
last_gox, last_ltc, last_ltc_depth = self._offline_prices( t)
# get LTC market data (bid ask)
ba = pl2.bid_ask( last_ltc_depth[0])
# upd8 fuk'n coinz
if self._debug:
print "\n_update"
print "\nltc"
print "last_ltc:", last_ltc[0], last_ltc.name
print "ba:", ba
self.ltc.add( last_ltc[0], last_ltc.name, ba=ba)
if self._debug:
print "\ngox"
print "last_gox:", last_gox[0], last_gox.name
self.gox.add( last_gox[0], last_gox.name)
# increment for the next fucking time
self.logrange_n += 1
return True
def _daterange(self, start_date, end_date, step=5):
""" Give us a list of dates and times to run through in non-sim,
offline mode.
step : write a date every N seconds
"""
total_seconds = ((end_date - start_date).days) * 86400
total_seconds += ((end_date - start_date).seconds)
return [ (start_date + datetime.timedelta(seconds=int(n))) for n in np.arange(0, total_seconds, step)]
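  # Hedged illustration (not from the original source): with step=30 the
  # helper above yields one timestamp every 30 seconds, end point excluded,
  # so a one-minute span produces exactly two datetimes (00:00:00, 00:00:30).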
def _offline_prices( self, dt):
""" Return last offline prices
"""
# Get our last prices from the logs
last_gox = pl2.nearest_by_date( self._gox_offline,
dt, True)
last_ltc = pl2.nearest_by_date( self._ltc_offline,
dt, True)
last_ltc_depth = pl2.nearest_by_date( self._ltc_depth_offline,
dt, True)
return last_gox, last_ltc, last_ltc_depth
'''
## -- SAMPLE BOX -- ##
import data
import datetime
import indicators as ind
from matplotlib import pylab as plt
data = reload(data)
ind = reload(ind)
d = data.Data( warp=True)
start = datetime.datetime.now()
while d.update():
if d.logrange_n % 100 == 0:
print d.logrange_n, ((datetime.datetime.now() - start).seconds)/100.0, "per iteration"
start = datetime.datetime.now()
if d.logrange_n == 1000:
d.ltc.lastprice.join( [ d.ltc.rolling["30s"][12].rename( columns={"lastprice":"ema8"}), d.ltc.rolling["30s"][24].rename( columns={"lastprice":"ema12"}), d.ltc.rolling["30s"][50].rename( columns={"lastprice":"ema50"}), d.ltc.midprice ], how="outer").ffill().plot(); plt.show()
d.ltc.lastprice.join( [ d.ltc.ti["AMA"] ], how="outer").ffill().plot(); plt.show()
break
## -- PLOT -- ##
# prices & rollings
d.ltc.lastprice.join( [ d.ltc.rolling["1min"][8].rename( columns={"lastprice":"ema8"}), d.ltc.rolling["1min"][12].rename( columns={"lastprice":"ema12"}), d.ltc.rolling["1min"][50].rename( columns={"lastprice":"ema50"}), d.ltc.midprice ], how="outer").ffill().plot(); plt.show()
# all indicators
d.ltc.lastprice.join( [d.ltc.ti["AMA"], d.ltc.ti["RSI"], d.ltc.ti["ROC"], d.ltc.ti["CCI"]], how="outer").ffill().plot(subplots=True); plt.show()
# test AMA
ama = pd.DataFrame()
for i in range( len( d._ltc_offline)): ama = ind.AMA( ama, pd.DataFrame(), d._ltc_offline.rename( columns={"ltc_last":"lastprice"}).ix[0:i+1])
d._ltc_offline.join( [ ama ], how="outer").ffill().plot(); plt.show()
# test CCI
ind = reload(ind)
cci = pd.DataFrame()
for i in range(len(ohlc)):
cci = ind.CCI( ohlc.ix[0:i+1], cci)
cci.join( [ pd.DataFrame( ohlc.close) ], how="outer").ffill().plot(subplots=True); plt.show()
'''
| gpl-3.0 |
ntvis/tushare | tushare/datayes/fundamental.py | 16 | 18026 | # -*- coding:utf-8 -*-
"""
DataYes (通联数据) fundamental data API
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Fundamental():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def FdmtBS(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
        1. Based on the consolidated balance sheet template defined under the 2007 new accounting standards, collects balance sheet data for every accounting period in the periodic reports of Shanghai/Shenzhen listed companies since 2007;
        2. Only consolidated statement data are collected, including period-end and period-begin figures;
        3. Corrections and adjustments that listed companies make to previously published financial statements are also collected and displayed;
        4. Values in this table are in RMB yuan;
        5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTBS%(reportType, secID, ticker,
beginDate, endDate, publishDateBegin,
publishDateEnd, field))
return _ret_data(code, result)
def FdmtBSBank(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
        1. Based on the banking-industry balance sheet template defined under the 2007 new accounting standards, collects all balance sheet data disclosed with this template in the periodic reports of Shanghai/Shenzhen listed companies since 2007 (mainly banks);
        2. Only consolidated statement data are collected, including period-end and period-begin figures;
        3. Corrections and adjustments to previously published financial statements are also collected and displayed;
        4. Values in this table are in RMB yuan;
        5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTBSBANK%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtBSSecu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
        1. Based on the securities-industry balance sheet template defined under the 2007 new accounting standards, collects all balance sheet data disclosed with this template in the periodic reports of Shanghai/Shenzhen listed companies since 2007 (mainly securities firms);
        2. Only consolidated statement data are collected, including period-end and period-begin figures;
        3. Corrections and adjustments to previously published financial statements are also collected and displayed;
        4. Values in this table are in RMB yuan;
        5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTBSSECU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin,
publishDateEnd, field))
return _ret_data(code, result)
def FdmtBSIndu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
        1. Based on the general industrial-and-commercial balance sheet template defined under the 2007 new accounting standards, collects all balance sheet data disclosed with this template in the periodic reports of Shanghai/Shenzhen listed companies since 2007 (mainly general industrial and commercial companies);
        2. Only consolidated statement data are collected, including period-end and period-begin figures;
        3. Corrections and adjustments to previously published financial statements are also collected and displayed;
        4. Values in this table are in RMB yuan;
        5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTBSINDU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtBSInsu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
        1. Based on the insurance-industry balance sheet template defined under the 2007 new accounting standards, collects all balance sheet data disclosed with this template in the periodic reports of Shanghai/Shenzhen listed companies since 2007 (mainly insurers);
        2. Only consolidated statement data are collected, including period-end and period-begin figures;
        3. Corrections and adjustments to previously published financial statements are also collected and displayed;
        4. Values in this table are in RMB yuan;
        5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTBSINSU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtCF(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
        1. Based on the consolidated cash flow statement template defined under the 2007 new accounting standards, collects cash flow statement data for every accounting period in the periodic reports of Shanghai/Shenzhen listed companies since 2007;
        2. Only consolidated statement data are collected, including current-period and prior-period figures;
        3. Corrections and adjustments to previously published financial statements are also collected and displayed;
        4. Values in this table are in RMB yuan;
        5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTCF%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtCFBank(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
        1. Based on the banking-industry cash flow statement template defined under the 2007 new accounting standards, collects all cash flow statement data disclosed with this template in the periodic reports of Shanghai/Shenzhen listed companies since 2007 (mainly banks);
        2. Only consolidated statement data are collected, including current-period and prior-period figures;
        3. Corrections and adjustments to previously published financial statements are also collected and displayed;
        4. Values in this table are in RMB yuan;
        5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTCFBANK%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtCFSecu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
        1. Based on the securities-industry cash flow statement template defined under the 2007 new accounting standards, collects all cash flow statement data disclosed with this template in the periodic reports of Shanghai/Shenzhen listed companies since 2007 (mainly securities firms);
        2. Only consolidated statement data are collected, including current-period and prior-period figures;
        3. Corrections and adjustments to previously published financial statements are also collected and displayed;
        4. Values in this table are in RMB yuan;
        5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTCFSECU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtCFIndu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
        1. Based on the general industrial-and-commercial cash flow statement template defined under the 2007 new accounting standards, collects all cash flow statement data disclosed with this template in the periodic reports of Shanghai/Shenzhen listed companies since 2007 (mainly general industrial and commercial companies);
        2. Only consolidated statement data are collected, including current-period and prior-period figures;
        3. Corrections and adjustments to previously published financial statements are also collected and displayed;
        4. Values in this table are in RMB yuan;
        5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTCFINDU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtCFInsu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
        1. Based on the insurance-industry cash flow statement template defined under the 2007 new accounting standards, collects all cash flow statement data disclosed with this template in the periodic reports of Shanghai/Shenzhen listed companies since 2007 (mainly insurers);
        2. Only consolidated statement data are collected, including current-period and prior-period figures;
        3. Corrections and adjustments to previously published financial statements are also collected and displayed;
        4. Values in this table are in RMB yuan;
        5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTCFINSU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtIS(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
        1. Based on the consolidated income statement template defined under the 2007 new accounting standards, collects income statement data for every accounting period in the periodic reports of Shanghai/Shenzhen listed companies since 2007;
        2. Only consolidated statement data are collected, including current-period and prior-period figures;
        3. Corrections and adjustments to previously published financial statements are also collected and displayed;
        4. Values in this table are in RMB yuan;
        5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTIS%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtISBank(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
        1. Based on the banking-industry income statement template defined under the 2007 new accounting standards, collects all income statement data disclosed with this template in the periodic reports of Shanghai/Shenzhen listed companies since 2007 (mainly banks);
        2. Only consolidated statement data are collected, including current-period and prior-period figures;
        3. Corrections and adjustments to previously published financial statements are also collected and displayed;
        4. Values in this table are in RMB yuan;
        5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTISBANK%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtISSecu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
        1. Based on the securities-industry income statement template defined under the 2007 new accounting standards, collects all income statement data disclosed with this template in the periodic reports of Shanghai/Shenzhen listed companies since 2007 (mainly securities firms);
        2. Only consolidated statement data are collected, including current-period and prior-period figures;
        3. Corrections and adjustments to previously published financial statements are also collected and displayed;
        4. Values in this table are in RMB yuan;
        5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTISSECU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtISIndu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
        1. Based on the general industrial-and-commercial income statement template defined under the 2007 new accounting standards, collects all income statement data disclosed with this template in the periodic reports of Shanghai/Shenzhen listed companies since 2007 (mainly general industrial and commercial companies);
        2. Only consolidated statement data are collected, including current-period and prior-period figures;
        3. Corrections and adjustments to previously published financial statements are also collected and displayed;
        4. Values in this table are in RMB yuan;
        5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTISINDU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtISInsu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
        1. Based on the insurance-industry income statement template defined under the 2007 new accounting standards, collects all income statement data disclosed with this template in the periodic reports of Shanghai/Shenzhen listed companies since 2007 (mainly insurers);
        2. Only consolidated statement data are collected, including current-period and prior-period figures;
        3. Corrections and adjustments to previously published financial statements are also collected and displayed;
        4. Values in this table are in RMB yuan;
        5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTISINSU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtEe(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
        Retrieves the key financial indicators and related data from the earnings flash reports
        disclosed by listed companies for fiscal year 2007 and later, including current-period figures,
        the same period of the previous year, and the change of the current period versus the beginning
        of the period. Data are updated each quarter when the stock exchanges publish the related
        announcements, and also when a company releases such information at IPO. Exchange-disclosed
        data are updated before 9:00 each day; announcements released at midday are updated before
        12:45 each day.
"""
code, result = self.client.getData(vs.FDMTEE%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtEf(self, reportType='', secID='', ticker='', beginDate='', endDate='',
forecastType='', publishDateBegin='', publishDateEnd='', field=''):
"""
        1. Retrieves the forecasts disclosed in company announcements (2007 and later) for the next reporting period: expected revenue, net profit, net profit attributable to the parent company, basic EPS, and their expected ranges of change.
        2. Forecast figures for operating-result items are generally given as an upper and a lower bound; the upper bound is the disclosed value with the larger absolute value, the lower bound the one with the smaller absolute value.
        3. A positive value means the company expects a profit, a negative value an expected loss. If the upper and lower bounds have different signs, whether the company will be profitable is still uncertain.
        4. The forecast type follows the wording disclosed in the announcement; if no type is stated, it is inferred from the figures.
        5. Data are updated each quarter when the exchanges publish the related announcements, and also at IPO. Exchange-disclosed data are updated before 9:00 each day; midday announcements before 12:45 each day.
"""
code, result = self.client.getData(vs.FDMTEF%(reportType, secID, ticker,
beginDate, endDate, forecastType,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtISLately(self, field=''):
"""
        1. Retrieves each listed company's most recently reported figures, based on the consolidated income statement template under the 2007 new accounting standards; only consolidated statement data are collected;
        2. Corrections and adjustments to previously published financial statements are also collected and displayed;
        3. Values in this table are in RMB yuan;
        4. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTISLATELY%(field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
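# Hedged usage sketch (not part of the original module); it assumes a valid
# DataYes token has already been stored via tushare.util.upass, and the
# ticker/date values below are placeholders:
# fd = Fundamental()
# df = fd.FdmtBS(ticker='600848', beginDate='20140101', endDate='20141231')
# print(df.head())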
| bsd-3-clause |
bsipocz/astroML | astroML/clustering/mst_clustering.py | 2 | 6355 | """
Minimum Spanning Tree Clustering
"""
import numpy as np
from scipy import sparse
from sklearn.base import BaseEstimator
from sklearn.neighbors import kneighbors_graph
try:
from scipy.sparse.csgraph import (
minimum_spanning_tree, connected_components)
except ImportError:
raise ValueError("scipy v0.11 or greater required "
"for minimum spanning tree")
class HierarchicalClustering(BaseEstimator):
"""Hierarchical Clustering via Approximate Euclidean Minimum Spanning Tree
Parameters
----------
n_neighbors : int
number of neighbors of each point used for approximate Euclidean
minimum spanning tree (MST) algorithm. See Notes below.
edge_cutoff : float
specify a fraction of edges to keep when selecting clusters.
edge_cutoff should be between 0 and 1.
min_cluster_size : int, optional
specify a minimum number of points per cluster. If not specified,
all clusters will be kept.
Attributes
----------
X_train_ : ndarray
the training data
full_tree_ : sparse graph
the full approximate Euclidean MST spanning the data
cluster_graph_ : sparse graph
the final (truncated) graph showing clusters
n_components_ : int
the number of clusters found.
labels_ : int
the cluster labels for each training point. Labels range from -1
to n_components_ - 1: points labeled -1 are in the background (i.e.
their clusters were smaller than min_cluster_size)
Notes
-----
This routine uses an approximate Euclidean minimum spanning tree (MST)
to perform hierarchical clustering. A true Euclidean minimum spanning
tree naively costs O[N^3]. Graph traversal algorithms only help so much,
because all N^2 edges must be used as candidates. In this approximate
algorithm, we use k < N edges from each point, so that the cost is only
O[Nk log(Nk)]. For k = N, the approximation is exact; in practice for
well-behaved data sets, the result is exact for k << N.
"""
def __init__(self, n_neighbors=20,
edge_cutoff=0.9,
min_cluster_size=1):
self.n_neighbors = n_neighbors
self.edge_cutoff = edge_cutoff
self.min_cluster_size = min_cluster_size
def fit(self, X):
"""Fit the clustering model
Parameters
----------
X : array_like
the data to be clustered: shape = [n_samples, n_features]
"""
X = np.asarray(X, dtype=float)
self.X_train_ = X
# generate a sparse graph using the k nearest neighbors of each point
G = kneighbors_graph(X, n_neighbors=self.n_neighbors, mode='distance')
# Compute the minimum spanning tree of this graph
self.full_tree_ = minimum_spanning_tree(G, overwrite=True)
# Find the cluster labels
self.n_components_, self.labels_, self.cluster_graph_ =\
self.compute_clusters()
return self
def compute_clusters(self, edge_cutoff=None, min_cluster_size=None):
"""Compute the clusters given a trained tree
After fit() is called, this method may be called to obtain a
clustering result with a new edge_cutoff and min_cluster_size.
Parameters
----------
edge_cutoff : float, optional
specify a fraction of edges to keep when selecting clusters.
edge_cutoff should be between 0 and 1. If not specified,
self.edge_cutoff will be used.
min_cluster_size : int, optional
specify a minimum number of points per cluster. If not specified,
self.min_cluster_size will be used.
Returns
-------
n_components : int
the number of clusters found
labels : ndarray
the labels of each point. Labels range from -1 to
n_components_ - 1: points labeled -1 are in the background
(i.e. their clusters were smaller than min_cluster_size)
T_trunc : sparse matrix
the truncated minimum spanning tree
"""
if edge_cutoff is None:
edge_cutoff = self.edge_cutoff
if min_cluster_size is None:
min_cluster_size = self.min_cluster_size
if not hasattr(self, 'full_tree_'):
raise ValueError("must call fit() before calling "
"compute_clusters()")
T_trunc = self.full_tree_.copy()
# cut-off edges at the percentile given by edge_cutoff
cutoff = np.percentile(T_trunc.data, 100 * edge_cutoff)
T_trunc.data[T_trunc.data > cutoff] = 0
T_trunc.eliminate_zeros()
# find connected components
n_components, labels = connected_components(T_trunc, directed=False)
counts = np.bincount(labels)
# for all components with less than min_cluster_size points, set
# to background, and re-label the clusters
i_bg = np.where(counts < min_cluster_size)[0]
for i in i_bg:
labels[labels == i] = -1
if len(i_bg) > 0:
_, labels = np.unique(labels, return_inverse=True)
labels -= 1
n_components = labels.max() + 1
# eliminate links in T_trunc which are not clusters
Eye = sparse.eye(len(labels), len(labels))
Eye.data[0, labels < 0] = 0
T_trunc = Eye * T_trunc * Eye
return n_components, labels, T_trunc
def get_graph_segments(X, G):
"""Get graph segments for plotting a 2D graph
Parameters
----------
X : array_like
the data, of shape [n_samples, 2]
G : array_like or sparse graph
        the [n_samples, n_samples] matrix encoding the graph of connections
on X
Returns
-------
x_coords, y_coords : ndarrays
the x and y coordinates for plotting the graph. They are of size
[2, n_links], and can be visualized using
``plt.plot(x_coords, y_coords, '-k')``
"""
X = np.asarray(X)
if (X.ndim != 2) or (X.shape[1] != 2):
raise ValueError('shape of X should be (n_samples, 2)')
G = sparse.coo_matrix(G)
A = X[G.row].T
B = X[G.col].T
x_coords = np.vstack([A[0], B[0]])
y_coords = np.vstack([A[1], B[1]])
return x_coords, y_coords
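# Hedged usage sketch (not part of the original module): cluster a toy 2D
# data set and fetch the segments for plotting; the blob parameters are
# arbitrary.
#
# import numpy as np
# from sklearn.datasets import make_blobs
# X, _ = make_blobs(n_samples=300, centers=3, random_state=0)
# model = HierarchicalClustering(n_neighbors=10, edge_cutoff=0.9,
#                                min_cluster_size=10)
# model.fit(X)
# x_seg, y_seg = get_graph_segments(model.X_train_, model.cluster_graph_)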
| bsd-2-clause |
Adai0808/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 243 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM does not assume any parametric form for the data
distribution and is fitted here for comparison with both covariance-based
estimators.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list( legend1.values() )
legend1_keys_list = list( legend1.keys() )
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list( legend2.values() )
legend2_keys_list = list( legend2.keys() )
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
kensugino/jGEM | tests/test_assembler.py | 1 | 7816 | import os
import pytest
import logging
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)
import glob
import pandas as PD
import numpy as N
from jgem import assembler as AS
from jgem import gtfgffbed as GGB
from jgem import utils as UT
# TODO: rather than checking the number of elements, which can change when the algorithm changes,
# check the actual existence of elements (e.g. Snap25, Gapdh exons that should always be there)
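# A possible shape for such a presence check (a sketch only: the column names
# 'chr', 'st', 'ed' and the coordinates in the example below are illustrative
# assumptions, not values taken from a real annotation):
def _has_exon(exdf, chrom, st, ed):
    # True if at least one exon overlapping [st, ed) on chromosome chrom is present
    hit = exdf[(exdf['chr'] == chrom) & (exdf['st'] < ed) & (exdf['ed'] > st)]
    return len(hit) > 0
# e.g. inside a test: assert _has_exon(asm.me, 'chr2', 125161850, 125168000)  # hypothetical Gapdh exon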
def test_cleanup(fnobj):
fpat = fnobj.fname('*')
flist = glob.glob(fpat)
LOG.info('#files to delete {0}'.format(len(flist)))
fnobj.delete_prefixed()
flist2 = glob.glob(fpat)
assert len(flist2)==0
def test_selectsj(asm):
LOG.debug(asm.sj.head())
f = AS.SELECTSJ(asm)
assert f.fnobj == asm.fnobj
assert f.params == asm.params
chroms = ['chr1','chr2','chrX','chrM','chrZ']
df = PD.DataFrame({'chr':chroms})
assert f.chroms(df) == ['chr1','chr2','chrX']
assert len(asm.sj) == 5000
f()
#LOG.info('{0}'.format(len(asm.sj)))
assert N.sum(~asm.sj['strand'].isin(['+','-','.']))==0
assert len(asm.sj) == 4991
def test_checksjsupport(asm):
#asm.params['binth'] = 0.1
f = AS.CHECKSJSUPPORT(asm)
# assert len(asm.sj) == 4991
f()
LOG.info('{0}'.format(len(asm.sj)))
assert N.sum(~asm.sj['strand'].isin(['+','-','.']))==0
# assert len(asm.sj) == 4991
assert os.path.exists(asm.fnobj.bedname('checksjsupport.sj'))
def test_removejie(asm):
f = AS.REMOVEJIE(asm)
f()
LOG.info('{0}'.format(len(asm.sj)))
assert N.sum(~asm.sj['strand'].isin(['+','-','.']))==0
# assert len(asm.sj) == 4981
def test_sj2ex(asm):
f = AS.SJ2EX(asm)
f()
LOG.info('{0},{1}'.format(len(asm.me),len(asm.sj)))
assert N.sum(~asm.me['strand'].isin(['+','-','.']))==0
# assert len(asm.me) == 5274
# assert len(asm.sj) == 4981
def test_clusterseparator(asm):
f = AS.CLUSTERSEPARATOR(asm)
f()
LOG.info('{0},{1}'.format(len(asm.me),len(asm.sj)))
def test_mergeexons(asm):
f = AS.MERGEEXONS(asm)
f()
LOG.info('{0},{1}'.format(len(asm.me),len(asm.sj)))
assert N.sum(~asm.me['strand'].isin(['+','-','.']))==0
# assert len(asm.me) == 5341
# assert len(asm.sj) == 4981
# def test_addjie(asm):
# pass
def test_findedges2(asm):
f = AS.FINDEDGES2(asm)
# assert len(asm.sj) == 4981
# assert len(asm.me) == 5341
f()
LOG.info('{0},{1}'.format(len(asm.me),len(asm.sj)))
assert N.sum(~asm.me['strand'].isin(['+','-','.']))==0
# assert len(asm.me) == 6039
# assert len(asm.sj) == 4959
def test_findedges(asm):
f = AS.FINDEDGES(asm)
# assert len(asm.sj) == 4959
# assert len(asm.me) == 6039
f()
LOG.info('{0},{1}'.format(len(asm.me),len(asm.sj)))
assert N.sum(~asm.me['strand'].isin(['+','-','.']))==0
# assert len(asm.me) == 6039
# assert len(asm.sj) == 4959
def test_fixstrand(asm):
f = AS.FIXSTRAND(asm)
# assert len(asm.sj) == 4959
# assert len(asm.me) == 6039
f()
LOG.info('{0},{1}'.format(len(asm.me),len(asm.sj)))
assert N.sum(~asm.me['strand'].isin(['+','-','.']))==0
# assert len(asm.me) == 6039
# assert len(asm.sj) == 4959
def test_findirets(asm):
f = AS.FINDIRETS(asm)
# assert len(asm.sj) == 4959
# assert len(asm.me) == 6039
f()
LOG.info('{0},{1}'.format(len(asm.me),len(asm.sj)))
assert N.sum(~asm.me['strand'].isin(['+','-','.']))==0
# assert len(asm.me) == 6157
# assert len(asm.sj) == 4959
def test_edgefixer(asm):
f = AS.EDGEFIXER(asm)
# assert len(asm.sj) == 4959
# assert len(asm.me) == 6157
f()
LOG.info('{0},{1}'.format(len(asm.me),len(asm.sj)))
assert N.sum(~asm.me['strand'].isin(['+','-','.']))==0
# assert len(asm.me) == 5753
# assert len(asm.sj) == 4959
def test_findsecovth(asm):
f = AS.FINDSECOVTH(asm)
# assert len(asm.sj) == 4959
# assert len(asm.me) == 5753
f()
LOG.info('{0},{1}'.format(len(asm.me),len(asm.sj)))
assert N.sum(~asm.me['strand'].isin(['+','-','.']))==0
# assert len(asm.me) == 5753
# assert len(asm.sj) == 4959
# assert abs(asm.secovth - 0.4999999999999999)<1e-6
def test_findse(asm):
f = AS.FINDSE(asm)
# assert len(asm.sj) == 4959
# assert len(asm.me) == 5753
f()
LOG.info('{0},{1},{2},{3}'.format(len(asm.me),len(asm.sj),len(asm.ae),len(asm.se)))
assert N.sum(~asm.sj['strand'].isin(['+','-','.']))==0
assert N.sum(~asm.ae['strand'].isin(['+','-','.']))==0
# assert len(asm.me) == 6083
# assert len(asm.sj) == 4959
# assert len(asm.ae) == 11055
# assert len(asm.se) == 4972
def test_find53ir(asm):
f = AS.FIND53IR(asm)
# assert len(asm.sj) == 4959
# assert len(asm.me) == 6083
f()
LOG.info('{0},{1},{2},{3}'.format(len(asm.me),len(asm.sj),len(asm.ae),len(asm.se)))
assert N.sum(~asm.ae['strand'].isin(['+','-','.']))==0
# assert len(asm.ae) == 7359 #7382
# assert len(asm.sj) == 4959
def test_calccov(asm):
f = AS.CALCCOV(asm)
f()
LOG.info('{0},{1},{2},{3}'.format(len(asm.me),len(asm.sj),len(asm.ae),len(asm.se)))
assert N.sum(~asm.ae['strand'].isin(['+','-','.']))==0
assert 'cov' in asm.ae.columns
def test_setinfo(asm):
f = AS.SETINFO(asm)
f()
# LOG.info('{0},{1},{2},{3}'.format(len(asm.me),len(asm.sj),len(asm.ae),len(asm.se)))
assert N.sum(~asm.ae['strand'].isin(['+','-','.']))==0
assert 'd_id' in asm.ae.columns
assert 'cat' in asm.ae.columns
def test_findgenes(asm):
f = AS.FINDGENES(asm)
f()
# LOG.info('{0},{1},{2},{3}'.format(len(asm.me),len(asm.sj),len(asm.ae),len(asm.se)))
assert N.sum(~asm.ae['strand'].isin(['+','-','.']))==0
assert '_gidx' in asm.ae.columns
assert 'gname' in asm.ae.columns
# assert len(asm.genes) == 2008
def test_selectseme(asm):
f = AS.SELECTSEME(asm)
UT.set_exon_category(asm.sj,asm.ae)
f()
# LOG.info('{0},{1},{2},{3}'.format(len(asm.me),len(asm.sj),len(asm.ae),len(asm.se)))
# assert len(asm.ae) == 6169 #6153
def test_fixedges2(asm):
f = AS.FIXEDGES2(asm)
f()
# LOG.info('{0},{1},{2},{3}'.format(len(asm.me),len(asm.sj),len(asm.ae),len(asm.se)))
# assert len(asm.ae) == 6338 #6311
def test_writesjex(asm):
f = AS.WRITESJEX(asm)
f()
assert os.path.exists(asm.fnobj.txtname('sj'))
assert os.path.exists(asm.fnobj.txtname('ex'))
def test_writegenes(asm, outdir):
f = AS.WRITEGENES(asm)
GBED = os.path.abspath(os.path.join(outdir, 'Fev_DR_m70_1623.chr1.17mb.genes.bed.gz'))
assert GBED not in asm.fnobj._fnames['temp']
assert GBED not in asm.fnobj._fnames['output']
f()
assert os.path.exists(asm.fnobj.bedname('genes', category='read'))
assert GBED not in asm.fnobj._fnames['temp']
assert GBED in asm.fnobj._fnames['output']
def test_delete(asm):
assert os.path.exists(asm.fnobj.txtname('sj'))==True
asm.fnobj.delete(delete=['temp'], protect=['output'])
assert os.path.exists(asm.fnobj.txtname('sj'))==True
asm.fnobj.delete(delete=['output'])
assert os.path.exists(asm.fnobj.txtname('sj'))==False
def test_assembler0(fnobj):
# instantiation (__init__)
asm1 = AS.Assembler(fnobj, False, False)
fname = fnobj.fname('assemble.params.txt')
if os.path.exists(fname):
os.unlink(fname)
assert asm1.params['merging'] == False
# check_params
asm1.check_params()
assert asm1.params['checksjsupport'] == False
assert asm1.params['override'] == False
assert os.path.exists(fname) == True
asm2 = AS.Assembler(fnobj, True, True)
assert asm2.params['merging'] == True
asm2.check_params()
assert asm2.params['checksjsupport'] == True
assert asm2.params['override'] == True
asm3 = AS.Assembler(fnobj, False, True, mpth=0.5, binth=0.1)
assert asm3.params['mpth'] == 0.5
assert asm3.params['binth'] == 0.1
asm3.check_params()
assert asm3.params['checksjsupport'] == True
assert asm3.params['override'] == True
asm3.params['newparam'] = 1
asm3.check_params()
assert asm3.params['checksjsupport'] == True
assert asm3.params['override'] == True
def test_assembler1(fnobj):
asm1 = AS.Assembler(fnobj, False, False)
asm1.assemble()
assert os.path.exists(asm1.fnobj.bedname('genes', category='read'))
| mit |
equialgo/scikit-learn | examples/feature_stacker.py | 80 | 1911 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
| bsd-3-clause |
berkeley-stat159/project-delta | code/tests/test_hypothesis.py | 1 | 2785 | """
Test functionality of hypothesis module
Tests can be run from the main project directory with:
nosetests code/tests/test_hypothesis.py
"""
from __future__ import absolute_import, division, print_function
from nose.tools import assert_raises
from sklearn.linear_model import LogisticRegression
import nibabel as nib
import numpy as np
import numpy.linalg as npl
from nose.tools import assert_equal
from scipy.stats import gamma
import sys, os
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
sys.path.append("code/utils")
from hypothesis import *
from make_class import *
def test_ttest():
# Make temporary data files
jarrods_toys = "http://www.jarrodmillman.com/rcsds/_downloads/"
bold = jarrods_toys + "ds114_sub009_t2r1.nii"
with open("ds114_sub009_t2r1.nii", "wb") as outfile:
outfile.write(urlopen(bold).read())
conv = jarrods_toys + "ds114_sub009_t2r1_conv.txt"
with open("ds114_sub009_t2r1_conv.txt", "wb") as outfile:
outfile.write(urlopen(conv).read())
# Load BOLD and convolved data, excluding first four volumes
data = nib.load("ds114_sub009_t2r1.nii").get_data()[..., 4:]
convolved = np.loadtxt("ds114_sub009_t2r1_conv.txt")[4:]
# Construct design matrix:
# Column one is the convolved data, and
# Column two is a vector of ones
design = np.ones((len(convolved), 2))
design[:, 0] = convolved
# Reshape the 4D data to voxel by time 2D
# Transpose to give time by voxel 2D
data_2d = np.reshape(data, (-1, data.shape[-1]))
betas = npl.pinv(design).dot(data_2d.T)
# Reshape into 4D array
betas_4d = np.reshape(betas.T, data.shape[:-1] + (-1,))
# Perform and assess the validity of the t-test
t1, p1 = ttest(design, betas, data_2d)
assert np.all(p1 >= 0)
assert np.all(p1 <= 1)
# Delete temporary test files
os.remove("ds114_sub009_t2r1.nii")
os.remove("ds114_sub009_t2r1_conv.txt")
def test_waldtest():
# Load the dummy dataset into the Python environment
obj = ds005("test", "001")
# Create necessary input variables
design_matrix = obj.design_matrix()
log_model = LogisticRegression().fit(design_matrix, obj.behav[:, 5])
beta_hat = log_model.coef_.ravel()
probability_estimates = log_model.predict_proba(design_matrix)
# Assess proper raising of assertion
design_matrix_wrong_dim = ds005("test", "001",
rm_nonresp=False).design_matrix()
assert_raises(AssertionError, waldtest, design_matrix_wrong_dim, beta_hat,
probability_estimates)
# Expect none to be statistically significant
p_values = waldtest(design_matrix, beta_hat, probability_estimates)
for p_value in p_values: assert p_value > 0.05
| bsd-3-clause |
gVallverdu/myScripts | GAUSSIAN/spectre.py | 1 | 4487 | #!/usr/bin/env python
# -*- coding=utf-8 -*-
"""
Plot a UV-visible spectrum from the output of a TD-DFT calculation. To each
transition, a gaussian function is added with a given width (in energy) and
an area equal to the oscillator strength.
Syntax:
spectre.py fichier [sigma [step]]
fichier : Gaussian log file to read (required)
sigma : width of the gaussian broadening, in eV (optional)
step : energy step used to build the spectrum, in eV (optional)
"""
__author__ = "Germain Vallverdu <[email protected]>"
__licence__ = "GPL"
__date__ = "Janvier 2014"
import os
import sys
import scipy as sp
import scipy.constants as cst
import matplotlib.pyplot as plt
from matplotlib.mlab import normpdf
def readTransitions(fichier):
""" read excitations energies from a gaussian log file
:param fichier: Name of the gaussian log file
:type fichier: string
:return: List of excitation energies, wavelength and oscillator strength
:rtype: List
"""
transitions = list()
td = False
for line in open(fichier, "r"):
if "Excitation energies" in line:
td = True
continue
if td:
if "Excited State" in line:
ene = float(line.split()[4])
lam = float(line.split()[6])
f = float(line.split("f=")[1])
transitions.append([ene, lam, f])
elif "****" in line:
break
for i, trans in enumerate(transitions):
print("%2d E = %8.4f eV ; L = %8.2f nm ; f = %6.4f" % (i+1, trans[0], trans[1], trans[2]))
return transitions
def makeSpectre(transitions, sigma, step):
""" Build a spectrum from transitions energies. For each transitions a gaussian
function of width sigma is added in order to mimick natural broadening.
:param transitions: list of transitions for readTransitions()
:type transititions: list
:param sigma: gaussian width in eV
:type sigma: float
:param step: number of absissa value
:type step: int
:return: absissa and spectrum value in this order
:rtype: list, list
"""
# max and min transition energies
minval = min([val[0] for val in transitions]) - 5.0 * sigma
maxval = max([val[0] for val in transitions]) + 5.0 * sigma
# points
npts = int((maxval - minval) / step) + 1
    # abscissa values
eneval = sp.linspace(minval, maxval, npts)
spectre = sp.zeros(npts)
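    # each transition contributes a normalized gaussian centered at its energy and
    # scaled by its oscillator strength f, so the area under each peak equals f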
for trans in transitions:
spectre += trans[2] * normpdf(eneval, trans[0], sigma)
return eneval, spectre
def plotSpectre(transitions, eneval, spectre):
""" plot the UV-visible spectrum using matplotlib. Absissa are converted in nm. """
# lambda in nm
lambdaval = [cst.h * cst.c / (val * cst.e) * 1.e9 for val in eneval]
# plot gaussian spectra
plt.plot(lambdaval, spectre, "r-", label = "spectre")
# plot transitions
plt.vlines([val[1] for val in transitions], \
0., \
[val[2] for val in transitions], \
color = "blue", \
label = "transitions" )
plt.xlabel("lambda / nm")
plt.ylabel("Arbitrary unit")
plt.title("UV-visible spectra")
plt.grid()
plt.legend(fancybox = True, shadow = True)
plt.show()
def spectre(fichier, sigma, step):
""" call previous routine and make the spectra """
# read transitions
transitions = readTransitions(fichier)
# build spectrum
eneval, spectre = makeSpectre(transitions, sigma, step)
# plot spectre
plotSpectre(transitions, eneval, spectre)
line = "# Spectre\n"
line += "# column 1 : energies in eV\n"
line += "# column 2 : spectre (arbitrary unit)\n"
for x, y in zip(eneval, spectre):
line += "%12.6f %12.6f\n" % (x, y)
open("spectre.dat", "w").write(line)
if __name__ == "__main__":
fichier = sys.argv[1]
if not os.path.exists(fichier):
print("File %s does not exist" % fichier)
try:
sigma = float(sys.argv[2])
except IndexError:
sigma = 0.05
try:
step = float(sys.argv[3])
except IndexError:
step = 0.001
print("--------------------------------------")
print("File : %s" % fichier)
print("sigma : %8.4f" % sigma)
print("step : %8.4f" % step)
print("--------------------------------------")
spectre(fichier, sigma, step)
| gpl-2.0 |
ZenDevelopmentSystems/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat, infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.21/_downloads/49233ee4b0f10bfc9527702e6fffc90d/plot_ecog.py | 1 | 6711 | """
.. _tut_working_with_ecog:
======================
Working with ECoG data
======================
MNE supports working with more than just MEG and EEG data. Here we show some
of the functions that can be used to facilitate working with
electrocorticography (ECoG) data.
"""
# Authors: Eric Larson <[email protected]>
# Chris Holdgraf <[email protected]>
# Adam Li <[email protected]>
#
# License: BSD (3-clause)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import mne
from mne.viz import plot_alignment, snapshot_brain_montage
print(__doc__)
# paths to mne datasets - sample ECoG and FreeSurfer subject
misc_path = mne.datasets.misc.data_path()
sample_path = mne.datasets.sample.data_path()
subject = 'sample'
subjects_dir = sample_path + '/subjects'
###############################################################################
# Let's load some ECoG electrode locations and names, and turn them into
# a :class:`mne.channels.DigMontage` class. First, use pandas to read in the
# ``.tsv`` file.
# In this tutorial, the electrode coordinates are assumed to be in meters
elec_df = pd.read_csv(misc_path + '/ecog/sample_ecog_electrodes.tsv',
sep='\t', header=0, index_col=None)
ch_names = elec_df['name'].tolist()
ch_coords = elec_df[['x', 'y', 'z']].to_numpy(dtype=float)
ch_pos = dict(zip(ch_names, ch_coords))
# Ideally the nasion/LPA/RPA will also be present from the digitization, here
# we use fiducials estimated from the subject's FreeSurfer MNI transformation:
lpa, nasion, rpa = mne.coreg.get_mni_fiducials(
subject, subjects_dir=subjects_dir)
lpa, nasion, rpa = lpa['r'], nasion['r'], rpa['r']
###############################################################################
# Now we make a :class:`mne.channels.DigMontage` stating that the ECoG
# contacts are in the FreeSurfer surface RAS (i.e., MRI) coordinate system.
montage = mne.channels.make_dig_montage(
ch_pos, coord_frame='mri', nasion=nasion, lpa=lpa, rpa=rpa)
print('Created %s channel positions' % len(ch_names))
###############################################################################
# Now we get the :term:`trans` that transforms from our MRI coordinate system
# to the head coordinate frame. This transform will be applied to the
# data when applying the montage so that standard plotting functions like
# :func:`mne.viz.plot_evoked_topomap` will be aligned properly.
trans = mne.channels.compute_native_head_t(montage)
print(trans)
###############################################################################
# Now that we have our montage, we can load in our corresponding
# time-series data and set the montage to the raw data.
# first we'll load in the sample dataset
raw = mne.io.read_raw_edf(misc_path + '/ecog/sample_ecog.edf')
# drop bad channels
raw.info['bads'].extend([ch for ch in raw.ch_names if ch not in ch_names])
raw.load_data()
raw.drop_channels(raw.info['bads'])
raw.crop(0, 2) # just process 2 sec of data for speed
# attach montage
raw.set_montage(montage)
###############################################################################
# We can then plot the locations of our electrodes on our subject's brain.
# We'll use :func:`~mne.viz.snapshot_brain_montage` to save the plot as image
# data (along with xy positions of each electrode in the image), so that later
# we can plot frequency band power on top of it.
#
# .. note:: These are not real electrodes for this subject, so they
# do not align to the cortical surface perfectly.
fig = plot_alignment(raw.info, subject=subject, subjects_dir=subjects_dir,
surfaces=['pial'], trans=trans, coord_frame='mri')
mne.viz.set_3d_view(fig, 200, 70, focalpoint=[0, -0.005, 0.03])
xy, im = snapshot_brain_montage(fig, montage)
###############################################################################
# Next, we'll compute the signal power in the gamma (30-90 Hz) and alpha
# (8-12 Hz) bands.
gamma_power_t = raw.copy().filter(30, 90).apply_hilbert(
envelope=True).get_data()
alpha_power_t = raw.copy().filter(8, 12).apply_hilbert(
envelope=True).get_data()
gamma_power = gamma_power_t.mean(axis=-1)
alpha_power = alpha_power_t.mean(axis=-1)
###############################################################################
# Now let's use matplotlib to overplot frequency band power onto the electrodes
# which can be plotted on top of the brain from
# :func:`~mne.viz.snapshot_brain_montage`.
# Convert from a dictionary to array to plot
xy_pts = np.vstack([xy[ch] for ch in raw.info['ch_names']])
# colormap to view spectral power
cmap = 'viridis'
# Create a 1x2 figure showing the average power in gamma and alpha bands.
fig, axs = plt.subplots(1, 2, figsize=(20, 10))
# choose a colormap range wide enough for both frequency bands
_gamma_alpha_power = np.concatenate((gamma_power, alpha_power)).flatten()
vmin, vmax = np.percentile(_gamma_alpha_power, [10, 90])
for ax, band_power, band in zip(axs,
[gamma_power, alpha_power],
['Gamma', 'Alpha']):
ax.imshow(im)
ax.set_axis_off()
sc = ax.scatter(*xy_pts.T, c=band_power, s=200,
cmap=cmap, vmin=vmin, vmax=vmax)
ax.set_title(f'{band} band power', size='x-large')
fig.colorbar(sc, ax=axs)
###############################################################################
# Say we want to visualize the evolution of the power in the gamma band,
# instead of just plotting the average. We can use
# `matplotlib.animation.FuncAnimation` to create an animation and apply this
# to the brain figure.
# create an initialization and animation function
# to pass to FuncAnimation
def init():
"""Create an empty frame."""
return paths,
def animate(i, activity):
"""Animate the plot."""
paths.set_array(activity[:, i])
return paths,
# create the figure and apply the animation of the
# gamma frequency band activity
fig, ax = plt.subplots(figsize=(10, 10))
ax.imshow(im)
ax.set_axis_off()
paths = ax.scatter(*xy_pts.T, c=np.zeros(len(xy_pts)), s=200,
cmap=cmap, vmin=vmin, vmax=vmax)
fig.colorbar(paths, ax=ax)
ax.set_title('Gamma frequency over time (Hilbert transform)',
size='large')
# sphinx_gallery_thumbnail_number = 3
# avoid edge artifacts and decimate, showing just a short chunk
show_power = gamma_power_t[:, 100:-1700:2]
anim = animation.FuncAnimation(fig, animate, init_func=init,
fargs=(show_power,),
frames=show_power.shape[1],
interval=100, blit=True)
| bsd-3-clause |
osigaud/ArmModelPython | Cython/MotorControlModel/Plot/plotFunctions.py | 3 | 28873 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Author: Thomas Beucher
Module: plotFunctions
Description: some plotting functions
'''
import os
import random as rd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import animation
from matplotlib.mlab import griddata
plt.rc("figure", facecolor="white")
from Utils.FileReading import getStateData, getEstimatedStateData, getEstimatedXYHandData, getXYHandData, getXYEstimError, getXYEstimErrorOfSpeed, getXYElbowData, getCommandData, getNoiselessCommandData, getInitPos, getCostData, getTrajTimeData, getTrajTimeData, getLastXData
from Utils.ReadSetupFile import ReadSetupFile
from ArmModel.Arm import Arm, getDotQAndQFromStateVector
from GlobalVariables import BrentTrajectoriesFolder, pathDataFolder
#--------------------------- trajectory animations ---------------------------------------------------------------------------------------------
def trajectoriesAnimation(what, foldername = "None", targetSize = "0.05"):
rs = ReadSetupFile()
if what == "CMAES":
name = rs.CMAESpath + targetSize + "/" + foldername + "/Log/"
elif what == "Brent":
name = BrentTrajectoriesFolder
else:
name = rs.RBFNpath + foldername + "/Log/"
ec = getXYElbowData(name)
hc = getXYHandData(name)
posIni = np.loadtxt(pathDataFolder + rs.experimentFilePosIni)
xEl, yEl, xHa, yHa = [], [], [], []
for key, val in ec.items():
for el in val:
xEl.append(el[0])
yEl.append(el[1])
for elhc in hc[key]:
xHa.append(elhc[0])
yHa.append(elhc[1])
fig = plt.figure()
upperArm, = plt.plot([],[])
foreArm, = plt.plot([],[])
plt.xlim(-0.7, 0.7)
plt.ylim(-0.7,0.7)
plt.plot([-0.7,0.7], [rs.YTarget, rs.YTarget])
plt.scatter([-rs.sizeOfTarget[3]/2, rs.sizeOfTarget[3]/2], [rs.YTarget, rs.YTarget], c ='g', marker='o', s=50)
plt.scatter([el[0] for el in posIni],[el[1] for el in posIni], c='b')
def init():
upperArm.set_data([0], [0])
foreArm.set_data([xEl[0]], [yEl[0]])
return upperArm, foreArm
def animate(i):
xe = (0, xEl[i])
ye = (0, yEl[i])
xh = (xEl[i], xHa[i])
yh = (yEl[i], yHa[i])
upperArm.set_data(xe, ye)
foreArm.set_data(xh, yh)
return upperArm, foreArm
ani = animation.FuncAnimation(fig, animate, init_func=init, frames=len(xEl), blit=True, interval=20, repeat=True)
plt.show(block = True)
#----------------------------------------------------------------------------------------------------------------------------
#Functions related to plotting initial positions
def makeInitPlot(rs,filename):
x0 = []
y0 = []
#posIni = np.loadtxt(pathDataFolder + rs.experimentFilePosIni)
posIni = np.loadtxt(pathDataFolder + filename)
for el in posIni:
x0.append(el[0])
y0.append(el[1])
#print "distance to target: " + str(rs.getDistanceToTarget(el[0],el[1]))
#xy = getInitPos(BrentTrajectoriesFolder)
xy = getInitPos(pathDataFolder+"TrajRepository/")
x, y = [], []
aa, keyy = [], []
for key, el in xy.items():
x.append(el[0])
y.append(el[1])
plt.scatter(x, y, c = "b", marker=u'o', s=10, cmap=cm.get_cmap('RdYlBu'))
plt.scatter(rs.XTarget, rs.YTarget, c = "r", marker=u'*', s = 100)
plt.scatter(x0, y0, c = "r", marker=u'o', s=25)
def plotInitPos(filename):
'''
Plots the initial position of trajectories present in the Brent directory
'''
plt.figure()
rs = ReadSetupFile()
makeInitPlot(rs,filename)
plt.show(block = True)
#----------------------------------------------------------------------------------------------------------------------------
#Functions related to velocity profiles
def makeVelocityData(rs,arm,name,media):
state = getStateData(name)
factor = min(1, 100./len(state.items()))
for k,v in state.items():
index, speed = [], []
if rd.random()<factor:
handxy = arm.mgdEndEffector([v[0][2],v[0][3]])
distance = round(rs.getDistanceToTarget(handxy[0],handxy[1]),2)
for j in range(len(v)):
index.append(j*rs.dt)
speed.append(arm.cartesianSpeed(v[j]))
if distance<=0.15:
media.plot(index, speed, c ='blue')
elif distance<=0.28:
media.plot(index, speed, c ='green')
else:
media.plot(index, speed, c ='red')
def plotVelocityProfile(what, foldername = "None"):
rs = ReadSetupFile()
arm = Arm()
plt.figure(1, figsize=(16,9))
if what == "CMAES":
for i in range(4):
ax = plt.subplot2grid((2,2), (i/2,i%2))
name = rs.CMAESpath + str(rs.sizeOfTarget[i]) + "/" + foldername + "/Log/"
makeVelocityData(rs,arm,name,ax)
ax.set_xlabel("time (s)")
ax.set_ylabel("Instantaneous velocity (m/s)")
ax.set_title(str("Velocity profiles for target " + str(rs.sizeOfTarget[i])))
else:
if what == "Brent":
name = BrentTrajectoriesFolder
else:
name = rs.RBFNpath + foldername + "/Log/"
makeVelocityData(rs,arm,name,plt)
plt.xlabel("time (s)")
plt.ylabel("Instantaneous velocity (m/s)")
plt.title("Velocity profiles for " + what)
plt.savefig("ImageBank/"+what+'_velocity_profiles'+foldername+'.png', bbox_inches='tight')
plt.show(block = True)
# ------------------------- positions, trajectories ---------------------------------
# factor is used to plot no more than 100 trajectories. If there are more, they are drawn randomly
def plotPos(name, media, plotEstim):
states = getXYHandData(name)
factor = min(1, 100./len(states.items()))
for k,v in states.items():
if rd.random()<factor:
posX, posY = [], []
for j in range(len(v)):
posX.append(v[j][0])
posY.append(v[j][1])
media.plot(posX,posY, c ='b')
if plotEstim==True:
estimStates = getEstimatedXYHandData(name)
for k,v in estimStates.items():
if rd.random()<factor:
eX, eY = [], []
for j in range(len(v)):
eX.append(v[j][0])
eY.append(v[j][1])
media.plot(eX,eY, c ='r')
def plotEstimError(rs,name, media):
errors = getXYEstimError(name)
factor = min(1, 100./len(errors.items()))
for k,v in errors.items():
if rd.random()<factor:
index, er = [], []
for j in range(len(v)):
# for j in range(20):
# index.append(j*rs.dt)
index.append(j)
er.append(v[j])
media.plot(index,er, c ='b')
def plotEstimErrorOfSpeed(name, media):
errors = getXYEstimErrorOfSpeed(name)
factor = min(1, 100./len(errors.items()))
for k,v in errors.items():
if rd.random()<factor:
speed, er = [], []
for j in range(len(v)):
speed.append(v[j][0])
er.append(v[j][1])
media.plot(speed,er, c ='b')
def plotTrajsInRepo():
rs = ReadSetupFile()
plt.figure(1, figsize=(16,9))
plotPos(pathDataFolder+"TrajRepository/", plt, False)
plt.xlabel("X (m)")
plt.ylabel("Y (m)")
plt.title("XY Positions")
plt.savefig("ImageBank/TrajRepo.png", bbox_inches='tight')
#plt.savefig("ImageBank/"+what+'_trajectories.png')
plt.show(block = True)
def plotXYPositions(what, foldername = "None", targetSize = "All", plotEstim=False):
rs = ReadSetupFile()
plt.figure(1, figsize=(16,9))
if what == "CMAES" and targetSize == "All":
for i in range(len(rs.sizeOfTarget)):
ax = plt.subplot2grid((2,2), (i/2,i%2))
name = rs.CMAESpath + str(rs.sizeOfTarget[i]) + "/" + foldername + "/Log/"
plotPos(name, ax, plotEstim)
#makeInitPlot(rs)
ax.set_xlabel("X (m)")
ax.set_ylabel("Y (m)")
ax.set_title("XY Positions for target " + str(rs.sizeOfTarget[i]))
else:
if what == "CMAES":
name = rs.CMAESpath + targetSize + "/" + foldername + "/Log/"
elif what == "Brent":
name = BrentTrajectoriesFolder
else:
name = rs.RBFNpath + foldername + "/Log/"
plotPos(name, plt, plotEstim)
#makeInitPlot(rs)
plt.xlabel("X (m)")
plt.ylabel("Y (m)")
plt.title("XY Positions for " + what)
plt.savefig("ImageBank/"+what+'_trajectories'+foldername+'.png', bbox_inches='tight')
plt.show(block = True)
def plotXYEstimError(what, foldername = "None", targetSize = "All"):
rs = ReadSetupFile()
plt.figure(1, figsize=(16,9))
if what == "CMAES" and targetSize == "All":
for i in range(len(rs.sizeOfTarget)):
ax = plt.subplot2grid((2,2), (i/2,i%2))
name = rs.CMAESpath + str(rs.sizeOfTarget[i]) + "/" + foldername + "/Log/"
plotEstimError(rs,name, ax)
#makeInitPlot(rs)
ax.set_xlabel("Time (s)")
ax.set_ylabel("Estimation error (m)")
ax.set_title("Estimation error for target " + str(rs.sizeOfTarget[i]))
else:
if what == "CMAES":
name = rs.CMAESpath + targetSize + "/" + foldername + "/Log/"
elif what == "Brent":
name = BrentTrajectoriesFolder
else:
name = rs.RBFNpath + foldername + "/Log/"
plotEstimError(rs,name, plt)
#makeInitPlot(rs)
plt.xlabel("Time (s)")
plt.ylabel("Estimation error (m)")
plt.title("Estimation error Positions for " + what)
plt.savefig("ImageBank/"+what+'_estimError'+foldername+'.png', bbox_inches='tight')
plt.show(block = True)
def plotXYEstimErrorOfSpeed(what, foldername = "None", targetSize = "All"):
rs = ReadSetupFile()
plt.figure(1, figsize=(16,9))
if what == "CMAES" and targetSize == "All":
for i in range(len(rs.sizeOfTarget)):
ax = plt.subplot2grid((2,2), (i/2,i%2))
name = rs.CMAESpath + str(rs.sizeOfTarget[i]) + "/" + foldername + "/Log/"
plotEstimErrorOfSpeed(name, ax)
#makeInitPlot(rs)
ax.set_xlabel("Velocity (m/s)")
ax.set_ylabel("Estimation error (m)")
ax.set_title("Estimation error function of velocity for target " + str(rs.sizeOfTarget[i]))
else:
if what == "CMAES":
name = rs.CMAESpath + targetSize + "/" + foldername + "/Log/"
elif what == "Brent":
name = BrentTrajectoriesFolder
else:
name = rs.RBFNpath + foldername + "/Log/"
plotEstimErrorOfSpeed(name, plt)
#makeInitPlot(rs)
plt.xlabel("Velocity (m/s)")
plt.ylabel("Estimation error (m)")
plt.title("Estimation error function of velocity for " + what)
plt.savefig("ImageBank/"+what+'_estimError'+foldername+'.png', bbox_inches='tight')
plt.show(block = True)
def plotArticularPositions(what, foldername = "None", targetSize = "0.05"):
rs = ReadSetupFile()
if what == "CMAES":
name = rs.CMAESpath + targetSize + "/" + foldername + "/Log/"
elif what == "Brent":
name = BrentTrajectoriesFolder
else:
name = rs.RBFNpath + foldername + "/Log/"
state = getStateData(name)
plt.figure(1, figsize=(16,9))
for k,v in state.items():
if rd.random()<0.06 or what != "Brent":
Q1, Q2 = [], []
for j in range(len(v)):
Q1.append(v[j][2])
Q2.append(v[j][3])
plt.plot(Q1,Q2, c ='b')
plt.xlabel("Q1 (rad)")
plt.ylabel("Q2 (rad)")
plt.title("Articular positions for " + what)
plt.savefig("ImageBank/"+what+'_articular'+foldername+'.png', bbox_inches='tight')
plt.show(block = True)
#------------------ muscular activations --------------------------------
def plotMuscularActivations(what, foldername = "None", targetSize = "0.05"):
'''
plots the muscular activations from a folder
input: -foldername: the folder where the data lies
-what: get from Brent, rbfn or from cmaes controllers
'''
rs = ReadSetupFile()
if what == "CMAES":
name = rs.CMAESpath + targetSize + "/" + foldername + "/Log/"
elif what == "Brent":
name = BrentTrajectoriesFolder
else:
name = rs.RBFNpath + foldername + "/Log/"
U = getNoiselessCommandData(name)
for key, el1 in U.items():
t = []
u1, u2, u3, u4, u5, u6 = [], [], [], [], [], []
if rd.random()<0.01 or what != "Brent":
for i in range(len(el1)):
t.append(i)
u1.append(el1[i][0])
u2.append(el1[i][1])
u3.append(el1[i][2])
u4.append(el1[i][3])
u5.append(el1[i][4])
u6.append(el1[i][5])
plt.figure()
plt.plot(t, u1, label = "U1")
plt.plot(t, u2, label = "U2")
plt.plot(t, u3, label = "U3")
plt.plot(t, u4, label = "U4")
plt.plot(t, u5, label = "U5")
plt.plot(t, u6, label = "U6")
plt.legend(loc = 0)
plt.xlabel("time")
plt.ylabel("U")
plt.title("Muscular Activations for " + what)
plt.savefig("ImageBank/"+what+"_muscu" + key +foldername + ".png", bbox_inches='tight')
print key
            val = raw_input('1 to see data, anything otherwise: ')
            if val.strip() == '1':
                print el1
#plt.clf()
plt.show(block = True)
#-------------------------- cost maps ----------------------------------------------
def plotCostColorMap(what, foldername = "None", targetSize = "All"):
'''
    Plots the cost profile of the trajectories.
    Inputs: -what: which data set to display
'''
rs = ReadSetupFile()
fig = plt.figure(1, figsize=(16,9))
if what == "CMAES" and targetSize == "All":
for i in range(len(rs.sizeOfTarget)):
ax = plt.subplot2grid((2,2), (i/2,i%2))
name = rs.CMAESpath + str(rs.sizeOfTarget[i]) + "/" + foldername + "/Cost/"
costs = getCostData(name)
x0 = []
y0 = []
cost = []
for k, v in costs.items():
for j in range(len(v)):
x0.append(v[j][0])
y0.append(v[j][1])
cost.append(v[j][2])
xi = np.linspace(-0.4,0.4,100)
yi = np.linspace(0.12,0.58,100)
zi = griddata(x0, y0, cost, xi, yi)
t1 = ax.scatter(x0, y0, c=cost, marker=u'o', s=5, cmap=cm.get_cmap('RdYlBu'))
ax.scatter(rs.XTarget, rs.YTarget, c ='g', marker='v', s=200)
CS = ax.contourf(xi, yi, zi, 15, cmap=cm.get_cmap('RdYlBu'))
fig.colorbar(t1, shrink=0.5, aspect=5)
t1 = ax.scatter(x0, y0, c='b', marker=u'o', s=20)
ax.set_xlabel("X (m)")
ax.set_ylabel("Y (m)")
ax.set_title(str("Cost map for target " + str(rs.sizeOfTarget[i])))
else:
if what == "CMAES":
name = rs.CMAESpath + targetSize + "/" + foldername + "/Cost/"
elif what == "Brent":
name = BrentTrajectoriesFolder
else:
name = rs.RBFNpath + foldername + "/Cost/"
costs = getCostData(name)
x0 = []
y0 = []
cost = []
for k, v in costs.items():
for j in range(len(v)):
x0.append(v[j][0])
y0.append(v[j][1])
cost.append(v[j][2])
xi = np.linspace(-0.4,0.4,100)
yi = np.linspace(0.12,0.58,100)
zi = griddata(x0, y0, cost, xi, yi)
t1 = plt.scatter(x0, y0, c=cost, marker=u'o', s=5, cmap=cm.get_cmap('RdYlBu'))
plt.scatter(rs.XTarget, rs.YTarget, c ='g', marker='v', s=200)
CS = plt.contourf(xi, yi, zi, 15, cmap=cm.get_cmap('RdYlBu'))
fig.colorbar(t1, shrink=0.5, aspect=5)
t1 = plt.scatter(x0, y0, c='b', marker=u'o', s=20)
plt.xlabel("X (m)")
plt.ylabel("Y (m)")
plt.title("Cost map for " + what)
plt.savefig("ImageBank/"+what+'_costmap'+foldername+'.png', bbox_inches='tight')
plt.show(block = True)
#-------------------------- time maps ----------------------------------------------
def plotTimeColorMap(what, foldername = "None", targetSize = "All"):
'''
    Plots the movement time profile of the trajectories.
    Inputs: -what: which data set to display
'''
rs = ReadSetupFile()
fig = plt.figure(1, figsize=(16,9))
if what == "CMAES" and targetSize == "All":
for i in range(len(rs.sizeOfTarget)):
ax = plt.subplot2grid((2,2), (i/2,i%2))
name = rs.CMAESpath + str(rs.sizeOfTarget[i]) + "/" + foldername + "/TrajTime/"
times = getTrajTimeData(name)
x0 = []
y0 = []
time = []
for k, v in times.items():
for j in range(len(v)):
x0.append(v[j][0])
y0.append(v[j][1])
time.append(v[j][2])
xi = np.linspace(-0.4,0.4,100)
yi = np.linspace(0.12,0.58,100)
zi = griddata(x0, y0, time, xi, yi)
t1 = ax.scatter(x0, y0, c=time, marker=u'o', s=50, cmap=cm.get_cmap('RdYlBu'))
ax.scatter(rs.XTarget, rs.YTarget, c ='g', marker='v', s=200)
CS = ax.contourf(xi, yi, zi, 15, cmap=cm.get_cmap('RdYlBu'))
ax.set_xlabel("X (m)")
ax.set_ylabel("Y (m)")
ax.set_title(str("Time map for target " + str(rs.sizeOfTarget[i])))
fig.colorbar(t1, shrink=0.5, aspect=5)
t1 = ax.scatter(x0, y0, c='b', marker=u'o', s=20)
else:
if what == "CMAES":
name = rs.CMAESpath + targetSize + "/" + foldername + "/TrajTime/"
elif what == "Brent":
name = BrentTrajectoriesFolder
else:
name = rs.RBFNpath + foldername + "/TrajTime/"
times = getTrajTimeData(name)
x0 = []
y0 = []
time = []
for k, v in times.items():
for j in range(len(v)):
x0.append(v[j][0])
y0.append(v[j][1])
time.append(v[j][2])
xi = np.linspace(-0.4,0.4,100)
yi = np.linspace(0.12,0.58,100)
zi = griddata(x0, y0, time, xi, yi)
t1 = plt.scatter(x0, y0, c=time, marker=u'o', s=50, cmap=cm.get_cmap('RdYlBu'))
plt.scatter(rs.XTarget, rs.YTarget, c ='g', marker='v', s=200)
CS = plt.contourf(xi, yi, zi, 15, cmap=cm.get_cmap('RdYlBu'))
fig.colorbar(t1, shrink=0.5, aspect=5)
plt.scatter(x0, y0, c='b', marker=u'o', s=20)
plt.xlabel("X (m)")
plt.ylabel("Y (m)")
plt.savefig("ImageBank/"+what+'_timemap'+foldername+'.png', bbox_inches='tight')
plt.show(block = True)
#-----------------------------------------------------------------------------------------------------------
def plotTimeDistanceTarget(foldername):
rs = ReadSetupFile()
dicoTime = {}
for i in range(len(rs.sizeOfTarget)):
name = rs.CMAESpath + str(rs.sizeOfTarget[i]) + "/" + foldername + "/TrajTime/"
trajTimes = getTrajTimeData(name)
for k, v in trajTimes.items():
for j in range(len(v)):
distance = round(rs.getDistanceToTarget(v[j][0],v[j][1]),2)
if not distance in dicoTime.keys():
dicoTime[distance] = {}
if not rs.sizeOfTarget[i] in dicoTime[distance].keys():
dicoTime[distance][rs.sizeOfTarget[i]] = []
dicoTime[distance][rs.sizeOfTarget[i]].append(v[j][2])
plotTab = []
fig = plt.figure(1, figsize=(16,9))
plt.ylabel("time (s)")
plt.xlabel("Target size (mm)")
for key in sorted(dicoTime.keys()):
plotTab.append(plt.plot([i for i in sorted(dicoTime[key].keys())], [np.mean(dicoTime[key][i]) for i in sorted(dicoTime[key].keys())], label = str("Distance: " + str(key))))
plt.legend(loc = 0)
plt.savefig("ImageBank/timedist"+foldername+'.png', bbox_inches='tight')
plt.show(block = True)
#-----------------------------------------------------------------------------------------------------------
def plotPerfSizeDist(foldername):
rs = ReadSetupFile()
dicoCost = {}
for i in range(len(rs.sizeOfTarget)):
name = rs.CMAESpath + str(rs.sizeOfTarget[i]) + "/" + foldername + "/Cost/"
costs = getCostData(name)
for k, v in costs.items():
for j in range(len(v)):
distance = round(rs.getDistanceToTarget(v[j][0],v[j][1]),2)
if not distance in dicoCost.keys():
dicoCost[distance] = {}
if not rs.sizeOfTarget[i] in dicoCost[distance].keys():
dicoCost[distance][rs.sizeOfTarget[i]] = []
dicoCost[distance][rs.sizeOfTarget[i]].append(v[j][2])
plotTab = []
fig = plt.figure(1, figsize=(16,9))
plt.ylabel("performance")
plt.xlabel("Target size (mm)")
for key in sorted(dicoCost.keys()):
plotTab.append(plt.plot([i for i in sorted(dicoCost[key].keys())], [np.mean(dicoCost[key][i]) for i in sorted(dicoCost[key].keys())], label = str("Distance: " + str(key))))
plt.legend(loc = 0)
plt.savefig("ImageBank/perfdist"+foldername+".png", bbox_inches='tight')
plt.show(block = True)
#-----------------------------------------------------------------------------------------------------------
def plotFittsLaw(foldername, rbfn = False):
rs = ReadSetupFile()
timeDistWidth = []
for i in range(len(rs.sizeOfTarget)):
name = rs.CMAESpath + str(rs.sizeOfTarget[i]) + "/" + foldername + "/TrajTime/"
trajTimes = getTrajTimeData(name)
for k, v in trajTimes.items():
for j in range(len(v)):
distance = rs.getDistanceToTarget(v[j][0],v[j][1])
trajtime = v[j][2]
size = rs.sizeOfTarget[i]
timeDistWidth.append((distance, size, trajtime))
MT, DI = [], []
for el in timeDistWidth:
MT.append(el[2])
DI.append(np.log2(el[0]/el[1]))
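    # Fitts' law predicts a linear relation MT = a*ID + b between the movement
    # time MT and the index of difficulty ID = log2(D/W)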
slope, intercept, r_value, p_value, std_err = stats.linregress(DI,MT)
yLR = slope * np.asarray(DI) + intercept
plt.figure()
for el in timeDistWidth:
if el[0]<=0.15:
plt.scatter(np.log2(el[0]/el[1]), el[2], c ='blue')
elif el[0]<=0.28:
plt.scatter(np.log2(el[0]/el[1]), el[2], c ='green')
else:
plt.scatter(np.log2(el[0]/el[1]), el[2], c ='red')
plt.plot(DI, yLR)
plt.title("a = " + str(slope) + " b = " + str(intercept) + " r^2 = " + str(r_value**2))
plt.xlabel("log(D/W)/log(2)")
plt.ylabel("Movement time (s)")
plt.savefig("ImageBank/fitts"+foldername+".png", bbox_inches='tight')
plt.show(block = True)
# ---------------- hit dispersion ---------------------------------------
def plotHitDispersion(foldername,sizeT):
rs = ReadSetupFile()
name = rs.CMAESpath + sizeT + "/" + foldername + "/finalX/"
data = getLastXData(name)
tabx, taby = [], []
for el in data.values():
for j in range(len(el)):
tabx.append(el[j])
taby.append(rs.YTarget)
fig = plt.figure(1, figsize=(16,9))
plt.plot([-rs.sizeOfTarget[0]/2, rs.sizeOfTarget[0]/2], [rs.YTarget, rs.YTarget], c = 'r')
plt.scatter([-rs.sizeOfTarget[0]/2, rs.sizeOfTarget[0]/2], [rs.YTarget, rs.YTarget], marker=u'|', s = 100)
plt.scatter(tabx, taby, c = 'b')
plt.xlabel("X (m)")
plt.ylabel("Y (m)")
plt.savefig("ImageBank/hit" + str(sizeT) +foldername + ".png", bbox_inches='tight')
plt.show(block = True)
def plotScattergram(what,foldername):
rs = ReadSetupFile()
data = {}
if what=="CMAES":
for i in range(len(rs.sizeOfTarget)):
name = rs.CMAESpath + str(rs.sizeOfTarget[i]) + "/" + foldername + "/finalX/"
tmp = getLastXData(name)
tabx = []
for el in tmp.values():
for j in range(len(el)):
tabx.append(el[j])
data[rs.sizeOfTarget[i]] = tabx
plt.figure(1, figsize=(16,9))
for i in range(len(rs.sizeOfTarget)):
ax = plt.subplot2grid((2,2), (i/2,i%2))
ax.hist(data[rs.sizeOfTarget[i]], 20)
            ax.plot([-rs.sizeOfTarget[i]/2, -rs.sizeOfTarget[i]/2], [0, 500], c = 'r', linewidth = 3)
            ax.plot([rs.sizeOfTarget[i]/2, rs.sizeOfTarget[i]/2], [0, 500], c = 'r', linewidth = 3)
ax.set_title(str("Hit Dispersion for Target " + str(rs.sizeOfTarget[i])))
elif what=="RBFN":
name = rs.RBFNpath + foldername + "/finalX/"
tmp = getLastXData(name)
tabx = []
for el in tmp.values():
for j in range(len(el)):
tabx.append(el[j])
plt.hist(tabx, 20)
for i in range(len(rs.sizeOfTarget)):
            plt.plot([-rs.sizeOfTarget[i]/2, -rs.sizeOfTarget[i]/2], [0, 20], c = 'r', linewidth = 3)
            plt.plot([rs.sizeOfTarget[i]/2, rs.sizeOfTarget[i]/2], [0, 20], c = 'r', linewidth = 3)
plt.xlabel("X (m)")
plt.ylabel("Y (m)")
plt.title("Hit Dispersion for RBFN")
plt.savefig("ImageBank/"+what+"_hitdisp"+foldername+".png", bbox_inches='tight')
plt.show(block = True)
# ---------------- end of hit dispersion ---------------------------------------
def plotCMAESProgress():
plotCMAESCostProgress()
plotCMAESTimeProgress()
def plotCMAESCostProgress():
rs = ReadSetupFile()
fig = plt.figure(1, figsize=(16,9))
for i in range(len(rs.sizeOfTarget)):
ax = plt.subplot2grid((2,2), (i/2,i%2))
name = rs.CMAESpath + str(rs.sizeOfTarget[i]) + "/Cost/cmaesCost.log"
data = np.loadtxt(name)
x,w,m,b = [],[],[],[]
for j in range(len(data)):
x.append(j)
w.append(data[j][0])
m.append(data[j][1])
b.append(data[j][2])
ax.plot(x, w, c = 'b')
ax.plot(x, m, c = 'g')
ax.plot(x, b, c = 'r')
ax.set_title(str("Cost Target " + str(rs.sizeOfTarget[i])))
plt.savefig("ImageBank/costProgress.png")
plt.show(block = True)
def plotCMAESTimeProgress():
rs = ReadSetupFile()
fig = plt.figure(1, figsize=(16,9))
for i in range(len(rs.sizeOfTarget)):
ax = plt.subplot2grid((2,2), (i/2,i%2))
name = rs.CMAESpath + str(rs.sizeOfTarget[i]) + "/Cost/cmaesTime.log"
data = np.loadtxt(name)
x,w,m,b = [],[],[],[]
for j in range(len(data)):
x.append(j)
w.append(data[j][0])
m.append(data[j][1])
b.append(data[j][2])
ax.plot(x, w, c = 'b')
ax.plot(x, m, c = 'g')
ax.plot(x, b, c = 'r')
ax.set_title(str("Time Target " + str(rs.sizeOfTarget[i])))
plt.savefig("ImageBank/timeProgress.png")
plt.show(block = True)
def plotExperimentSetup():
rs = ReadSetupFile()
fig = plt.figure(1, figsize=(16,9))
arm = Arm()
q1 = np.linspace(-0.6, 2.6, 100, True)
q2 = np.linspace(-0.2, 3, 100, True)
posIni = np.loadtxt(pathDataFolder + rs.experimentFilePosIni)
xi, yi = [], []
xb, yb = [0], [0]
t = 0
for el in posIni:
if el[1] == np.min(posIni, axis = 0)[1] and t == 0:
t += 1
a, b = arm.mgi(el[0], el[1])
a1, b1 = arm.mgdFull(np.array([[a], [b]]))
xb.append(a1[0])
xb.append(b1[0])
yb.append(a1[1])
yb.append(b1[1])
xi.append(el[0])
yi.append(el[1])
pos = []
for i in range(len(q1)):
for j in range(len(q2)):
coordHa = arm.mgdEndEffector(np.array([[q1[i]], [q2[j]]]))
pos.append(coordHa)
x, y = [], []
for el in pos:
x.append(el[0])
y.append(el[1])
plt.scatter(x, y)
plt.scatter(xi, yi, c = 'r')
plt.scatter(0, 0.6175, c = "r", marker=u'*', s = 200)
plt.plot(xb, yb, c = 'r')
plt.plot([-0.3,0.3], [0.6175, 0.6175], c = 'g')
plt.savefig("ImageBank/setup.png", bbox_inches='tight')
plt.show(block = True)
| gpl-2.0 |
ManuSchmi88/landlab | landlab/components/uniform_precip/examples/uniform_precip_test.py | 5 | 3696 | """ uniform_precip_test.py
Sample driver file for the PrecipitationDistribution component.
This file will generate storm characteristics, update those characteristics,
and finally generate a storm time series across 100 years. After the storm
time series is generated, a plot will be drawn with storm intensity on the y-axis
and the storm duration along the x - axis.
Keep in mind - because these are drawn from a statistical distribution in
a stochastic manner, each run of this driver will differ from the last run.
Written by Jordan Marie Adams, 2013.
"""
from __future__ import print_function
from landlab.components.uniform_precip import PrecipitationDistribution
from matplotlib import pyplot as plt
def create_precip_plot(storm_arr):
# Plotting precipitation distribution time series
# Creating a new figure instance
plt.figure(1)
# Labeling the x and y axes
plt.xlabel('Time (years)', fontsize=14)
plt.ylabel('Rainfall Intensity (mm/day)', fontsize=14)
# Setting the plot title
plt.title('Randomly Generated Rainfall Time Series', fontsize=16)
# Now to plot the axes the way we'd like them...
ax = plt.gca()
# Manually set the ten times (in hours) to plot across 100 years
tick_locations = ([0, 3652.42, 7304.84, 10957.26, 14609.68, 18262.1,
21914.52, 25566.96, 29219.36, 32871.78, 36524.2])
# This next list will actually replace the hours with time in years.
tick_labels=[0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
# Swapping out the two tick labels to plot intensity against years.
plt.xticks(tick_locations, tick_labels)
# Setting tick label size.
ax.tick_params(labelsize=14)
# Setting the limits for the x and y
plt.xlim(0, 36524.2)
plt.ylim(ymin=0, ymax=20)
# Looping through the storm array to plot the intensity as the height of each bar plot
# and the width will correspond to the storm duration.
for s in storm_arr:
x = storm_arr.index(s)
start = storm_arr[x][0]
end = storm_arr[x][1] - storm_arr[x][0]
plt.broken_barh([(start, end)], (0,storm_arr[x][2]), label='Rain',
color = 'blue')
plt.show()
def main():
# First we will create an instance of PrecipitationDistribution
PD = PrecipitationDistribution(mean_storm_duration = 2.0,
mean_interstorm_duration = 50.0,
mean_storm_depth = 0.05, total_t = 37000.)
# Because the values for storm duration, interstorm duration, storm
# depth and intensity are set stochastically in the initialization
# phase, we should see that they seem reasonable.
print("Mean storm duration is: ", PD.mean_storm_duration, " hours, while",
"the value from the Poisson distribution is: ", PD.storm_duration)
print("Mean interstorm Duration is: ", PD.mean_interstorm_duration,
'hours, while the value from the Poisson distribution is: ',
PD.interstorm_duration)
print("Mean storm depth is: ", PD.mean_storm_depth, "mm, while the value",
"from the Poisson distribution is: ", PD.storm_depth)
print("Mean intensity is: ", PD.mean_intensity, "mm/hr, while the value",
"from the Poisson distribution is: ", PD.intensity)
print('\n')
# If we generate a time series we can plot a precipitation distribution
PD.get_storm_time_series()
# And get the storm array from the component..
storm_arr = PD.storm_time_series
# And now to call the plotting method.
create_precip_plot(storm_arr)
if __name__ == '__main__':
main()
| mit |
mcflugen/topoflow | topoflow/components/smooth_DEM.py | 2 | 75891 |
#-------------------------------------------------------------------
# Note: We can now compute a new D8 flow grid and area grid for
# the new DEM and repeat this process until the flow grid
# no longer changes. Need to use d8_global.py (which
# computes flow and area grids; used by Erode) instead of
# tf_d8_base.py as done here. The result will be a DEM that
# satisfies Flint's law exactly. For steady-state rainfall,
# it will also satisfy a slope-discharge power law exactly.
# We can then use this tool to explore how the network
# topology and geometry change as we vary c and p.
#
# Note that if S = c * A^p, Q = R * A, w = w1 * Q^b, and
# q = q1 * S^gamma, then we have:
# p = (1 - b)/gamma
# c = [R^(1-b) / (w1 * q1)]^(1/gamma)
# If b = 1/2 and gamma = -1, then p = -1/2.
#
#-------------------------------------------------------------------
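#-------------------------------------------------------------------
# Minimal sketch of the relations above (the helper below and its
# default values for R, w1 and q1 are illustrative assumptions,
# not calibrated parameters):
#-------------------------------------------------------------------
def flint_law_coefficients( b=0.5, gamma=-1.0, R=1.0, w1=1.0, q1=1.0 ):
    p = (1.0 - b) / gamma
    c = ( R**(1.0 - b) / (w1 * q1) )**(1.0 / gamma)
    return c, p    # with the defaults (b=1/2, gamma=-1), p = -0.5
#-------------------------------------------------------------------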
from numpy import *
import numpy
import os
import scipy.optimize
import cfg_files
import BMI_base      # (provides BMI_component, the base class used below)
import CSDMS_base
import d8_global # (from Erode project)
import file_utils # (for count_lines())
import model_output
import rtg_files
import rti_files
# import matplotlib.pyplot
#-------------------------------------------------------------------------
# smooth_DEM.py
#
# Copyright (c) 2005-2012, Scott D. Peckham
# Created: May 2004
# Modified: Jul-Aug 2005
# Converted from IDL to Python: July 2010
# Worked on: read_profile_data() and find_best_fit_c_and_p(). (1/18/12)
#
#-------------------------------------------------------------------------
#
# unit_test()
# curve_fit_test()
#
# class DEM_smoother
#
# get_attribute()
# set_constants()
# initialize()
# update()
# finalize()
# ----------------------
# get_gui_info()
# get_cfg_extension()
# build_filenames() ##### OSBOLETE SOON
# initialize_d8_vars()
# ----------------------
# update_d8_vars()
# update_slopes() (Step 3)
# update_DEM() (Step 4)
# ------------------------
# read_profile_data() (Step 1)
# find_best_fit_c_and_p() (Step 2)
# ------------------------
# open_input_files()
# read_input_files()
# close_input_files()
# ------------------------
# update_outfile_names()
# open_output_files()
# write_output_files
# close_output_files()
# save_grids()
# save_pixel_values()
#
#-------------------------------------------------------------------------
def unit_test():
c = DEM_smoother()
c.CCA = False
c.DEBUG = True
## # cfg_directory = '/data/sims/erode/small_ky/'
## cfg_directory = 'Applications/Erode/Data/Small_KY/'
## cfg_prefix = 'Case1'
## c.site_prefix = 'Small'
cfg_directory = '/home/csdms/models/erode/0.5/share/data/KY_Sub/'
# cfg_directory = 'Applications/Erode/Data/KY_Sub/'
cfg_prefix = 'Smooth1' ### 'Case1'
c.site_prefix = 'KY_Sub'
#--------------------------------------------
# Note: n_steps must be read from CFG file;
# setting it here gets over-ridden.
#--------------------------------------------
c.run_model(cfg_directory=cfg_directory,
cfg_prefix=cfg_prefix)
## c.initialize()
## c.update()
# unit_test()
#-------------------------------------------------------------------------
def curve_fit_test():
#------------------------------------------------------------
# Notes: This test function shows that the function:
# find_best_fit_c_and_p() works, but typically
# does not give the p-value to high accuracy.
#------------------------------------------------------------
#------------------------
# Starting on a divide
# and moving downstream
#------------------------
#** x0 = 0.001 # (IDL: doesn't converge)
#** x0 = 0.01 # (IDL: doesn't converge)
#** x0 = 0.1 # (IDL: converges; large stderr)
x0 = float64(1)
x = arange(100, dtype='Float64') + x0 # (distance [km]; NB! x[0]=x0)
xmin = x.min()
xmax = x.max()
Amax = float64(625) # [km^2]
ca = Amax / xmax ** float64(2) # [unitless]
A = ca * x ** 2 # (area [km^2])
#--------------------------------
# If eps is small, then expect:
# p = (b - 1)/2 or b = (1 + 2p)
#--------------------------------
#b = -1.0d ;(p = -1.00)
#b = -0.9d ;(p = -0.95)
#b = -0.7d ;(p = -0.85)
#b = -0.5d ;(p = -0.75)
b = -float64(0.3) #(p = -0.65) ;(closest to actual for KY_Sub?)
#b = -0.1d ;(p = -0.55)
#------------------------------------------
# Make sure that z[x0] = z0. Note that
# if x0=0, then alog(0) will occur in fit
# and fitting procedure will fail.
#------------------------------------------
z0 = numpy.float64(600)
z = z0 * (x - x0 + float64(1)) ** b # (elevation [meters])
#** eps = 1e-6
#** z = z0 * (x + eps)^b ;(elevation [meters])
#** z = z / (1d + eps)^b ;(so that z[0] = z0)
#** z = -1d * 0.01d * alog(x + 1d) ;(elevation [meters])
#---------------------------------
# Doesn't perform well for these
#---------------------------------
#** z = 600d - (5.9d * x^2d)
#** z = 600d - (5.9d * x^0.5)
#** z = 600d - (5.9d * x)
#------------------------------------
# Reverse the vectors so that we
# start at outlet and move upstream
#-----------------------------------------------------------
# Must use FLIPUD(x) vs. ROT90(x,-2) to reverse 1D arrays.
#-----------------------------------------------------------
x2 = numpy.flipud( x )
A2 = numpy.flipud( A )
z2 = numpy.flipud( z )
#--------------------------
# Find the best-fit curve
#--------------------------
c, p = best_slope_area_curve_fit( A2, z2 )
print 'best-fit c =', c
print 'best-fit p =', p
#-----------------------------------
zfit = slope_area_function( A, c, p ) # (z0 and ds via "data")
print 'zfit[0] = ', zfit[0]
#----------------------------------
# Print expected curve-fit values
#----------------------------------
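    #------------------------------------------------------------
    # Why: with z = z0*(x - x0 + 1)^b and A = ca * x^2, we get
    # S = |dz/dx| ~ |z0*b| * x^(b-1) and x = (A/ca)^(1/2), so
    # S ~ ce * A^pe with pe = (b-1)/2 and ce = |z0*b| / ca^pe.
    #------------------------------------------------------------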
pe = (b - float64(1)) / float64(2)
ce = absolute((z0 * b) / (ca ** pe)) #(abs since S>0 by convention)
print 'ce =', ce
print 'pe =', pe
print ' '
#---------------------------
# Create a plot to compare
# fitted curve to original
#---------------------------
## matplotlib.pyplot.figure(1, figsize=(800/80.0, 600/80.0), dpi=80)
## matplotlib.pyplot.show()
## loadct(39, silent=True) #####
## black = int16(0)
## white = int16(255)
## red = int16(220)
## matplotlib.pyplot.plot(x2, z2, color='k')
## matplotlib.pyplot.axes(axisbg='w')
## matplotlib.pyplot.show()
## oplot(x2, yfit, psym=-1, color=red) ####
# curve_fit_test()
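#-------------------------------------------------------------------------
def log_log_fit_test():
    #--------------------------------------------------------------
    # Note: Supplementary sketch (not part of the original tool).
    #       It shows that when slopes are available directly,
    #       Flint's law parameters can also be recovered from a
    #       linear least-squares fit of log(S) vs. log(A).  The
    #       values of A, c_true and p_true below are assumed,
    #       noise-free illustrations, unlike the profile-based
    #       fit done in find_best_fit_c_and_p().
    #--------------------------------------------------------------
    A      = numpy.linspace(1.0, 625.0, 200)   # (areas [km^2], assumed)
    c_true = numpy.float64( 0.3 )
    p_true = numpy.float64( -0.5 )
    S      = c_true * (A ** p_true)            # (Flint's law: S = c * A^p)
    #---------------------------------------------------
    # Fit log(S) = log(c) + p*log(A) by least squares;
    # the slope gives p and the intercept gives log(c).
    #---------------------------------------------------
    p_fit, log_c_fit = numpy.polyfit( numpy.log(A), numpy.log(S), 1 )
    c_fit = numpy.exp( log_c_fit )
    print 'log-log fit c =', c_fit
    print 'log-log fit p =', p_fit
#   log_log_fit_test()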
#-------------------------------------------------------------------------
class DEM_smoother( BMI_base.BMI_component ):
#-----------------------------------------------------------------
# Note: Do not define an __init__() method here. It will
    # override things needed from BMI_base.__init__()
#-------------------------------------------------------------------
def get_attribute(self, att_name):
map = {'comp_name': 'DEMSmoother',
'version': '0.5',
'model_name': 'DEM_Smoother',
'model_family': 'Erode',
'cfg_template_file': 'DEM_Smoother.cfg.in',
'cfg_extension': '_dem_smoother.cfg',
'cmt_var_prefix': '/DEMSmoother/Input/Var/',
'gui_xml_file': '/home/csdms/cca/erode/0.5/src/share/cmt/gui/DEM_Smoother.xml',
'dialog_title': 'DEM Profile Smoother Parameters',
'time_step_type': 'fixed',
'time_units': 'years',
'mesh_type': 'uniform',
'author_name': 'Scott Peckham'}
try:
return map[ att_name.lower() ]
except:
print '###################################################'
print ' ERROR: Could not find attribute: ' + att_name
print '###################################################'
print ' '
# get_attribute()
#-------------------------------------------------------------------
def set_constants(self):
self.nodata = numpy.float32(-9999)
#----------------------------------------------
# Maybe set constants "c" and "p" this way ??
# Or maybe read them as input parameters ??
#----------------------------------------------
## self.read_profile_data()
## self.find_best_fit_c_and_p()
# set_constants()
#-------------------------------------------------------------------
def initialize(self, cfg_file=None, mode="nondriver",
SILENT=False):
self.comp_name = 'DEM Smoother component'
if not(SILENT):
print ' '
print self.comp_name + ': Initializing...'
self.status = 'initializing' # (OpenMI 2.0 convention)
self.mode = mode
self.cfg_file = cfg_file
#-----------------------------------------------
# Load component parameters from a config file
#-----------------------------------------------
self.set_constants()
self.initialize_config_vars()
# self.build_filenames() ##########
self.read_grid_info()
self.initialize_basin_vars()
#-----------------------------------------
# This must come before "Disabled" test.
#-----------------------------------------
self.initialize_time_vars()
#------------------------------------------------
# Append in_directory to input files. (1/17/12)
#------------------------------------------------
self.DEM_file = self.in_directory + self.DEM_file
self.profile_file = self.in_directory + self.profile_file
#----------------------------------------------
# Maybe set constants "c" and "p" this way ??
# Or maybe read them as input parameters ??
#----------------------------------------------
if (self.FIT_C_AND_P):
print 'Finding best-fit c and p from:'
print ' ' + self.profile_file
self.read_profile_data()
self.find_best_fit_c_and_p()
#----------------------------------
# Has component been turned off ?
#----------------------------------
if (self.comp_status == 'Disabled'):
if not(SILENT):
print self.comp_name + ': Disabled.'
self.DONE = True
self.status = 'initialized' # (OpenMI 2.0 convention)
return
else:
self.DONE = False
#---------------------------------------------
# Open input files needed to initialize vars
#---------------------------------------------
# Can't move read_input_files() to start of
# update(), since initial values needed here.
#---------------------------------------------
self.open_input_files()
self.read_input_files()
#-----------------------
# Initialize variables
#-----------------------
self.initialize_d8_vars() # (depend on D8 flow grid)
# self.initialize_computed_vars()
self.open_output_files()
self.status = 'initialized' # (OpenMI 2.0 convention)
# initialize()
#-------------------------------------------------------------------
def update(self, time_seconds=None):
#---------------------------------------------
# Note that u and d from previous time step
# must be used on RHS of the equations here.
#---------------------------------------------
self.status = 'updating' # (OpenMI 2.0 convention)
## if (self.mode == 'driver'):
## self.print_time_and_value(self.Q_outlet, 'Q_out', '[m^3/s]')
# self.print_time_and_value(self.Q_outlet, 'Q_out', '[m^3/s]')
#-------------------------
# Update computed values
#-------------------------
self.update_d8_vars() # (compute new D8 flow and area grids)
self.update_slopes() # (compute new slopes from D8 areas)
self.update_DEM()
#-------------------------------------------
# Read from files as needed to update vars
#--------------------------------------------------------
# NB! This is currently not needed because values don't
# change over time and read_input_files() is called by
# initialize().
#--------------------------------------------------------
# if (self.time_index > 0):
# self.read_input_files()
#------------------------------------------------------
# Update internal clock *before* write_output_files()
# because we're going to save original DEM, too, with
# a time of zero.
#------------------------------------------------------
self.update_time()
#----------------------------------------------
# Write user-specified data to output files ?
#----------------------------------------------
self.write_output_files( time_seconds )
### self.update_time() # (after write_output_files()
OK = True ##### (can be used for some test)
if (OK):
self.status = 'updated' # (OpenMI 2.0 convention)
else:
self.status = 'failed'
self.DONE = True
# update()
#-------------------------------------------------------------------
def finalize(self):
self.status = 'finalizing' # (OpenMI)
self.close_input_files() ## TopoFlow input "data streams"
self.close_output_files()
self.status = 'finalized' # (OpenMI)
print '(c, p) = ' + str(self.c) + ', ' + str(self.p)
print ' '
self.print_final_report(comp_name=self.comp_name)
#---------------------------
# Release all of the ports
#----------------------------------------
# Make this call in "finalize()" method
# of the component's CCA Imple file
#----------------------------------------
# self.release_cca_ports( d_services )
# finalize()
#-------------------------------------------------------------------
## def build_filenames(self):
##
## #--------------------------------------------------------
## # Note: These could all be read from an input file, or
## # we could just prompt for prefix and new_prefix.
## #--------------------------------------------------------
## if (hasattr(self, 'site_prefix')):
## prefix = self.site_prefix
## self.DEM_file = prefix + '_DEM.rtg'
## else:
## prefix, extension = file_utils.get_prefix_and_extension( self.DEM_file )
##
## #--------------------------------------------
## # Build input filenames from site_prefix ??
## #--------------------------------------------
#### if (self.DEM_file is None):
#### self.DEM_file = prefix + '_DEM.rtg'
##
## ####################################
## self.profile_file = None
## self.area_file = None
## self.flow_file = None
## self.new_DEM_file = None
## self.new_RTI_file = None
## self.new_slope_file = None
## self.new_rawDEM_file = None
## self.new_flow_file = None
## ####################################
##
## if (self.profile_file is None):
## self.profile_file = prefix + '_prof1.txt'
##
## if (self.area_file is None):
## self.area_file = prefix + '_area.rtg'
## #----------------------------------------------------
## # D-infinity areas may not be monotonic increasing,
## # and then the slopes won't decrease downstream.
## #----------------------------------------------------
## ### area_file = prefix + '_dinf-area.rtg'
##
## if (self.flow_file is None):
## self.flow_file = prefix + '_flow.rtg'
##
## #----------------------------------------------
## # Build output filenames from site_prefix ??
## #----------------------------------------------
## new_prefix = (prefix + '2') #####
## if (self.new_DEM_file is None):
## self.new_DEM_file = new_prefix + '_DEM.rtg'
##
## if (self.new_RTI_file is None):
## self.new_RTI_file = new_prefix + '.rti'
##
## if (self.new_slope_file is None):
## self.new_slope_file = new_prefix + '_slope.rtg'
##
## if (self.new_rawDEM_file is None):
## self.new_rawDEM_file = new_prefix + '_rawDEM.rtg'
##
## if (self.new_flow_file is None):
## self.new_flow_file = new_prefix + '_flow.rtg'
##
## # build_filenames()
#-------------------------------------------------------------------
def initialize_d8_vars(self):
#---------------------------------------------
# Compute and store a variety of (static) D8
# flow grid variables. Embed structure into
# the current component.
#---------------------------------------------
self.d8 = d8_global.d8_component()
self.d8.DEBUG = False # (make sure self tests are OFF)
################################################
# (5/13/10) Do next lines here for now, until
# the d8 cfg_file includes site prefix.
# Same is done in GW_base.py.
################################################
# (1/17/12) Note that d8_base.py now has a new
# method called: set_default_config_vars()
        # that is used to initialize vars in cases
# where there is no "*_d8_global.cfg" file.
# It is called in d8.initialize().
################################################
self.d8.DEM_file = self.DEM_file # (1/17/12) in_directory already prepended?
self.d8.FILL_PITS_IN_Z0 = 0 # (1/17/12)
self.d8.A_units = 'km^2' # (1/17/12) May be needed.
#--------------------------------------------------
# D8 component builds its cfg filename from these
#--------------------------------------------------
self.d8.site_prefix = self.site_prefix
self.d8.in_directory = self.in_directory
self.d8.initialize( cfg_file=None,
SILENT=self.SILENT,
REPORT=self.REPORT )
#---------------------------------------------------------
# Need "A_units" to be km^2 to compare to RT area grid
# so override setting in the CFG file, needed for Erode.
#---------------------------------------------------------
## if (self.DEBUG):
## self.d8.A_units = 'km^2' #####
# initialize_d8_vars()
#-------------------------------------------------------------------
def update_d8_vars(self, SILENT=True, REPORT=False,
SAVE_RTG=False):
#---------------------------------------------
# Update the D8 flow grid and all vars that
# depend on it, including D8 area grid.
#---------------------------------------------
# Area grid units are either 'm^2' or 'km^2'
# based on a setting in "*_d8.cfg" file.
# All length units are given in meters.
#---------------------------------------------
# d8.update() needs a depression-filled DEM
# and can later get it from a CCA port.
#---------------------------------------------
self.d8.update( self.time, DEM=self.DEM,
SILENT=SILENT, REPORT=REPORT )
#-----------------------------
# Save grid as an RTG file ?
#-----------------------------
if (SAVE_RTG):
d8_file = (self.case_prefix + '_flow.rtg')
rtg_files.write_grid( self.d8.d8_grid, d8_file, self.rti,
RTG_type='BYTE')
area_file = (self.case_prefix + '_area.rtg')
rtg_files.write_grid( self.d8.A, area_file, self.rti)
# update_d8_vars()
#-------------------------------------------------------------------------
def update_slopes(self):
Amin = numpy.nanmin( self.d8.A )
Amax = numpy.nanmax( self.d8.A )
print 'Min(A), Max(A) = ', Amin, ', ', Amax
#-------------------------------------------
# Compute new slope grid from D8 area grid
# using best-fit Flint's law parameters.
#-------------------------------------------
# S[0]=0 and S[Inf]=0
# Smax = (1-exp(-1)) * Ah^p * c
#-----------------------------------
#----------------------------------------------------
# Idea to produce convex hilltops at Ah = hillslope
# scale. Otherwise get singularity at A=0.
#----------------------------------------------------
# self.S = self.c * (A**self.p) * (1.0 - exp(-A/Ah))
#-------------------------------------------
# Raising zero to a negative power produces
# a "divide by zero" error message.
# Also can't use "float64" for S.
#-------------------------------------------
## self.S = self.c * (self.d8.A ** self.p)
self.S = numpy.zeros( self.d8.A.shape, dtype='float32')
wpos = where( self.d8.A > 0 )
if (wpos[0].size > 0):
self.S[wpos] = self.c * (self.d8.A[wpos] ** self.p)
wneg = where (self.d8.A <= 0 )
if (wneg[0].size > 0):
self.S[wneg] = 0
#----------------------------------------------------------
# Note: These should match the slopes computed from the
# D8 area grid using Flint's law, but remember that
# we're using the original D8 flow directions which
# may not be strictly valid for the new DEM.
#----------------------------------------------------------
## dz = (self.new_DEM - self.new_DEM.flat[ self.d8.parent_ID_grid ])
## self.S = (dz / self.d8.ds)
# update_slopes()
#-------------------------------------------------------------------------
def update_DEM(self):
#------------------------------------------------------------------
# NOTES: This routine uses a slope-area relationship, an area
# grid and a D8 flow grid to create a new, smoother DEM
# from an old one. The reason for wanting to do some-
# thing like this is that slopes in channels are very
# poorly resolved by local methods, even though slopes
# on hillslopes can be computed reasonably well.
# It operates on a principle of "raster recursion" and
# should be adaptable to the computation of other
# recursively-defined quantities. That is, it:
# (1) initializes the raster file, and
# (2) makes several passes through the file (line by
# line), looking for pixels whose _parent_ has a
# known value, and
# (3) assigns these pixels a value which is determined
# from the value of the parent.
#
# Note that self.d8.ds has units of meters.
#------------------------------------------------------------------
#
# This routine is based on the one used to compute flow
# distances to a set of masked pixels (e.g. pixels with
# undefined flow codes.) They use a type of raster recursion,
# but unlike most others, they work upstream, from parents to
# their kids, instead of downstream from kids to parent pixels.
#
#------------------------------------------------------------------
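        #------------------------------------------------------------------
        # In short, each sweep below assigns, to every pixel whose D8
        # parent already has an elevation,
        #     z_pixel = z_parent + S_pixel * ds_pixel,
        # and the sweeps repeat until no unassigned pixels remain.
        #------------------------------------------------------------------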
info = self.rti
## info = rti_files.read_info( self.DEM_file )
nx = info.ncols
ny = info.nrows
## byte_order = info.byte_order
#--------------------------------------------
# Read the DEM, area and D8 flow code grids
#--------------------------------------------
# self.DEM = rtg_files.read_grid( DEM_file, info, RTG_type=info.data_type )
# self.areas = rtg_files.read_grid( area_file, info, RTG_type='FLOAT' )
# self.codes = rtg_files.read_grid( flow_file, info, RTG_type='BYTE' )
#----------------------------------------------
# Get a where-style tuple of parent pixel IDs
#-------------------------------------------------------
# Find the pixels that flow to a nodata or edge pixel;
# the parents of these pixels have a flow code of 0.
#-------------------------------------------------------
pIDs = self.d8.parent_IDs # (where-style tuple)
parent_codes = self.d8.d8_grid[ pIDs ]
w = numpy.where(parent_codes == 0)
nw = w[0].size # (much faster)
## nw = numpy.size( w[0] )
#---------------------------------------------
# OLD METHOD that can't handle nodata pixels
#---------------------------------------------
## w = where(codes == 0)
## w = where(logical_and(codes == 0, DEM > self.nodata) )
## nw = w[0].size
#--------------------------------------
# Are there any pixels to work with ?
#--------------------------------------
if (nw == 0):
print 'ERROR: '
print 'No pixels to initialize iteration.'
print ' '
return
#-------------------------------------------------
# Initialize values in new DEM to be same as in
# old DEM for pixels whose parent flow code = 0
# and nodata value otherwise
#-------------------------------------------------
self.DEM = zeros([ny, nx], dtype='Float32') + self.nodata
self.DEM[ w ] = self.z0[ w ]
#----------------------------------------------------------------
self.flow_dist = zeros([ny, nx], dtype='Float32') + self.nodata
self.flow_dist[ w ] = 0
#----------------------------------------------------------------
n_reps = numpy.int32(0)
DONE = False
#------------------------------------------
# Iteratively assign new elevation values
#------------------------------------------
while True:
STILL_ACTIVE = False
IDs = where( self.DEM == self.nodata ) # (tuple)
n_IDs = IDs[0].size
## n_IDs = numpy.size(IDs[0]) # (much slower)
n_reps += 1
if (n_IDs != 0):
#-------------------------------------
# Get elevations of D8 parent pixels
#-------------------------------------
## dvals = self.d8.d8_grid[ IDs ] # (not needed)
pIDs = self.d8.parent_ID_grid[ IDs ]
p_elev = self.DEM.flat[ pIDs ]
p_dist = self.flow_dist.flat[ pIDs ] ####
#-------------------------------------
# If D8 parent elevation is known,
# then assign elevations to D8 kids.
#-------------------------------------
wp = where( p_elev != self.nodata )
n_assigned = wp[0].size # (much faster)
## n_assigned = size(wp[0]) # (much slower)
if (n_assigned != 0):
#----------------------------------------------
# Get "calendar-style" indices of "ready IDs"
#----------------------------------------------
ID_rows = IDs[0]
ID_cols = IDs[1]
ID_vals = (ID_rows * nx) + ID_cols
ready_IDs = ID_vals[ wp ]
#--------------------------------
# Compute new slopes from areas
#--------------------------------
S_vals = self.S.flat[ ready_IDs ]
#--------------------------------------
# Get upstream areas of parent's kids
# and compute new slopes from areas
#--------------------------------------
#### Avals = self.d8.A.flat[ ready_IDs ] # (later on)
## A_vals = self.areas.flat[ ready_IDs ]
## S_vals = self.c * (A_vals ** self.p)
#-----------------------------------
# S(0)=0 and S(Inf)=0
# Smax = (1-exp(-1)) * Ah^p * c
#-----------------------------------
#** S_vals = c * (A_vals^p) * (1.0 - exp(-A_vals/Ah))
#-----------------------------------
# Try to capture convex hillslopes
# with a second power-law curve.
#-------------------------------------------------------------
# Can force continuity, but can't match derivatives or
# get p2=p1. This can be seen with a figure. We expect
# 0 < p2 < 1, so we'll just fix p2 and compute c2 from cont.
#-------------------------------------------------------------
# ww = where(A_vals < Ah)
# nww = ww[0].size
# if (nww != 0):
# Smax = c * (Ah**p)
#** p2 = 0.1
#** p2 = 0.5
# p2 = 0.8
#** p2 = 1
#** p2 = 2
#** p2 = 4
# c2 = Smax / Ah**p2
# S_vals[ww] = c2 * (A_vals[ww]**p2)
#------------------------------------------
# Update the new, smooth elevation values
#---------------------------------------------------------
# Note: Since D8 areas always increase downstream, the
# slopes computed from Flint's law must always decrease.
#---------------------------------------------------------
ds_vals = self.d8.ds.flat[ ready_IDs ] # [meters]
dz_vals = S_vals * ds_vals # [meters]
self.DEM.flat[ ready_IDs ] = (p_elev[wp] + dz_vals)
STILL_ACTIVE = True
#-------------------------------------
# Compute the flow distances to edge
#-------------------------------------
self.flow_dist.flat[ ready_IDs ] = (p_dist[wp] + ds_vals)
#------------------------
# Are we finished yet ?
#------------------------
DONE = (n_assigned == n_IDs)
if (DONE or not(STILL_ACTIVE)): break
#--------------------------
# Compute DEM min and max
#--------------------------
self.zmin = numpy.nanmin(self.DEM)
self.zmax = numpy.nanmax(self.DEM)
#--------------------------------------------------
# Adjust the values by a distance-weighted amount
# so that max of new DEM will be same as old
#-------------------------------------------------
# wmax = where( self.DEM == self.zmax )
# dmax = self.flow_dist[ (wmax[0][0], wmax[1][0]) ]
# del_z = (self.flow_dist / dmax)*(self.zmax - self.z0max)
# self.DEM = self.DEM - del_z
#-------------------------------------------------
# Scale the values by a distance-weighted factor
# so that max of new DEM will be same as old
#-------------------------------------------------
# factor = (1 - (self.flow_dist / dmax)) *
# self.DEM = self.DEM * factor
#----------------------
# Print final message
#----------------------
## if (self.REPORT):
## print 'Finished with new DEM. '
## print ' '
print 'Number of iterations = ' + str(n_reps)
print 'Min/Max of orig. DEM = ' + \
str(self.z0min) + ', ' + str(self.z0max)
print 'Min/Max of new DEM = ' + \
str(self.zmin) + ', ' + str(self.zmax)
print ' '
# update_DEM()
#-------------------------------------------------------------------------
def read_profile_data(self, n_header=None):
#--------------------------------------------------------
# Notes: This routine gets pixel IDs for a main channel
# streamline from profile_file and uses them to
# get elevations, areas and pixel-to-pixel flow
# lengths along the main channel for use by the
# best_slope_area_curve_fit routine.
#--------------------------------------------------------
if (n_header is None):
n_header = numpy.int16(6)
#------------------------------
# Count lines in profile file
#------------------------------
n_lines = file_utils.count_lines( self.profile_file, SILENT=True )
n_lines = (n_lines - n_header)
#-------------------------------
dist = numpy.zeros([n_lines], dtype='Float64') ## 1/16/12
elev = numpy.zeros([n_lines], dtype='Float64') ## 1/16/12
cols = numpy.zeros([n_lines], dtype='Int32')
rows = numpy.zeros([n_lines], dtype='Int32')
#-----------------------------
# Open file to read IDs and
# skip over the header lines
#-----------------------------
file_unit = open(self.profile_file, 'r')
cfg_files.skip_header( file_unit, n_lines=n_header )
#----------------------------------
# Read the column and row vectors
#-----------------------------------------------------
# Profile file has: distance, elevation, column, row
#-----------------------------------------------------
dtype_list = ['float64','float64','int32', 'int32']
for k in xrange(n_lines):
var_list = cfg_files.read_list( file_unit, dtype_list=dtype_list )
dist[k] = var_list[0] ## 1/16/12
elev[k] = var_list[1] ## 1/16/12
cols[k] = var_list[2]
rows[k] = var_list[3]
#---------------------
# Close profile_file
#---------------------
file_unit.close()
#--------------------------------------------
# Read the DEM, area and D8 flow code grids
#-------------------------------------------------
# 1/16/12. Should we add area_file and flow_file
# to the CFG file? It already has DEM_file.
#-------------------------------------------------
dp = (self.in_directory + self.site_prefix)
DEM_file = dp + '_DEM.rtg' ## 1/16/12
area_file = dp + '_area.rtg' ## 1/16/12
#--------------------------------------------
info = self.rti
DEM = rtg_files.read_grid( self.DEM_file, info, RTG_type=info.data_type )
areas = rtg_files.read_grid( area_file, info, RTG_type='FLOAT' )
## ds = rtg_files.read_grid( ds_file, info, RTG_type='FLOAT' )
######### Done by read_input_files() ??
#---------------------------------------
# Only used for Flow_Lengths function.
#---------------------------------------
# flow_file = self.site_prefix + '_flow.rtg' ## 1/16/12
# codes = rtg_files.read_grid( flow_file, info, RTG_type='BYTE' )
#-----------------------------------------------------
# Compute the along-channel flow lengths (ds vector)
#-----------------------------------------------------
# ds = Flow_Lengths(codes, RTI_file, METERS=True, DOUBLE=True) ########
#------------------------------------------------------
# Construct ds vector from distances in profile_file.
# First distance is always zero.
# Also need to convert from km to meters.
# Size of "diffs" is one less than size of "dist".
#------------------------------------------------------
diffs = numpy.diff( dist )
# print 'size(dist) =', dist.size
# print 'size(diffs) =', diffs.size
ds_profile = numpy.zeros( dist.size, dtype='Float64' )
ds_profile[:-1] = diffs
ds_profile[-1] = diffs[-2] ###################### NOT STRICTLY CORRECT
ds_profile = ds_profile * 1000.0 # [meters]
#------------------------------------------
# Construct calendar-style streamline IDs
#------------------------------------------
ncols = numpy.size(DEM, 1)
IDs = (ncols * rows) + cols
#-------------------------------------
# Get the profile elevations & areas
#-------------------------------------
### z_profile = elev # (Use this instead ?? 1/16/12)
z_profile = DEM.flat[ IDs ] # [meters]
A_profile = areas.flat[ IDs ] # [km^2]
# ds_profile = ds.flat[ IDs ] # [meters]
#-------------------------------------
# Reverse the vectors so that values
# start at outlet and work upstream
#-----------------------------------------------------------
# Must use FLIPUD(x) vs. ROT90(x,-2) to reverse 1D arrays.
#-----------------------------------------------------------
self.A_profile = numpy.flipud( A_profile )
self.z_profile = numpy.flipud( z_profile )
self.ds_profile = numpy.flipud( ds_profile )
# read_profile_data()
#-------------------------------------------------------------------------
def find_best_fit_c_and_p(self, weights=None, REPORT=True):
## itmax=None, tol=None ):
#------------------------------------------------------------
# Notes: These notes are for the original IDL version.
#
# This function uses IDL's CURVEFIT function and the
# procedure slope_area_curve (above) to find the
# best-fit parameters for fitting the data vectors
# A and z.
# x and y can have as few as 3 unique points, but
# must contain 4 elements each to avoid an error
# from IDL. The 3rd value can simply be repeated.
# Initial guesses are required for all of the power
# curve parameters (a,c,p) and the choice of these
# has a big impact on whether CURVEFIT converges to
# a solution. Some experimenting is necessary but
# the initial guess for p must be a large negative
# number like -10 if p is expected to be negative
# and a small positive number like 0.001 if p is
# expected to be positive ??
# The array of flags, fitvars, determines which
# parameters are fixed and which ones to find, we
# don't need to find z0, but we need to pass it.
#------------------------------------------------------------
A = self.A_profile
z = self.z_profile
ds = self.ds_profile
#---------------------------------------------
# Set weights for the curve fitting function
#---------------------------------------------
if (weights is None):
#-----------------------------------------------
# Use equal weights; gives smaller stderr
# but further from computed p value. A
# leading constant didn't matter for CURVEFIT.
#-----------------------------------------------
# weights = numpy.ones( A.size )
#----------------------------------------------
# Poisson statistical weighting, gives a
# smaller stderr, but further from computed p
#----------------------------------------------
# weights = 1 / z
#------------------------------------------------
# Weight by contributing areas: improved fit.
# Since c and p values are used for entire DEM,
# and since the number of streamlines that pass
# through a given pixel is proportional to the
# contributing area, A, this makes some sense.
#------------------------------------------------
weights = A
# weights = (A ** 1.1)
# weights = numpy.sqrt(A) ;(good compromise ?)
# weights = (A ** 0.75)
#---------------------------------------------
# Combination of previous two methods, gives
# worst stderr but closest to computed p.
# Note that xdata=A, ydata=z in curve fit.
#---------------------------------------------
# weights = (A / z)
w0 = where(weights == 0)
nw0 = w0[0].size
if (nw0 != 0):
weights[w0] = numpy.float64(1)
#------------------------------------------
# Points used to generate initial guesses
#------------------------------------------
z0 = z[0]
# z1 = z[1]
z2 = z[-1]
#---------------------------------------------
# Need initial guesses with good convergence
# properties; not necessarily close to value
# (These worked well for IDL's CURVEFIT.)
# Can't use p0 since keyword to curve_fit().
#---------------------------------------------
pg = numpy.float64( -0.5 )
cg = (z2 - z0) / numpy.sum(numpy.float64(ds * (A ** pg)))
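        # (cg follows from requiring the fitted profile to reach the
        #  last elevation:  z[-1] ~ z0 + sum( ds * cg * A^pg ).)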
#-------------------------------------------------------------
# Define fitting function needed by scipy.optimize.curve_fit.
# First argument is only allowed independent variable, while
# remaining arguments are the fitting parameters. Note that
# ds (a vector) and z0 are treated as known values; the
# curve_fit() function does not allow them as arguments.
# Recall that S = c * A^p, and z = z0 + cumsum(ds * S).
# We also want the first element of the estimate to be z0,
# so we prepend 0 to dz.
#
# It looks like the first argument needs to be a scalar
# in order for this to find best fit for both c and p.
#-------------------------------------------------------------
def fit_function(AA, cc, pp):
dz = cc * numpy.float64( ds * ( AA ** pp ) )
dz = numpy.concatenate(( [0], dz[:-1] ))
return z0 + numpy.cumsum( dz )
#--------------------------------------------------
# Define "L2_error" function, also called E(c,p).
#--------------------------------------------------
def L2_error( params ): ###, *weights ):
cc = params[0]
pp = params[1]
nz = z.size
dz = cc * numpy.float64( ds * ( A ** pp ) )
dz = numpy.concatenate(( [0], dz[:-1] ))
zf = z0 + numpy.cumsum(dz)
#------------------------------------------------
# Experiment: Weighting by contributing area.
# This gives a lower p-value, but seems to give
# better results when applied to entire DEM.
#------------------------------------------------
weights = A
return numpy.sqrt( numpy.sum( weights*(z - zf)**2 ) / nz)
# if (weights is None):
# return numpy.sqrt( numpy.sum( (z - zf)**2 ) / nz)
# else:
# return numpy.sqrt( numpy.sum( weights*(z - zf)**2 ) / nz)
#----------------------------------------------------
# Define "Fk(p)" function used by c1(p) and c2(p).
#----------------------------------------------------
def Fk_function( k, p ):
if (k == 0): return 0.0
A_vals = A[1: k+1]
ds_vals = ds[1: k+1]
return numpy.sum( (A_vals**p) * ds_vals )
#----------------------------------------------------
# Define "Fk(p)" function used by c1(p) and c2(p).
#----------------------------------------------------
def Fkd_function( k, p ):
if (k == 0): return 0.0
A_vals = A[1: k+1]
ds_vals = ds[1: k+1]
return numpy.sum( (A_vals**p) * numpy.log(A_vals) * ds_vals )
#----------------------------------------------------
# Define "c1(p)" function from d/dp[ E(c,p) ] = 0.
#----------------------------------------------------
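        #----------------------------------------------------
        # Here, with the unweighted error
        #    E(c,p)^2 ~ sum_k [ z_k - z0 - c * Fk(p) ]^2,
        # d/dp = 0 gives c1(p) and d/dc = 0 gives c2(p); the
        # best-fit pair is where c1(p) = c2(p), which is what
        # c_diff() below is built to locate.
        #----------------------------------------------------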
def c1_function( p ):
nz = z.size
Fk_vals = numpy.zeros( nz, dtype='float64' )
Fkd_vals = numpy.zeros( nz, dtype='float64' )
for k in xrange( nz ):
Fk_vals[ k ] = Fk_function( k, p )
Fkd_vals[ k ] = Fkd_function( k, p )
top = numpy.sum( (z - z0) * Fkd_vals )
bot = numpy.sum( Fk_vals * Fkd_vals )
return (top / bot)
#----------------------------------------------------
# Define "c2(p)" function from d/dc[ E(c,p) ] = 0.
#----------------------------------------------------
def c2_function( p ):
nz = z.size
Fk_vals = numpy.zeros( nz, dtype='float64' )
Fkd_vals = numpy.zeros( nz, dtype='float64' )
for k in xrange( nz ):
Fk_vals[ k ] = Fk_function( k, p )
Fkd_vals[ k ] = Fkd_function( k, p )
top = numpy.sum( (z - z0) * Fk_vals )
bot = numpy.sum( Fk_vals ** 2)
return (top / bot)
#-------------------------------------------------
# Define "c_diff(p)" function (for root finder)
# Best c and p should be where c1(p) = c2(p).
#-------------------------------------------------
def c_diff( p ):
return ( c1_function(p) - c2_function(p) )
#-------------------------------
# Define "c_diff2(p)" function
#-------------------------------
def c_diff2( p ):
return ( c1_function(p) - c2_function(p) )**2
#---------------------------------------------------------------
# Use scipy.optimize.fmin() to find best-fit parameters
# by finding parameters that minimize the L2 error.
# This uses the Nelder-Mead downhill simplex algorithm.
#---------------------------------------------------------------
# See: http://docs.scipy.org/doc/scipy/reference/optimize.html
#---------------------------------------------------------------
# If (disp=True), convergence messages are printed.
# If (retall=True), best_params contains a list of solutions.
#-------------------------------------------------------------
xtol = 1e-12 # (tolerance in best parameters)
maxiter = 300 # (max number of iterations)
best_guesses = numpy.array((cg, pg)) # (an nd_array)
#-----------------------------------------------------------
# Each of these methods works, with very similar results,
# including c, p, maxerr, E(c,p), c_1(p) and c_2(p).
#-----------------------------------------------------------
# Note that L2_error() now uses "weights". It was
# found previously with IDL's CURVEFIT that best results
# were obtained with (weights = A), which causes downstream
# points/pixels to have greater influence. This makes some
# sense since the number of distinct streamlines/profiles
# that pass through a given pixel is proportional to its
# contributing area. It also causes the max in the new
# DEMs to have a much more reasonable value, even though
# the fit to the main channel profile used to find c and
# p has a greater error.
#-----------------------------------------------------------
results = scipy.optimize.fmin( L2_error, best_guesses,
xtol=xtol, maxiter=maxiter,
disp=True, retall=True )
#------------------------------------------------------------------
# results = scipy.optimize.fmin_powell( L2_error, best_guesses,
# xtol=xtol, maxiter=maxiter,
# disp=True, retall=True )
#------------------------------------------------------------
# This experimental method also worked, but resulted in
# larger maxerr and stderr, even though c1(p) and c2(p)
# were closer to equal. Note that func(a) and func(b) must
# have opposite signs and they did for KY_Sub when a=-1.0,
# b=1.0, as shown. Also took longer to run.
#------------------------------------------------------------
# best_p = scipy.optimize.brentq( c_diff, -1.0, 1.0,
# xtol=xtol, maxiter=maxiter, disp=True )
# best_c = c1_function( best_p )
# best_pair = numpy.array( [best_c, best_p] )
# results = ( best_pair, best_pair )
#-----------------------------------------------------------
# Experimental method. Didn't work with c_diff2 above.
#-----------------------------------------------------------
# p_guess = numpy.array( pg )
# results = scipy.optimize.fmin( c_diff2, p_guess,
# xtol=xtol, maxiter=maxiter, disp=True, retall=True )
# best_p = results[0]
# best_c = c1_function( best_p )
# best_pair = numpy.array( best_c, best_p )
# results[0] = best_pair
#-----------------------------------------------------------
# Didn't work with the default settings, as shown here.
# DISP keyword not suppported in SciPy 0.9.
#-----------------------------------------------------------
# best_params = scipy.optimize.anneal( L2_error, best_guesses,
# feps=xtol, maxiter=maxiter )
# results = [ best_params, best_params ]
#--------------------------------------------------------------------
# This method requires a function for the derivative, "fprime"
#--------------------------------------------------------------------
# results = scipy.optimize.fmin_ncg( L2_error, best_guesses,
# fprime= ????????,
# avextol=xtol, maxiter=maxiter, disp=True, retall=True )
#--------------------------------------------------------------------
# These methods didn't give similar results; p did not change from
# its initial value. Also, they don't allow the XTOL keyword,
# but tried the GTOL keyword.
#--------------------------------------------------------------------
# results = scipy.optimize.fmin_cg( L2_error, best_guesses,
# gtol=xtol, maxiter=maxiter, disp=True, retall=True )
#--------------------------------------------------------------------
# results = scipy.optimize.fmin_bfgs( L2_error, best_guesses,
# gtol=xtol, maxiter=maxiter, disp=True, retall=True )
#--------------------------------------------------------------------
        print ' '   # (after convergence message)
best_params = results[0]
pair_list = results[1]
self.c = best_params[0]
self.p = best_params[1]
if (REPORT):
print 'List of (c,p) pairs:'
for pair in pair_list:
print ' (c,p) =', pair
print ' '
# Note: minimize() is not available in SciPy 0.9.
# best_params, info = scipy.optimize.minimize( L2_error, best_guesses,
# method='Nelder-Mead')
#-------------------------------------------------------------
# Use scipy.optimize.curve_fit() to find best-fit parameters.
# It uses nonlinear least squares to fit a function to data.
#-------------------------------------------------------------
# http://docs.scipy.org/doc/scipy/reference/generated/
# scipy.optimize.curve_fit.html
        # Uses the Levenberg-Marquardt algorithm implemented as:
# scipy.optimize.leastsq()
# Additional keyword arguments are passed directly to that
# algorithm. See help(scipy.optimize.leastsq) for more info
# on keywords such as:
# maxfev: max number of iterations
# ftol: Relative error desired in the sum of squares.
# xtol: Relative error desired in the approximate solution.
# ier: An integer information flag. (returned)
# mesg: An error message string. (returned)
#
# popt, pcov = scipy.optimize.curve_fit(f, xdata, ydata,
# p0=None, sigma=None, **kw)
#
# Keywords not expected by curve_fit() are passed directly
# to the underlying leastsq() function.
#-------------------------------------------------------------
maxfev = 300 # (used for IDL's CURVEFIT)
xtol = numpy.float64( 1e-10 )
# xtol = numpy.float64( 1e-20 ) # (used for IDL's CURVEFIT)
# kwargs = { "maxfev":maxfev, "xtol":xtol } # (Works, but not needed.)
# I don't know how to get values returned in keywords.
# This doesn't work: kwargs = { "ier":None, "mesg":None }
# This doesn't work: kwargs = { "ier":0, "mesg":'NONE' }
# best_guesses = [cg, pg] # (a list)
# best_guesses = (cg, pg) # (a tuple)
# best_guesses = numpy.array((cg, pg)) # (an nd_array)
# xdata = A
# ydata = z
# best_params, best_cov = scipy.optimize.curve_fit( fit_function,
# xdata, ydata,
# p0=best_guesses, ## p0=None,
# ## sigma=weights, ## sigma=None,
# maxfev=maxfev,
# xtol=xtol )
# ## **kwargs )
# self.c = best_params[0]
# self.p = best_params[1]
# ier = kwargs['ier']
# mesg = kwargs['mesg']
# print 'ier =', ier
# print 'mesg =', mesg
# ier = 1
# mesg = 'NOT_SET'
#--------------------------
# Compute error estimates
#--------------------------
nz = z.size
zfit = fit_function( A, self.c, self.p )
maxerr = numpy.max( numpy.absolute( z - zfit ))
stderr = numpy.sqrt( numpy.sum( (z - zfit)**2 )/ nz )
# stderr = numpy.sqrt( numpy.sum( (z - zfit)**2 )/(nz - 1)) # (Bessel's correction?)
#--------------------------------
# Print comparison of zfit to z
#--------------------------------
if (REPORT):
for k in xrange( len(z) ):
print '(z, zfit, diff) =', z[k], ',', zfit[k], ',', (z[k]-zfit[k])
print ' '
# print 'A =', A
# print ' '
# print 'ds =', ds
# print ' '
#---------------------------
# Print an optional report
#---------------------------
if (REPORT):
print '--------------------------------------'
print ' Least squares curve fit to profile'
print ' weighted by contributing area'
print '--------------------------------------'
print 'z(A) = z0 + numpy.cumsum(dz(A))'
print 'dz(A) = [0, ds * S(A)]'
print 'S(A) = c * (A^p)'
print '--------------------------------------'
print 'z0 = ' + str(z0)
print 'zmin, zmax =', numpy.nanmin(z), ',', numpy.nanmax(z)
print 'Amin, Amax =', numpy.nanmin(A), ',', numpy.nanmax(A)
print 'dsmin, dsmax =', numpy.nanmin(ds), ',', numpy.nanmax(ds)
print '--------------------------------------'
print 'c0 = ' + str(cg)
print 'p0 = ' + str(pg)
print '--------------------------------------'
print 'c = ' + str(self.c)
print 'p = ' + str(self.p)
print 'maxerr = ' + str(maxerr)
            print 'stderr = ' + str(stderr)   # (same as E(c,p))
print '--------------------------------------'
print 'E(c,p) = ' + str( L2_error( best_params ) )
print 'c_1(p) = ' + str( c1_function( self.p ) )
print 'c_2(p) = ' + str( c2_function( self.p ) )
print '--------------------------------------'
print ' '
#--------------------------------
# Print status of the curve fit
#-----------------------------------------------------
# IDL's CURVEFIT provided information about whether
# the algorithm converged or not and the number of
# iterations. scipy.optimize.leastsq() provides
# similar information in "ier" and "mesg".
#-----------------------------------------------------
# good_codes = [1,2,3,4]
# if (ier not in good_codes):
# print 'Error: ' + mesg
# else:
# print 'Message: ' + mesg
# print ' '
#---------------------------------------------------
# Use IDL's CURVEFIT() to find best-fit parameters
#-------------------------------------------------------------------
# Result = CURVEFIT( X, Y, Weights, A [, Sigma] [, CHISQ=variable]
# [, /DOUBLE] [, FITA=vector]
# [, FUNCTION_NAME=string] [, /NODERIVATIVE]
# [, ITER=variable] [, ITMAX=value]
# [, STATUS={0 | 1 | 2}] [, TOL=value]
# [, YERROR=variable] )
#-------------------------------------------------------------------
# This is how CURVEFIT would be used:
#
# params = [c0, p0, z0]
# fitvars = [1, 1, 0]
# zfit = curvefit(A, z, weights, params, sigma, DOUBLE=True,
# FUNCTION_NAME='IDL_fit_function', TOL=tol,
# ITMAX=itmax, YERROR=stderr, FITA=fitvars,
# STATUS=status, ITER=iter)
# c = params[0] ; (these are passed back out)
# p = params[1]
# zfit = IDL_fit_function( A, c, p ) # (z0 and ds via "data")
# zfit = z0 + (c * cumsum(ds * A**p))
# if (status == 0):
# print 'Curve fit was successful!'
# print 'Number of iterations = ' + str(iter)
# elif (status == 1):
# print 'Curve fit failed. Chi square was '
# print 'increasing without bounds.'
# elif (status == 2):
# print 'Curve fit failed to converge after'
# print str(itmax) + ' iterations.'
# else:
# raise RuntimeError('no match found for expression')
# print ' '
#---------------------------------------------------------------------
# find_best_fit_c_and_p()
#-------------------------------------------------------------------------
## def IDL_fit_function(A, c, p):
## ### params, z, partials):
##
## #-------------------------------------------------------------
## # Notes: For use with IDL's CURVEFIT() function
## #
## # CUMULATIVE keyword to TOTAL gives partial sums and
## # returns a vector vs. a scalar.
## #-------------------------------------------------------------
## # NB! z0 is the elevation of the parent pixel of the
## # outlet pixel. It is not the elevation of the
## # outlet pixel and A is max (not zero) at the outlet
## # pixel.
## #-------------------------------------------------------------
## # NB! Procedures called by IDL's CURVEFIT must conform
## # to strict rules. This means that we have no way
## # to pass an additional vector argument like ds.
## # However, we can use a COMMON block, as done here.
## #-------------------------------------------------------------
## ds = common_block.ds_profile
## z0 = common_block.z0
##
## z = z0 + (c * numpy.cumsum( float64(ds * A ** p) ))
##
## return z
##
## #------------------------------
## # Compute partial derivatives
## #---------------------------------
## # n_params() refers to number of
## # arguments to this procedure.
## #---------------------------------
## ## if (n_params >= 4):
## ## dz_dc = numpy.cumsum(double(ds * A ** p))
## ## dz_dp = c * numpy.cumsum(double(ds * log(A) * A ** p))
## ## nA = numpy.size(A)
## ## dz_dz0 = zeros([nA], dtype='Float64') + 1.0
## ## partials = array([array([dz_dc]), array([dz_dp]), array([dz_dz0])])
## ##
## ## return (A, params, z, partials)
##
## # IDL_fit_function()
#-------------------------------------------------------------------------
def open_input_files(self):
pass
# open_input_files()
#-------------------------------------------------------------------------
def read_input_files(self):
#----------------------------------------
# Get name of the info file and read it
#----------------------------------------
info = self.rti
# info = rti_files.read_info( self.DEM_file )
#-----------------------
# Read the initial DEM
#-----------------------
self.z0 = rtg_files.read_grid( self.DEM_file, info,
RTG_type=info.data_type )
self.DEM = self.z0.copy()
#---------------------------------
# Store original DEM min and max
#---------------------------------
self.z0min = numpy.nanmin( self.z0 )
self.z0max = numpy.nanmax( self.z0 )
#------------------------------------------------
# Could read these, but now we use d8_global.py
# to compute them to allow evolution.
#------------------------------------------------
# self.areas = rtg_files.read_grid( self.area_file, info, RTG_type='FLOAT' )
# self.codes = rtg_files.read_grid( self.flow_file, info, RTG_type='BYTE' )
# read_input_files()
#-------------------------------------------------------------------------
def close_input_files(self):
pass
# close_input_files()
#-------------------------------------------------------------------------
def update_outfile_names(self):
#-------------------------------------------------
# Notes: Append out_directory to outfile names.
#-------------------------------------------------
self.z_gs_file = (self.out_directory + self.z_gs_file)
self.D8_gs_file = (self.out_directory + self.D8_gs_file)
self.S_gs_file = (self.out_directory + self.S_gs_file)
self.A_gs_file = (self.out_directory + self.A_gs_file)
#---------------------------------------------------------
self.z_ts_file = (self.out_directory + self.z_ts_file)
self.D8_ts_file = (self.out_directory + self.D8_ts_file)
self.S_ts_file = (self.out_directory + self.S_ts_file)
self.A_ts_file = (self.out_directory + self.A_ts_file)
## self.new_DEM_file = (self.out_directory + self.new_DEM_file)
## self.new_rawDEM_file = (self.out_directory + self.new_rawDEM_file)
## self.new_slope_file = (self.out_directory + self.new_slope_file)
## self.new_flow_file = (self.out_directory + self.new_flow_file)
# update_outfile_names()
#-------------------------------------------------------------------
def open_output_files(self):
model_output.check_netcdf() # (test import and info message)
self.update_outfile_names()
#--------------------------------------
# Open new files to write grid stacks
#--------------------------------------
# open_new_gs_file() has a "dtype" keyword that defaults
# to "float32". Flow codes have dtype = "uint8".
#-----------------------------------------------------------
if (self.SAVE_Z_GRIDS):
model_output.open_new_gs_file( self, self.z_gs_file, self.rti,
var_name='z',
long_name='elevation',
units_name='m')
if (self.SAVE_D8_GRIDS):
model_output.open_new_gs_file( self, self.D8_gs_file, self.rti,
dtype='uint8',
var_name='D8',
long_name='D8 flow direction codes',
units_name='none')
if (self.SAVE_S_GRIDS):
model_output.open_new_gs_file( self, self.S_gs_file, self.rti,
var_name='S',
long_name='surface slope',
units_name='m/m')
if (self.SAVE_A_GRIDS):
model_output.open_new_gs_file( self, self.A_gs_file, self.rti,
var_name='A',
long_name='D8 contributing area',
units_name='km^2')
#--------------------------------------
# Open new files to write time series
#--------------------------------------
IDs = self.outlet_IDs
if (self.SAVE_Z_PIXELS):
model_output.open_new_ts_file( self, self.z_ts_file, IDs,
var_name='z',
long_name='elevation',
units_name='m')
if (self.SAVE_D8_PIXELS):
model_output.open_new_ts_file( self, self.D8_ts_file, IDs,
dtype='uint8',
var_name='D8',
long_name='D8 flow direction codes',
units_name='none')
if (self.SAVE_S_PIXELS):
model_output.open_new_ts_file( self, self.S_ts_file, IDs,
var_name='S',
long_name='surface slope',
units_name='m/m')
if (self.SAVE_A_PIXELS):
model_output.open_new_ts_file( self, self.A_ts_file, IDs,
var_name='A',
long_name='D8 contributing area',
units_name='km^2')
#-------------------------------------
# Save FLOAT version of original DEM
# as the rawDEM for the new DEM
#-------------------------------------
## if (self.rti.SWAP_ENDIAN):
## array(float32(self.z0), copy=0).byteswap(True)
## new_rawDEM_unit = open( self.new_rawDEM_file, 'wb' )
## float32(self.z0).tofile( new_rawDEM_unit )
## new_rawDEM_unit.close()
## self.new_DEM_unit = open(self.new_DEM_file, 'wb')
## self.new_slope_unit = open(self.new_slope_file, 'wb')
## self.new_rawDEM_unit = open(self.new_rawDEM_file, 'wb')
## self.new_flow_unit = open(self.new_flow_file, 'wb')
# open_output_files()
#-------------------------------------------------------------------
def write_output_files(self, time_seconds=None):
#---------------------------------------------------------
# Notes: This function was written to use only model
# time (maybe from a caller) in seconds, and
# the save_grid_dt and save_pixels_dt parameters
# read by read_cfg_file().
#
# read_cfg_file() makes sure that all of
# the "save_dts" are larger than or equal to the
# process dt.
#---------------------------------------------------------
#-----------------------------------------
# Allows time to be passed from a caller
#-----------------------------------------
if (time_seconds is None):
time_seconds = self.time_sec
model_time = int(time_seconds)
#----------------------------------------
# Save computed values at sampled times
#----------------------------------------
if (model_time % int(self.save_grid_dt) == 0):
self.save_grids()
if (model_time % int(self.save_pixels_dt) == 0):
self.save_pixel_values()
## SWAP_ENDIAN = self.rti.SWAP_ENDIAN
##
## #-----------------------
## # Save new DEM in file
## #-----------------------
## if (SWAP_ENDIAN):
## array(float32(self.DEM), copy=0).byteswap(True)
## float32(self.DEM).tofile( self.new_DEM_unit )
## #-----------------------------
## # Create RTI file for new DEM
## #------------------------------
## info = self.rti
## info.data_type = 'FLOAT'
## #info.DEM_file = str(self.new_DEM_unit.name)
## rti_files.write_info( self.new_RTI_file, info )
##
## #--------------------------------------
## # Create and save new slope grid file
## #-----------------------------------------
## # Subpixel sinuosity, if any, is applied
## # later in Route_Flow. Both ds and the
## # pID_grid were computed above.
## #-----------------------------------------
## ## slopes = (self.new_DEM - self.new_DEM[pID_grid]) / ds
## if (SWAP_ENDIAN):
## array(float32(self.S), copy=0).byteswap(True)
## float32(self.S).tofile( self.new_slope_unit )
##
## #------------------------------------
## # Save D8 flow grid of original DEM
## # as the flow grid of the new DEM
## #-----------------------------------------
## # Check that flow grid hasn't changed ?? ;**********************
## #-----------------------------------------
## if (SWAP_ENDIAN):
## array(self.d8.d8_grid, copy=0).byteswap(True)
## self.d8.d8_grid.tofile( self.new_flow_unit )
## # self.d8.d8_codes.tofile( self.new_flow_unit )
# write_output_files()
#-------------------------------------------------------------------
def close_output_files(self):
## self.new_DEM_unit.close()
## self.new_slope_unit.close()
## self.new_rawDEM_unit.close()
## self.new_flow_unit.close()
if (self.SAVE_Z_GRIDS): model_output.close_gs_file( self, 'z')
if (self.SAVE_D8_GRIDS): model_output.close_gs_file( self, 'D8')
if (self.SAVE_S_GRIDS): model_output.close_gs_file( self, 'S')
if (self.SAVE_A_GRIDS): model_output.close_gs_file( self, 'A')
#---------------------------------------------------------------------
if (self.SAVE_Z_PIXELS): model_output.close_ts_file( self, 'z')
if (self.SAVE_D8_PIXELS): model_output.close_ts_file( self, 'D8')
if (self.SAVE_S_PIXELS): model_output.close_ts_file( self, 'S')
if (self.SAVE_A_PIXELS): model_output.close_ts_file( self, 'A')
# close_output_files()
#-------------------------------------------------------------------
def save_grids(self):
#-----------------------------------
# Save grid stack to a netCDF file
#---------------------------------------------
# Note that add_grid() methods will convert
# var from scalar to grid now, if necessary.
#---------------------------------------------
if (self.SAVE_Z_GRIDS):
if (self.time_index == 0):
#--------------------------------------
# Save original DEM as the first grid
#--------------------------------------
model_output.add_grid( self, self.z0, 'z', 0.0 )
model_output.add_grid( self, self.DEM, 'z', self.time_min )
if (self.SAVE_D8_GRIDS):
model_output.add_grid( self, self.d8.d8_grid, 'D8', self.time_min )
if (self.SAVE_S_GRIDS):
model_output.add_grid( self, self.S, 'S', self.time_min )
if (self.SAVE_A_GRIDS):
model_output.add_grid( self, self.d8.A, 'A', self.time_min )
# save_grids()
#-------------------------------------------------------------------
def save_pixel_values(self): ##### save_time_series_data(self) #######
IDs = self.outlet_IDs
time = self.time_min #####
#-------------
# New method
#-------------
if (self.SAVE_Z_PIXELS):
model_output.add_values_at_IDs( self, time, self.DEM, 'z', IDs )
if (self.SAVE_D8_PIXELS):
model_output.add_values_at_IDs( self, time, self.d8.d8_grid, 'D8', IDs )
if (self.SAVE_S_PIXELS):
model_output.add_values_at_IDs( self, time, self.S, 'S', IDs )
if (self.SAVE_A_PIXELS):
model_output.add_values_at_IDs( self, time, self.d8.A, 'A', IDs )
# save_pixel_values()
#-------------------------------------------------------------------
| mit |
rynecarbone/power_ranker | power_ranker/two_step_dom.py | 1 | 3277 | #!/usr/bin/env python
"""Calculate two step dominance matrix"""
import logging
import pandas as pd
from scipy.sparse import coo_matrix
__author__ = 'Ryne Carbone'
logger = logging.getLogger(__name__)
def get_two_step_dom_ranks(df_schedule, week, sq_weight=0.25, decay_penalty=0.5):
"""Calculate rankings using two step dominance matrix
Note: No longer returning 'normed' dominance rankings
Need to normalize by average rank after joining to team data
  :param df_schedule: data frame with rows for each matchup
:param week: current week
:param sq_weight: weight for the squared wins matrix
:param decay_penalty: weigh current wins more
:return: data frame with rankings for each team
"""
wins_matrix = calc_wins_matrix(df_schedule, week, decay_penalty)
dom_matrix = (1-sq_weight)*wins_matrix + sq_weight*(wins_matrix@wins_matrix)
# For each row, sum values across the columns
dom_ranks = pd.DataFrame(dom_matrix.sum(axis=1), columns=['dom'])
# Add in team_id so we can join later
dom_ranks['team_id'] = dom_ranks.index
# Normalize by max dominance score
dom_ranks['dom'] = dom_ranks.get('dom') / dom_ranks.get('dom').max()
return dom_ranks
def calc_wins_matrix(df_schedule, week, decay_penalty):
"""Calculate wins matrix from season schedule
Note: there will be some extra zero-filled rows if team ids
are non-contiguous, for example from teams leaving/entering
the league from year to year. This will be fine when we sum
the rows later.
:param df_schedule: data frame with rows for each matchup
:param week: current week
:param decay_penalty: weigh current wins more
:return: n_teams x n_teams wins matrix
"""
# Create COO formatted wins matrix
# v, (x,y) where:
# x: team id
# y: opponent id
# v: (1-decay) + decay * week_i/current_week if team wins else 0
# Note: takes care of repeat (x,y) by summing v as expected
max_id = max(df_schedule.get('away_id').max(),
df_schedule.get('home_id').max())
df_schedule_coo = (
df_schedule
.query(f'matchupPeriodId<={week} & winner!="UNDECIDED"')
[['away_id', 'home_id', 'matchupPeriodId', 'winner']]
)
# Calculate matrix values for away wins
df_schedule_coo['away_value'] = df_schedule_coo.apply(
lambda x: (1 - decay_penalty) + decay_penalty * x.get('matchupPeriodId') / week
if x.get('winner') == 'AWAY' else 0,
axis=1
)
# Calculate matrix values for home wins
df_schedule_coo['home_value'] = df_schedule_coo.apply(
lambda x: (1 - decay_penalty) + decay_penalty * x.get('matchupPeriodId') / week
if x.get('winner') == 'HOME' else 0,
axis=1
)
# Convert from series to sparse coo, add home and away values
wins_matrix = coo_matrix(
(df_schedule_coo.get('away_value').values,
(df_schedule_coo.get('away_id').values,
df_schedule_coo.get('home_id').values)
), shape=(max_id + 1, max_id + 1)
)
wins_matrix += coo_matrix(
(df_schedule_coo.get('home_value').values,
(df_schedule_coo.get('home_id').values,
df_schedule_coo.get('away_id').values)
), shape=(max_id+1, max_id+1)
)
return wins_matrix
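# Hedged usage sketch (editor addition): `_demo_two_step_dom` is a hypothetical
# helper, but the column names (away_id, home_id, matchupPeriodId, winner) are
# exactly the ones consumed by calc_wins_matrix above. Results are made up.
def _demo_two_step_dom():
  """Run the dominance ranking on a tiny three-team toy schedule."""
  df_toy = pd.DataFrame({
    'away_id': [0, 1, 2, 0],
    'home_id': [1, 2, 0, 2],
    'matchupPeriodId': [1, 1, 2, 2],
    'winner': ['AWAY', 'HOME', 'HOME', 'AWAY'],
  })
  ranks = get_two_step_dom_ranks(df_toy, week=2)
  print(ranks.sort_values('dom', ascending=False))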
| mit |
cdegroc/scikit-learn | sklearn/datasets/samples_generator.py | 2 | 36046 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe
# License: BSD 3 clause
from itertools import product
import numpy as np
from scipy import linalg
from ..utils import array2d, check_random_state
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
        duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined in order to add covariance. The clusters
are then placed on the vertices of the hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float or None, optional (default=0.0)
Shift all features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float or None, optional (default=1.0)
Multiply all features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
**References**:
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
"be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
n_samples_per_cluster = []
for k in xrange(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in xrange(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples)
# Build the polytope
C = np.array(list(product([-class_sep, class_sep], repeat=n_informative)))
if not hypercube:
for k in xrange(n_clusters):
C[k, :] *= generator.rand()
for f in xrange(n_informative):
C[:, f] *= generator.rand()
generator.shuffle(C)
# Loop over all clusters
pos = 0
pos_end = 0
for k in xrange(n_clusters):
# Number of samples in cluster k
n_samples_k = n_samples_per_cluster[k]
# Define the range of samples
pos = pos_end
pos_end = pos + n_samples_k
# Assign labels
y[pos:pos_end] = k % n_classes
# Draw features at random
X[pos:pos_end, :n_informative] = generator.randn(n_samples_k,
n_informative)
# Multiply by a random matrix to create co-variance of the features
A = 2 * generator.rand(n_informative, n_informative) - 1
X[pos:pos_end, :n_informative] = np.dot(X[pos:pos_end, :n_informative],
A)
        # Shift the cluster to a vertex
X[pos:pos_end, :n_informative] += np.tile(C[k, :], (n_samples_k, 1))
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.int)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
X[:, n_features - n_useless:] = generator.randn(n_samples, n_useless)
# Randomly flip labels
if flip_y >= 0.0:
for i in xrange(n_samples):
if generator.rand() < flip_y:
y[i] = generator.randint(n_classes)
# Randomly shift and scale
constant_shift = shift is not None
constant_scale = scale is not None
for f in xrange(n_features):
if not constant_shift:
shift = (2 * generator.rand() - 1) * class_sep
if not constant_scale:
scale = 1 + 100 * generator.rand()
X[:, f] += shift
X[:, f] *= scale
# Randomly permute samples and features
if shuffle:
indices = range(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
indices = range(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
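# Hedged usage sketch (editor addition): `_demo_make_classification` is a
# hypothetical helper; the keyword arguments mirror the signature documented
# above.
def _demo_make_classification():
    X, y = make_classification(n_samples=50, n_features=6, n_informative=3,
                               n_redundant=1, n_classes=2, random_state=0)
    print(X.shape)                      # (50, 6)
    print(np.bincount(y.astype(int)))   # roughly balanced class counts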
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50,
allow_unlabeled=True, random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. Number of labels follows
a Poisson distribution that never takes the value 0.
length : int, optional (default=50)
Sum of the features (number of words if documents).
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : list of tuples
The label sets.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
n = n_classes + 1
while (not allow_unlabeled and n == 0) or n > n_classes:
n = generator.poisson(n_labels)
# pick n classes
y = []
while len(y) != n:
# pick a class with probability P(c)
c = generator.multinomial(1, p_c).argmax()
if not c in y:
y.append(c)
# pick a non-zero document length by rejection sampling
k = 0
while k == 0:
k = generator.poisson(length)
# generate a document of length k words
x = np.zeros(n_features, dtype=int)
for i in range(k):
if len(y) == 0:
# if sample does not belong to any class, generate noise word
w = generator.randint(n_features)
else:
# pick a class and generate an appropriate word
c = y[generator.randint(len(y))]
w = generator.multinomial(1, p_w_c[:, c]).argmax()
x[w] += 1
return x, y
X, Y = zip(*[sample_example() for i in range(n_samples)])
return np.array(X, dtype=np.float64), Y
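# Hedged usage sketch (editor addition): `_demo_make_multilabel` is a
# hypothetical helper showing the (X, Y) return convention described above,
# where Y holds one label collection per sample.
def _demo_make_multilabel():
    X, Y = make_multilabel_classification(n_samples=5, n_features=10,
                                          n_classes=4, random_state=0)
    print(X.shape)   # (5, 10) word-count features
    print(Y)         # label collections, possibly empty if unlabeled allowed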
def make_regression(n_samples=100, n_features=100, n_informative=10, bias=0.0,
effective_rank=None, tail_strength=0.5, noise=0.0,
shuffle=True, coef=False, random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See the `make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
coef : array of shape [n_features], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros(n_features)
ground_truth[:n_informative] = 100 * generator.rand(n_informative)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
indices = range(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
indices = range(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
if coef:
return X, y, ground_truth
else:
return X, y
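# Hedged usage sketch (editor addition): `_demo_make_regression` is a
# hypothetical helper; with coef=True the generator also returns the ground
# truth weights, of which exactly n_informative are non-zero.
def _demo_make_regression():
    X, y, w = make_regression(n_samples=20, n_features=8, n_informative=3,
                              noise=0.1, coef=True, random_state=0)
    print(X.shape)              # (20, 8)
    print(int(np.sum(w != 0)))  # 3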
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> X.shape
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
"""
generator = check_random_state(random_state)
if isinstance(centers, int):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = array2d(centers)
n_features = centers.shape[1]
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in xrange(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, n in enumerate(n_samples_per_center):
X.append(centers[i] + generator.normal(scale=cluster_std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman #1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
Notes
-----
**References**:
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
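# Hedged usage sketch (editor addition): `_demo_make_friedman1` is a
# hypothetical helper; only the first five of the requested features enter
# the Friedman #1 formula above.
def _demo_make_friedman1():
    X, y = make_friedman1(n_samples=30, n_features=7, noise=0.0,
                          random_state=0)
    print(X.shape)   # (30, 7)
    print(y.shape)   # (30,)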
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman #2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 \
+ (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
Notes
-----
**References**:
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman #3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) \
+ noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
Notes
-----
**References**:
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
from ..utils.fixes import qr_economic
u, _ = qr_economic(generator.randn(n_samples, n))
v, _ = qr_economic(generator.randn(n_features, n))
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = (1 - tail_strength) * \
np.exp(-1.0 * (singular_ind / effective_rank) ** 2)
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
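# Hedged usage sketch (editor addition): `_demo_low_rank_spectrum` is a
# hypothetical helper that inspects the bell-shaped singular value profile
# produced above.
def _demo_low_rank_spectrum():
    X = make_low_rank_matrix(n_samples=50, n_features=30, effective_rank=5,
                             tail_strength=0.1, random_state=0)
    s = np.linalg.svd(X, compute_uv=False)
    print(s[:10] / s[0])   # fast decay past ~effective_rank, then a fat tail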
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in xrange(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
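# Hedged usage sketch (editor addition): `_demo_sparse_coded_signal` is a
# hypothetical helper verifying the Y = D X factorization and the per-column
# sparsity promised above.
def _demo_sparse_coded_signal():
    Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=12,
                                       n_features=8, n_nonzero_coefs=3,
                                       random_state=0)
    print(np.allclose(Y, np.dot(D, X)))   # True by construction
    print((X != 0).sum(axis=0))           # 3 active atoms in every column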
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
Notes
-----
**References**:
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symetric definite positive matrix.
Parameters
----------
dim: integer, optional (default=1)
        The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
prec: array of shape = [dim, dim]
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
    # Permute the rows: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
d = np.diag(prec)
d = 1. / np.sqrt(d)
prec *= d
prec *= d[:, np.newaxis]
return prec
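# Hedged usage sketch (editor addition): `_demo_sparse_spd` is a hypothetical
# helper checking the two properties the generator guarantees: symmetry and
# positive definiteness.
def _demo_sparse_spd():
    prec = make_sparse_spd_matrix(dim=6, alpha=0.9, random_state=0)
    print(np.allclose(prec, prec.T))            # symmetric
    print(np.linalg.eigvalsh(prec).min() > 0)   # positive definite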
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
**References**:
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
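# Hedged usage sketch (editor addition): `_demo_manifolds` is a hypothetical
# helper; both generators return 3D points plus the univariate manifold
# coordinate t described above.
def _demo_manifolds():
    X_roll, t_roll = make_swiss_roll(n_samples=200, noise=0.05, random_state=0)
    X_s, t_s = make_s_curve(n_samples=200, noise=0.05, random_state=0)
    print(X_roll.shape)   # (200, 3)
    print(t_roll.shape)   # (200,)
    print(X_s.shape)      # (200, 3)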
| bsd-3-clause |
Clyde-fare/scikit-learn | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
    ----------
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
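# Hedged usage sketch (editor addition): `_demo_img_to_graph` is a hypothetical
# helper; a 4x4 image yields a 16x16 adjacency matrix with gradient-weighted
# edges plus the diagonal, as described above.
def _demo_img_to_graph():
    img = np.arange(16, dtype=np.float64).reshape(4, 4)
    graph = img_to_graph(img)
    print(graph.shape)   # (16, 16): one node per pixel
    print(graph.nnz)     # both edge directions plus the 16 diagonal entries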
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
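# Hedged usage sketch (editor addition): `_demo_extract_patches` is a
# hypothetical helper; the first two dimensions index patch positions and the
# last two index patch content, as documented above.
def _demo_extract_patches():
    arr = np.arange(25).reshape(5, 5)
    patches = extract_patches(arr, patch_shape=(3, 3), extraction_step=1)
    print(patches.shape)                                # (3, 3, 3, 3)
    print(np.array_equal(patches[0, 0], arr[:3, :3]))   # True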
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
        the channel: an RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
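# Hedged usage sketch (editor addition): `_demo_patch_round_trip` is a
# hypothetical helper; averaging the overlapping patches reconstructs the
# original image exactly when the patches are unmodified.
def _demo_patch_round_trip():
    image = np.arange(36, dtype=np.float64).reshape(6, 6)
    patches = extract_patches_2d(image, (3, 3))
    rebuilt = reconstruct_from_patches_2d(patches, (6, 6))
    print(np.allclose(image, rebuilt))   # True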
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
            the last dimension specifies the channel: an RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
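# Hedged usage sketch (editor addition): `_demo_patch_extractor` is a
# hypothetical helper; with max_patches=5 each of the two images contributes
# five randomly positioned patches.
def _demo_patch_extractor():
    images = np.arange(2 * 8 * 8, dtype=np.float64).reshape(2, 8, 8)
    extractor = PatchExtractor(patch_size=(4, 4), max_patches=5,
                               random_state=0)
    patches = extractor.transform(images)
    print(patches.shape)   # (10, 4, 4)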
| bsd-3-clause |
jm-begon/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
mengyun1993/RNN-binary | rnn04.py | 1 | 22389 | """ Vanilla RNN
@author Graham Taylor
"""
import numpy as np
import theano
import theano.tensor as T
from sklearn.base import BaseEstimator
import logging
import time
import os
import datetime
import pickle as pickle
import math
import matplotlib.pyplot as plt
plt.ion()
mode = theano.Mode(linker='cvm')
#mode = 'DEBUG_MODE'
class RNN(object):
""" Recurrent neural network class
Supported output types:
real : linear output units, use mean-squared error
binary : binary output units, use cross-entropy error
softmax : single softmax out, use cross-entropy error
"""
def __init__(self, input, n_in, n_hidden, n_out, activation=T.tanh,
output_type='real', use_symbolic_softmax=False):
self.input = input
self.activation = activation
self.output_type = output_type
# when using HF, SoftmaxGrad.grad is not implemented
# use a symbolic softmax which is slightly slower than T.nnet.softmax
# See: http://groups.google.com/group/theano-dev/browse_thread/
# thread/3930bd5a6a67d27a
if use_symbolic_softmax:
def symbolic_softmax(x):
e = T.exp(x)
return e / T.sum(e, axis=1).dimshuffle(0, 'x')
self.softmax = symbolic_softmax
else:
self.softmax = T.nnet.softmax
# recurrent weights as a shared variable
W_init = np.asarray(np.random.uniform(size=(n_hidden, n_hidden),
low=-.01, high=.01),
dtype=theano.config.floatX)
self.W = theano.shared(value=W_init, name='W')
# input to hidden layer weights
W_in_init = np.asarray(np.random.uniform(size=(n_in, n_hidden),
low=-.01, high=.01),
dtype=theano.config.floatX)
self.W_in = theano.shared(value=W_in_init, name='W_in')
# hidden to output layer weights
W_out_init = np.asarray(np.random.uniform(size=(n_hidden, n_out),
low=-.01, high=.01),
dtype=theano.config.floatX)
self.W_out = theano.shared(value=W_out_init, name='W_out')
h0_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
self.h0 = theano.shared(value=h0_init, name='h0')
bh_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
self.bh = theano.shared(value=bh_init, name='bh')
by_init = np.zeros((n_out,), dtype=theano.config.floatX)
self.by = theano.shared(value=by_init, name='by')
self.params = [self.W, self.W_in, self.W_out, self.h0,
self.bh, self.by]
        # for every parameter, we maintain its last update
# the idea here is to use "momentum"
# keep moving mostly in the same direction
self.updates = {}
for param in self.params:
init = np.zeros(param.get_value(borrow=True).shape,
dtype=theano.config.floatX)
self.updates[param] = theano.shared(init)
# recurrent function (using tanh activation function) and linear output
# activation function
def step(x_t, h_tm1):
h_t = self.activation(T.dot(x_t, self.W_in) + \
T.dot(h_tm1, self.W) + self.bh)
y_t = T.dot(h_t, self.W_out) + self.by
return h_t, y_t
# the hidden state `h` for the entire sequence, and the output for the
# entire sequence `y` (first dimension is always time)
[self.h, self.y_pred], _ = theano.scan(step,
sequences=self.input,
outputs_info=[self.h0, None])
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = 0
self.L1 += abs(self.W.sum())
self.L1 += abs(self.W_in.sum())
self.L1 += abs(self.W_out.sum())
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = 0
self.L2_sqr += (self.W ** 2).sum()
self.L2_sqr += (self.W_in ** 2).sum()
self.L2_sqr += (self.W_out ** 2).sum()
if self.output_type == 'real':
self.loss = lambda y: self.mse(y)
elif self.output_type == 'binary':
# push through sigmoid
self.p_y_given_x = T.nnet.sigmoid(self.y_pred) # apply sigmoid
self.y_out = T.round(self.p_y_given_x) # round to {0,1}
self.loss = lambda y: self.nll_binary(y)
elif self.output_type == 'softmax':
# push through softmax, computing vector of class-membership
# probabilities in symbolic form
self.p_y_given_x = self.softmax(self.y_pred)
# compute prediction as class whose probability is maximal
self.y_out = T.argmax(self.p_y_given_x, axis=-1)
self.loss = lambda y: self.nll_multiclass(y)
else:
raise NotImplementedError
def mse(self, y):
# error between output and target
return T.mean((self.y_pred - y) ** 2)
def nll_binary(self, y):
# negative log likelihood based on binary cross entropy error
return T.mean(T.nnet.binary_crossentropy(self.p_y_given_x, y))
def nll_multiclass(self, y):
# negative log likelihood based on multiclass cross entropy error
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of time steps (call it T) in the sequence
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def errors(self, y):
"""Return a float representing the number of errors in the sequence
        over the total number of examples in the sequence; zero-one
loss over the size of the sequence
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_out.ndim:
raise TypeError('y should have the same shape as self.y_out',
('y', y.type, 'y_out', self.y_out.type))
if self.output_type in ('binary', 'softmax'):
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_out, y))
else:
raise NotImplementedError()
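# Hedged sketch (editor addition): `_demo_rnn_forward` is a hypothetical helper
# that wires the RNN class above to a symbolic input and compiles a forward
# pass, assuming a working Theano installation.
def _demo_rnn_forward():
    x = T.matrix()
    rnn = RNN(input=x, n_in=3, n_hidden=8, n_out=2,
              activation=T.tanh, output_type='binary')
    step_fn = theano.function([x], rnn.p_y_given_x, mode=mode)
    seq = np.random.randn(10, 3).astype(theano.config.floatX)
    print(step_fn(seq).shape)   # (10, 2) per-timestep probabilities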
class MetaRNN(BaseEstimator):
def __init__(self, n_in=5, n_hidden=50, n_out=5, learning_rate=0.01,
n_epochs=100, L1_reg=0.00, L2_reg=0.00, learning_rate_decay=1,
activation='tanh', output_type='real',
final_momentum=0.9, initial_momentum=0.5,
momentum_switchover=5,
use_symbolic_softmax=False):
self.n_in = int(n_in)
self.n_hidden = int(n_hidden)
self.n_out = int(n_out)
self.learning_rate = float(learning_rate)
self.learning_rate_decay = float(learning_rate_decay)
self.n_epochs = int(n_epochs)
self.L1_reg = float(L1_reg)
self.L2_reg = float(L2_reg)
self.activation = activation
self.output_type = output_type
self.initial_momentum = float(initial_momentum)
self.final_momentum = float(final_momentum)
self.momentum_switchover = int(momentum_switchover)
self.use_symbolic_softmax = use_symbolic_softmax
self.ready()
def ready(self):
# input (where first dimension is time)
self.x = T.matrix()
# target (where first dimension is time)
if self.output_type == 'real':
self.y = T.matrix(name='y', dtype=theano.config.floatX)
elif self.output_type == 'binary':
self.y = T.matrix(name='y', dtype='int32')
elif self.output_type == 'softmax': # only vector labels supported
self.y = T.vector(name='y', dtype='int32')
else:
raise NotImplementedError
# initial hidden state of the RNN
self.h0 = T.vector()
# learning rate
self.lr = T.scalar()
if self.activation == 'tanh':
activation = T.tanh
elif self.activation == 'sigmoid':
activation = T.nnet.sigmoid
elif self.activation == 'relu':
activation = lambda x: x * (x > 0)
elif self.activation == 'cappedrelu':
activation = lambda x: T.minimum(x * (x > 0), 6)
else:
raise NotImplementedError
self.rnn = RNN(input=self.x, n_in=self.n_in,
n_hidden=self.n_hidden, n_out=self.n_out,
activation=activation, output_type=self.output_type,
use_symbolic_softmax=self.use_symbolic_softmax)
if self.output_type == 'real':
self.predict = theano.function(inputs=[self.x, ],
outputs=self.rnn.y_pred,
mode=mode)
elif self.output_type == 'binary':
self.predict_proba = theano.function(inputs=[self.x, ],
outputs=self.rnn.p_y_given_x, mode=mode)
self.predict = theano.function(inputs=[self.x, ],
outputs=T.round(self.rnn.p_y_given_x),
mode=mode)
elif self.output_type == 'softmax':
self.predict_proba = theano.function(inputs=[self.x, ],
outputs=self.rnn.p_y_given_x, mode=mode)
self.predict = theano.function(inputs=[self.x, ],
outputs=self.rnn.y_out, mode=mode)
else:
raise NotImplementedError
def shared_dataset(self, data_xy):
""" Load the dataset into shared variables """
data_x, data_y = data_xy
shared_x = theano.shared(np.asarray(data_x,
dtype=theano.config.floatX))
shared_y = theano.shared(np.asarray(data_y,
dtype=theano.config.floatX))
if self.output_type in ('binary', 'softmax'):
return shared_x, T.cast(shared_y, 'int32')
else:
return shared_x, shared_y
def __getstate__(self):
""" Return state sequence."""
params = self._get_params() # parameters set in constructor
weights = [p.get_value() for p in self.rnn.params]
state = (params, weights)
return state
def _set_weights(self, weights):
""" Set fittable parameters from weights sequence.
Parameters must be in the order defined by self.params:
W, W_in, W_out, h0, bh, by
"""
i = iter(weights)
for param in self.rnn.params:
param.set_value(i.next())
def __setstate__(self, state):
""" Set parameters from state sequence.
Parameters must be in the order defined by self.params:
W, W_in, W_out, h0, bh, by
"""
params, weights = state
self.set_params(**params)
self.ready()
self._set_weights(weights)
def save(self, fpath='.', fname=None):
""" Save a pickled representation of Model state. """
fpathstart, fpathext = os.path.splitext(fpath)
if fpathext == '.pkl':
# User supplied an absolute path to a pickle file
fpath, fname = os.path.split(fpath)
elif fname is None:
# Generate filename based on date
date_obj = datetime.datetime.now()
date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')
class_name = self.__class__.__name__
fname = '%s.%s.pkl' % (class_name, date_str)
fabspath = os.path.join(fpath, fname)
logging.info("Saving to %s ..." % fabspath)
file = open(fabspath, 'wb')
state = self.__getstate__()
pickle.dump(state, file, protocol=pickle.HIGHEST_PROTOCOL)
file.close()
def load(self, path):
""" Load model parameters from path. """
logging.info("Loading from %s ..." % path)
file = open(path, 'rb')
state = pickle.load(file)
self.__setstate__(state)
file.close()
def fit(self, X_train, Y_train, X_test=None, Y_test=None,
validation_frequency=100):
""" Fit model
Pass in X_test, Y_test to compute test error and report during
training.
X_train : ndarray (n_seq x n_steps x n_in)
Y_train : ndarray (n_seq x n_steps x n_out)
validation_frequency : int
in terms of number of sequences (or number of weight updates)
"""
f = file('trainProcess/trainOutput-b04-500-200-20.txt','a+')
if X_test is not None:
assert(Y_test is not None)
self.interactive = True
test_set_x, test_set_y = self.shared_dataset((X_test, Y_test))
else:
self.interactive = False
train_set_x, train_set_y = self.shared_dataset((X_train, Y_train))
n_train = train_set_x.get_value(borrow=True).shape[0]
if self.interactive:
n_test = test_set_x.get_value(borrow=True).shape[0]
######################
# BUILD ACTUAL MODEL #
######################
logging.info('... building the model')
index = T.lscalar('index') # index to a case
# learning rate (may change)
l_r = T.scalar('l_r', dtype=theano.config.floatX)
mom = T.scalar('mom', dtype=theano.config.floatX) # momentum
cost = self.rnn.loss(self.y) \
+ self.L1_reg * self.rnn.L1 \
+ self.L2_reg * self.rnn.L2_sqr
compute_train_error = theano.function(inputs=[index, ],
outputs=self.rnn.loss(self.y),
givens={
self.x: train_set_x[index],
self.y: train_set_y[index]},
mode=mode)
if self.interactive:
compute_test_error = theano.function(inputs=[index, ],
outputs=self.rnn.loss(self.y),
givens={
self.x: test_set_x[index],
self.y: test_set_y[index]},
mode=mode)
# compute the gradient of cost with respect to theta = (W, W_in, W_out)
# gradients on the weights using BPTT
gparams = []
for param in self.rnn.params:
gparam = T.grad(cost, param)
gparams.append(gparam)
updates = {}
for param, gparam in zip(self.rnn.params, gparams):
weight_update = self.rnn.updates[param]
upd = mom * weight_update - l_r * gparam
updates[weight_update] = upd
updates[param] = param + upd
# compiling a Theano function `train_model` that returns the
# cost, but in the same time updates the parameter of the
# model based on the rules defined in `updates`
train_model = theano.function(inputs=[index, l_r, mom],
outputs=cost,
updates=updates,
givens={
self.x: train_set_x[index],
self.y: train_set_y[index]},
mode=mode)
###############
# TRAIN MODEL #
###############
logging.info('... training')
epoch = 0
while (epoch < self.n_epochs):
epoch = epoch + 1
for idx in xrange(n_train):
effective_momentum = self.final_momentum \
if epoch > self.momentum_switchover \
else self.initial_momentum
example_cost = train_model(idx, self.learning_rate,
effective_momentum)
# iteration number (how many weight updates have we made?)
# epoch is 1-based, index is 0 based
iter = (epoch - 1) * n_train + idx + 1
if iter % validation_frequency == 0:
# compute loss on training set
train_losses = [compute_train_error(i)
for i in xrange(n_train)]
this_train_loss = np.mean(train_losses)
if self.interactive:
test_losses = [compute_test_error(i)
for i in xrange(n_test)]
this_test_loss = np.mean(test_losses)
f.write('epoch %i, seq %i/%i, tr loss %f '
'te loss %f lr: %f \n' % \
(epoch, idx + 1, n_train,
this_train_loss, this_test_loss, self.learning_rate))
print('epoch %i, seq %i/%i, tr loss %f '
'te loss %f lr: %f' % \
(epoch, idx + 1, n_train,
this_train_loss, this_test_loss, self.learning_rate))
else:
f.write('epoch %i, seq %i/%i, train loss %f '
'lr: %f \n' % \
(epoch, idx + 1, n_train, this_train_loss,
self.learning_rate))
print('epoch %i, seq %i/%i, train loss %f '
'lr: %f' % \
(epoch, idx + 1, n_train, this_train_loss,
self.learning_rate))
self.learning_rate *= self.learning_rate_decay
f.close()
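# Hedged sketch (exposition only, not called by MetaRNN): the momentum update
# compiled inside fit() above, restated with plain arrays for one parameter.
# `velocity` plays the role of the shared variable self.rnn.updates[param].
def _momentum_sgd_step_demo(param, grad, velocity, lr=0.01, mom=0.9):
    upd = mom * velocity - lr * grad   # new velocity (the weight_update above)
    return param + upd, upd            # updated parameter, updated velocity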
def test_real():
""" Test RNN with real-valued outputs. """
n_hidden = 200
n_in = 20
n_out = 5
n_steps = 10
n_seq = 100
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_seq, n_steps, n_in)
targets = np.zeros((n_seq, n_steps, n_out))
targets[:, 1:, 0] = seq[:, :-1, 3] # delayed 1
targets[:, 1:, 1] = seq[:, :-1, 2] # delayed 1
targets[:, 2:, 2] = seq[:, :-2, 0] # delayed 2
targets += 0.01 * np.random.standard_normal(targets.shape)
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.001, learning_rate_decay=0.999,
n_epochs=400, activation='tanh')
model.fit(seq, targets, validation_frequency=1000)
[seqNum,lineNum,colNum] = targets.shape
print(seqNum,lineNum,colNum)
error = [0 for i in range(colNum)]
plt.close('all')
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[0])
ax1.set_title('input')
ax2 = plt.subplot(212)
true_targets = plt.plot(targets[0])
guess = model.predict(seq[0])
guessed_targets = plt.plot(guess, linestyle='--')
for i, x in enumerate(guessed_targets):
x.set_color(true_targets[i].get_color())
ax2.set_title('solid: true output, dashed: model output')
dif = abs(guess - targets[0])
[linedif,coldif] = dif.shape
print(linedif,coldif)
errorsum = 0
for i in range (colNum):
sum = 0
for j in range (lineNum):
sum += dif[j][i] ** 2
error[i] = math.sqrt(sum/lineNum)
errorsum += error[i]
print(error[i])
print("average error = ", errorsum/colNum)
def test_binary(multiple_out=False, n_epochs=250):
""" Test RNN with binary outputs. """
n_hidden = 80
n_in = 11
n_out = 66
n_steps = 40
n_seq = 1000
np.random.seed(0)
# simple lag test
seqlist = []
count = 0
data = []
BASE_DIR = os.path.dirname(__file__)
#file_path1 = os.path.join(BASE_DIR,"traindata/inputdata-b04-1000-40.txt")
file_path1 = os.path.join(BASE_DIR,"traindata/inputdata-b04-40-20-50.txt")
for l in open(file_path1):
#for l in open("inputdata-b02-300-10.txt"):
count += 1
row = [int(x) for x in l.split()]
if len(row) > 0:
data.append(row)
if (count == n_steps):
count = 0
if len(data) >0:
seqlist.append(data)
data = []
seqarray = np.asarray(seqlist)
seq = seqarray[:,:,:n_in]
targets = seqarray[:,:,n_in:]
seqlistTest = []
count = 0
dataTest = []
#file_path2 = os.path.join(BASE_DIR, 'testdata/inputdata-b04-300-40.txt')
file_path2 = os.path.join(BASE_DIR, "testdata/inputdata-b04-20-20-30.txt")
for l in open(file_path2):
#for l in open("inputdata-b02-100-10.txt"):
count += 1
row = [int(x) for x in l.split()]
if len(row) > 0:
dataTest.append(row)
if (count == n_steps):
count = 0
if len(dataTest) >0:
seqlistTest.append(dataTest)
dataTest = []
seqarrayTest = np.asarray(seqlistTest)
seqTest = seqarrayTest[:,:,:n_in]
targetsTest = seqarrayTest[:,:,n_in:]
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.15, learning_rate_decay=0.999,
n_epochs=n_epochs, activation='tanh', output_type='binary')
#model.fit(seq, targets, validation_frequency=1000)
model.fit(seq, targets, seqTest, targetsTest, validation_frequency=1000)
ferror = file('errorRate/errorRate-b04-2000-600-20.txt','a+')
[seqNum,lineNum,colNum] = targetsTest.shape
#print (seqTest.shape)
seqs = xrange(seqNum)
error = [0 for i in range(lineNum*seqNum)]
errorsum = 0
for k in seqs:
guess = model.predict_proba(seqTest[k])
dif = abs(guess - targetsTest[k])
[lineDif,colDif] = dif.shape
#print(lineDif,colDif)
for i in range (lineDif):
ki = k*lineDif+i
for j in range (colDif):
if (dif[i][j] > 0.5):
error[ki] += 1
ferror.write('error %d = %d \n' % (ki,error[ki]))
if (error[ki]>0):
errorsum += 1
print(errorsum)
errorRate = errorsum/1.0/seqNum/lineNum
ferror.write("average error = %f \n" % (errorRate))
## seqs = xrange(1)
##
## [seqNum,lineNum,colNum] = targets.shape
## print(seqNum,lineNum,colNum)
## error = [0 for i in range(colNum)]
##
## plt.close('all')
## for seq_num in seqs:
## fig = plt.figure()
## ax1 = plt.subplot(211)
## plt.plot(seq[seq_num])
## ax1.set_title('input')
## ax2 = plt.subplot(212)
## true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')
##
## guess = model.predict_proba(seq[seq_num])
## guessed_targets = plt.step(xrange(n_steps), guess)
## plt.setp(guessed_targets, linestyle='--', marker='d')
## for i, x in enumerate(guessed_targets):
## x.set_color(true_targets[i].get_color())
## ax2.set_ylim((-0.1, 1.1))
## ax2.set_title('solid: true output, dashed: model output (prob)')
##
##
## dif = abs(guess - targets[seq_num])
## [lineDif,colDif] = dif.shape
## print(lineDif,colDif)
## errorsum = 0
## for i in range (colNum):
## for j in range (lineNum):
## if (dif[j][i] > 0.5):
## error[i] += 1
## print(error[i])
## errorsum += error[i]
## print("average error = ", errorsum/colNum)
def test_softmax(n_epochs=250):
""" Test RNN with softmax outputs. """
n_hidden = 10
n_in = 5
n_steps = 10
n_seq = 100
n_classes = 3
n_out = n_classes # restricted to single softmax per time step
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_seq, n_steps, n_in)
targets = np.zeros((n_seq, n_steps), dtype=np.int)
thresh = 0.5
# if lag 1 (dim 3) is greater than lag 2 (dim 0) + thresh
# class 1
# if lag 1 (dim 3) is less than lag 2 (dim 0) - thresh
# class 2
# if lag 2(dim0) - thresh <= lag 1 (dim 3) <= lag2(dim0) + thresh
# class 0
targets[:, 2:][seq[:, 1:-1, 3] > seq[:, :-2, 0] + thresh] = 1
targets[:, 2:][seq[:, 1:-1, 3] < seq[:, :-2, 0] - thresh] = 2
#targets[:, 2:, 0] = np.cast[np.int](seq[:, 1:-1, 3] > seq[:, :-2, 0])
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.001, learning_rate_decay=0.999,
n_epochs=n_epochs, activation='tanh',
output_type='softmax', use_symbolic_softmax=False)
model.fit(seq, targets, validation_frequency=1000)
seqs = xrange(10)
[seqNum,lineNum,colNum] = seq.shape
print(seqNum,lineNum,colNum)
error = [0 for i in range(colNum)]
plt.close('all')
for seq_num in seqs:
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[seq_num])
        ax1.set_title('input')
ax2 = plt.subplot(212)
# blue line will represent true classes
true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')
# show probabilities (in b/w) output by model
guess = model.predict_proba(seq[seq_num])
guessed_probs = plt.imshow(guess.T, interpolation='nearest',
cmap='gray')
ax2.set_title('blue: true class, grayscale: probs assigned by model')
        # fraction of time steps in this sequence where the most probable
        # class differs from the true class
        pred_classes = np.argmax(guess, axis=-1)
        step_error_rate = np.mean(pred_classes != targets[seq_num])
        print("sequence %i error rate = %f" % (seq_num, step_error_rate))
if __name__ == "__main__":
##logging.basicConfig(
## level = logging.INFO,
## format = 'LINE %(lineno)-4d %(levelname)-8s %(message)s',
## datafmt = '%m-%d %H:%M',
## filename = "D:/logresult20160123/one.log",
## filemode = 'w')
t0 = time.time()
#test_real()
# problem takes more epochs to solve
test_binary(multiple_out=True, n_epochs=30)
#test_softmax(n_epochs=250)
print ("Elapsed time: %f" % (time.time() - t0))
| bsd-3-clause |
jelugbo/ddi | docs/en_us/platform_api/source/conf.py | 5 | 6731 | # -*- coding: utf-8 -*-
# pylint: disable=C0103
# pylint: disable=W0622
# pylint: disable=W0212
# pylint: disable=W0613
import sys, os
from path import path
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
sys.path.append('../../../../')
from docs.shared.conf import *
# Add any paths that contain templates here, relative to this directory.
#templates_path.append('source/_templates')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path.append('source/_static')
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
root = path('../../../..').abspath()
sys.path.insert(0, root)
sys.path.append(root / "lms/djangoapps/mobile_api")
sys.path.append(root / "lms/djangoapps/mobile_api/course_info")
sys.path.append(root / "lms/djangoapps/mobile_api/users")
sys.path.append(root / "lms/djangoapps/mobile_api/video_outlines")
sys.path.insert(0, os.path.abspath(os.path.normpath(os.path.dirname(__file__)
+ '/../../../')))
sys.path.append('.')
# django configuration - careful here
if on_rtd:
os.environ['DJANGO_SETTINGS_MODULE'] = 'lms'
else:
os.environ['DJANGO_SETTINGS_MODULE'] = 'lms.envs.test'
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath',
'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'sphinxcontrib.napoleon']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# Output file base name for HTML help builder.
htmlhelp_basename = 'edXDocs'
project = u'edX Platform API Version 0.5 Alpha'
copyright = u'2014, edX'
# --- Mock modules ------------------------------------------------------------
# Mock all the modules that the readthedocs build can't import
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
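# Hedged illustration (not used by the Sphinx build): the Mock above lets
# arbitrary attribute chains resolve even when the real package cannot be
# imported on readthedocs, and capitalized attributes come back as generated
# stand-in classes so subclassing and isinstance checks still work.
def _mock_demo():
    fake = Mock()
    fake.some.attribute.chain()         # every hop returns another Mock
    FakeBase = fake.SomeBaseClass       # capitalized name -> a generated type
    return isinstance(FakeBase, type)   # True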
# The list of modules and submodules that we know give RTD trouble.
# Make sure you've tried including the relevant package in
# docs/share/requirements.txt before adding to this list.
MOCK_MODULES = [
'bson',
'bson.errors',
'bson.objectid',
'dateutil',
'dateutil.parser',
'fs',
'fs.errors',
'fs.osfs',
'lazy',
'mako',
'mako.template',
'matplotlib',
'matplotlib.pyplot',
'mock',
'numpy',
'oauthlib',
'oauthlib.oauth1',
'oauthlib.oauth1.rfc5849',
'PIL',
'pymongo',
'pyparsing',
'pysrt',
'requests',
'scipy.interpolate',
'scipy.constants',
'scipy.optimize',
'yaml',
'webob',
'webob.multidict',
]
if on_rtd:
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# -----------------------------------------------------------------------------
# from http://djangosnippets.org/snippets/2533/
# autogenerate models definitions
import inspect
import types
from HTMLParser import HTMLParser
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_unicode, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if strings_only and isinstance(s, (types.NoneType, int)):
return s
    if not isinstance(s, basestring):
if hasattr(s, '__unicode__'):
s = unicode(s)
else:
s = unicode(str(s), encoding, errors)
elif not isinstance(s, unicode):
s = unicode(s, encoding, errors)
return s
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
def process_docstring(app, what, name, obj, options, lines):
"""Autodoc django models"""
# This causes import errors if left outside the function
from django.db import models
# If you want extract docs from django forms:
# from django import forms
# from django.forms.models import BaseInlineFormSet
# Only look at objects that inherit from Django's base MODEL class
if inspect.isclass(obj) and issubclass(obj, models.Model):
# Grab the field list from the meta class
fields = obj._meta._fields()
for field in fields:
# Decode and strip any html out of the field's help text
help_text = strip_tags(force_unicode(field.help_text))
# Decode and capitalize the verbose name, for use if there isn't
# any help text
verbose_name = force_unicode(field.verbose_name).capitalize()
if help_text:
# Add the model field to the end of the docstring as a param
# using the help text as the description
lines.append(u':param %s: %s' % (field.attname, help_text))
else:
# Add the model field to the end of the docstring as a param
# using the verbose name as the description
lines.append(u':param %s: %s' % (field.attname, verbose_name))
# Add the field's type to the docstring
lines.append(u':type %s: %s' % (field.attname, type(field).__name__))
return lines
def setup(app):
"""Setup docsting processors"""
#Register the docstring processor with sphinx
app.connect('autodoc-process-docstring', process_docstring)
| agpl-3.0 |
marcocaccin/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters, and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below-average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
The thickness of the silhouette plot also visualizes the cluster size. When
``n_clusters`` is equal to 2, the silhouette plot for cluster 0 is bigger
because it groups the 3 sub-clusters into one big cluster. However, when
``n_clusters`` is equal to 4, all the plots are of more or less similar
thickness and hence of similar sizes, as can also be verified from the
labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
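# Hedged micro-example (separate from the walkthrough below; the helper name is
# ours): on a toy pair of well-separated blobs the average silhouette score is
# close to +1, which is the per-sample behaviour the plots below visualize.
def _toy_silhouette_score():
    X_toy, _ = make_blobs(n_samples=60, centers=2, cluster_std=0.5,
                          center_box=(-10.0, 10.0), random_state=0)
    toy_labels = KMeans(n_clusters=2, random_state=0).fit_predict(X_toy)
    return silhouette_score(X_toy, toy_labels)  # typically well above 0.7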
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
enigmampc/catalyst | catalyst/utils/data.py | 11 | 12760 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from copy import deepcopy
import numpy as np
import pandas as pd
def _ensure_index(x):
if not isinstance(x, pd.Index):
x = pd.Index(sorted(x))
return x
class RollingPanel(object):
"""
Preallocation strategies for rolling window over expanding data set
Restrictions: major_axis can only be a DatetimeIndex for now
"""
def __init__(self,
window,
items,
sids,
cap_multiple=2,
dtype=np.float64,
initial_dates=None):
self._pos = window
self._window = window
self.items = _ensure_index(items)
self.minor_axis = _ensure_index(sids)
self.cap_multiple = cap_multiple
self.dtype = dtype
if initial_dates is None:
self.date_buf = np.empty(self.cap, dtype='M8[ns]') * pd.NaT
elif len(initial_dates) != window:
raise ValueError('initial_dates must be of length window')
else:
self.date_buf = np.hstack(
(
initial_dates,
np.empty(
window * (cap_multiple - 1),
dtype='datetime64[ns]',
),
),
)
self.buffer = self._create_buffer()
@property
def cap(self):
return self.cap_multiple * self._window
@property
def _start_index(self):
return self._pos - self._window
@property
def start_date(self):
return self.date_buf[self._start_index]
def oldest_frame(self, raw=False):
"""
Get the oldest frame in the panel.
"""
if raw:
return self.buffer.values[:, self._start_index, :]
return self.buffer.iloc[:, self._start_index, :]
def set_minor_axis(self, minor_axis):
self.minor_axis = _ensure_index(minor_axis)
self.buffer = self.buffer.reindex(minor_axis=self.minor_axis)
def set_items(self, items):
self.items = _ensure_index(items)
self.buffer = self.buffer.reindex(items=self.items)
def _create_buffer(self):
panel = pd.Panel(
items=self.items,
minor_axis=self.minor_axis,
major_axis=range(self.cap),
dtype=self.dtype,
)
return panel
def extend_back(self, missing_dts):
"""
Resizes the buffer to hold a new window with a new cap_multiple.
If cap_multiple is None, then the old cap_multiple is used.
"""
delta = len(missing_dts)
if not delta:
raise ValueError(
'missing_dts must be a non-empty index',
)
self._window += delta
self._pos += delta
self.date_buf = self.date_buf.copy()
self.date_buf.resize(self.cap)
self.date_buf = np.roll(self.date_buf, delta)
old_vals = self.buffer.values
shape = old_vals.shape
nan_arr = np.empty((shape[0], delta, shape[2]))
nan_arr.fill(np.nan)
new_vals = np.column_stack(
(nan_arr,
old_vals,
np.empty((shape[0], delta * (self.cap_multiple - 1), shape[2]))),
)
self.buffer = pd.Panel(
data=new_vals,
items=self.items,
minor_axis=self.minor_axis,
major_axis=np.arange(self.cap),
dtype=self.dtype,
)
# Fill the delta with the dates we calculated.
where = slice(self._start_index, self._start_index + delta)
self.date_buf[where] = missing_dts
def add_frame(self, tick, frame, minor_axis=None, items=None):
"""
"""
if self._pos == self.cap:
self._roll_data()
values = frame
if isinstance(frame, pd.DataFrame):
values = frame.values
self.buffer.values[:, self._pos, :] = values.astype(self.dtype)
self.date_buf[self._pos] = tick
self._pos += 1
def get_current(self, item=None, raw=False, start=None, end=None):
"""
Get a Panel that is the current data in view. It is not safe to persist
these objects because internal data might change
"""
item_indexer = slice(None)
if item:
item_indexer = self.items.get_loc(item)
start_index = self._start_index
end_index = self._pos
        # get initial date window
where = slice(start_index, end_index)
current_dates = self.date_buf[where]
def convert_datelike_to_long(dt):
if isinstance(dt, pd.Timestamp):
return dt.asm8
if isinstance(dt, datetime.datetime):
return np.datetime64(dt)
return dt
# constrict further by date
if start:
start = convert_datelike_to_long(start)
start_index += current_dates.searchsorted(start)
if end:
end = convert_datelike_to_long(end)
_end = current_dates.searchsorted(end, 'right')
end_index -= len(current_dates) - _end
where = slice(start_index, end_index)
values = self.buffer.values[item_indexer, where, :]
current_dates = self.date_buf[where]
if raw:
# return copy so we can change it without side effects here
return values.copy()
major_axis = pd.DatetimeIndex(deepcopy(current_dates), tz='utc')
if values.ndim == 3:
return pd.Panel(values, self.items, major_axis, self.minor_axis,
dtype=self.dtype)
elif values.ndim == 2:
return pd.DataFrame(values, major_axis, self.minor_axis,
dtype=self.dtype)
def set_current(self, panel):
"""
Set the values stored in our current in-view data to be values of the
passed panel. The passed panel must have the same indices as the panel
that would be returned by self.get_current.
"""
where = slice(self._start_index, self._pos)
self.buffer.values[:, where, :] = panel.values
def current_dates(self):
where = slice(self._start_index, self._pos)
return pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
def _roll_data(self):
"""
Roll window worth of data up to position zero.
Save the effort of having to expensively roll at each iteration
"""
self.buffer.values[:, :self._window, :] = \
self.buffer.values[:, -self._window:, :]
self.date_buf[:self._window] = self.date_buf[-self._window:]
self._pos = self._window
@property
def window_length(self):
return self._window
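# Hedged usage sketch (exposition only; relies on the legacy pandas Panel API
# this module already targets): preallocate a two-item, two-sid rolling window,
# push timestamped frames into it, then read the in-view window back.
def _rolling_panel_demo():
    initial = pd.date_range('2014-01-01', periods=3)
    rp = RollingPanel(window=3, items=['price', 'volume'], sids=[1, 2],
                      initial_dates=initial)
    for i, tick in enumerate(pd.date_range('2014-01-04', periods=4)):
        frame = pd.DataFrame([[100.0 + i, 101.0 + i], [10.0 + i, 11.0 + i]],
                             index=['price', 'volume'], columns=[1, 2])
        rp.add_frame(tick, frame)
    return rp.get_current()  # Panel covering the three most recent ticks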
class MutableIndexRollingPanel(object):
"""
A version of RollingPanel that exists for backwards compatibility with
batch_transform. This is a copy to allow behavior of RollingPanel to drift
away from this without breaking this class.
This code should be considered frozen, and should not be used in the
future. Instead, see RollingPanel.
"""
def __init__(self, window, items, sids, cap_multiple=2, dtype=np.float64):
self._pos = 0
self._window = window
self.items = _ensure_index(items)
self.minor_axis = _ensure_index(sids)
self.cap_multiple = cap_multiple
self.cap = cap_multiple * window
self.dtype = dtype
self.date_buf = np.empty(self.cap, dtype='M8[ns]')
self.buffer = self._create_buffer()
def _oldest_frame_idx(self):
return max(self._pos - self._window, 0)
def oldest_frame(self, raw=False):
"""
Get the oldest frame in the panel.
"""
if raw:
return self.buffer.values[:, self._oldest_frame_idx(), :]
return self.buffer.iloc[:, self._oldest_frame_idx(), :]
def set_sids(self, sids):
self.minor_axis = _ensure_index(sids)
self.buffer = self.buffer.reindex(minor_axis=self.minor_axis)
def _create_buffer(self):
panel = pd.Panel(
items=self.items,
minor_axis=self.minor_axis,
major_axis=range(self.cap),
dtype=self.dtype,
)
return panel
def get_current(self):
"""
Get a Panel that is the current data in view. It is not safe to persist
these objects because internal data might change
"""
where = slice(self._oldest_frame_idx(), self._pos)
major_axis = pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
return pd.Panel(self.buffer.values[:, where, :], self.items,
major_axis, self.minor_axis, dtype=self.dtype)
def set_current(self, panel):
"""
Set the values stored in our current in-view data to be values of the
passed panel. The passed panel must have the same indices as the panel
that would be returned by self.get_current.
"""
where = slice(self._oldest_frame_idx(), self._pos)
self.buffer.values[:, where, :] = panel.values
def current_dates(self):
where = slice(self._oldest_frame_idx(), self._pos)
return pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
def _roll_data(self):
"""
Roll window worth of data up to position zero.
Save the effort of having to expensively roll at each iteration
"""
self.buffer.values[:, :self._window, :] = \
self.buffer.values[:, -self._window:, :]
self.date_buf[:self._window] = self.date_buf[-self._window:]
self._pos = self._window
def add_frame(self, tick, frame, minor_axis=None, items=None):
"""
"""
if self._pos == self.cap:
self._roll_data()
if isinstance(frame, pd.DataFrame):
minor_axis = frame.columns
items = frame.index
if set(minor_axis).difference(set(self.minor_axis)) or \
set(items).difference(set(self.items)):
self._update_buffer(frame)
vals = frame.T.astype(self.dtype)
self.buffer.loc[:, self._pos, :] = vals
self.date_buf[self._pos] = tick
self._pos += 1
def _update_buffer(self, frame):
# Get current frame as we only need to care about the data that is in
# the active window
old_buffer = self.get_current()
if self._pos >= self._window:
# Don't count the last major_axis entry if we're past our window,
# since it's about to roll off the end of the panel.
old_buffer = old_buffer.iloc[:, 1:, :]
nans = pd.isnull(old_buffer)
# Find minor_axes that have only nans
# Note that minor is axis 2
non_nan_cols = set(old_buffer.minor_axis[~np.all(nans, axis=(0, 1))])
# Determine new columns to be added
new_cols = set(frame.columns).difference(non_nan_cols)
# Update internal minor axis
self.minor_axis = _ensure_index(new_cols.union(non_nan_cols))
# Same for items (fields)
# Find items axes that have only nans
# Note that items is axis 0
non_nan_items = set(old_buffer.items[~np.all(nans, axis=(1, 2))])
new_items = set(frame.index).difference(non_nan_items)
self.items = _ensure_index(new_items.union(non_nan_items))
# :NOTE:
# There is a simpler and 10x faster way to do this:
#
# Reindex buffer to update axes (automatically adds nans)
# self.buffer = self.buffer.reindex(items=self.items,
# major_axis=np.arange(self.cap),
# minor_axis=self.minor_axis)
#
# However, pandas==0.12.0, for which we remain backwards compatible,
# has a bug in .reindex() that this triggers. Using .update() as before
# seems to work fine.
new_buffer = self._create_buffer()
new_buffer.update(
self.buffer.loc[non_nan_items, :, non_nan_cols])
self.buffer = new_buffer
| apache-2.0 |
scotthartbti/android_external_chromium_org | chrome/test/nacl_test_injection/buildbot_nacl_integration.py | 61 | 2538 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
pwd = os.environ.get('PWD', '')
is_integration_bot = 'nacl-chrome' in pwd
# This environment variable check mimics what
# buildbot_chrome_nacl_stage.py does.
is_win64 = (sys.platform in ('win32', 'cygwin') and
('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')))
# On the main Chrome waterfall, we may need to control where the tests are
# run.
# If there is serious skew in the PPAPI interface that causes all of
# the NaCl integration tests to fail, you can uncomment the
# following block. (Make sure you comment it out when the issues
# are resolved.) *However*, it is much preferred to add tests to
# the 'tests_to_disable' list below.
#if not is_integration_bot:
# return
tests_to_disable = []
# In general, you should disable tests inside this conditional. This turns
# them off on the main Chrome waterfall, but not on NaCl's integration bots.
# This makes it easier to see when things have been fixed NaCl side.
if not is_integration_bot:
# http://code.google.com/p/nativeclient/issues/detail?id=2511
tests_to_disable.append('run_ppapi_ppb_image_data_browser_test')
if sys.platform == 'darwin':
# TODO(mseaborn) fix
# http://code.google.com/p/nativeclient/issues/detail?id=1835
tests_to_disable.append('run_ppapi_crash_browser_test')
if sys.platform in ('win32', 'cygwin'):
# This one is only failing for nacl_glibc on x64 Windows
# but it is not clear how to disable only that limited case.
# See http://crbug.com/132395
tests_to_disable.append('run_inbrowser_test_runner')
script_dir = os.path.dirname(os.path.abspath(__file__))
nacl_integration_script = os.path.join(script_dir,
'buildbot_chrome_nacl_stage.py')
cmd = [sys.executable,
nacl_integration_script,
# TODO(ncbray) re-enable.
# https://code.google.com/p/chromium/issues/detail?id=133568
'--disable_glibc',
'--disable_tests=%s' % ','.join(tests_to_disable)]
cmd += args
sys.stdout.write('Running %s\n' % ' '.join(cmd))
sys.stdout.flush()
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause |
rbalda/neural_ocr | env/lib/python2.7/site-packages/matplotlib/tests/test_backend_bases.py | 7 | 3269 | from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.backend_bases import RendererBase
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import matplotlib.path as path
from nose.tools import assert_equal
import numpy as np
import os
import shutil
import tempfile
def test_uses_per_path():
id = transforms.Affine2D()
paths = [path.Path.unit_regular_polygon(i) for i in range(3, 7)]
tforms = [id.rotate(i) for i in range(1, 5)]
offsets = np.arange(20).reshape((10, 2))
facecolors = ['red', 'green']
edgecolors = ['red', 'green']
def check(master_transform, paths, all_transforms,
offsets, facecolors, edgecolors):
rb = RendererBase()
raw_paths = list(rb._iter_collection_raw_paths(master_transform,
paths, all_transforms))
gc = rb.new_gc()
ids = [path_id for xo, yo, path_id, gc0, rgbFace in
rb._iter_collection(gc, master_transform, all_transforms,
range(len(raw_paths)), offsets,
transforms.IdentityTransform(),
facecolors, edgecolors, [], [], [False],
[], 'data')]
uses = rb._iter_collection_uses_per_path(
paths, all_transforms, offsets, facecolors, edgecolors)
seen = [0] * len(raw_paths)
for i in ids:
seen[i] += 1
for n in seen:
assert n in (uses-1, uses)
check(id, paths, tforms, offsets, facecolors, edgecolors)
check(id, paths[0:1], tforms, offsets, facecolors, edgecolors)
check(id, [], tforms, offsets, facecolors, edgecolors)
check(id, paths, tforms[0:1], offsets, facecolors, edgecolors)
check(id, paths, [], offsets, facecolors, edgecolors)
for n in range(0, offsets.shape[0]):
check(id, paths, tforms, offsets[0:n, :], facecolors, edgecolors)
check(id, paths, tforms, offsets, [], edgecolors)
check(id, paths, tforms, offsets, facecolors, [])
check(id, paths, tforms, offsets, [], [])
check(id, paths, tforms, offsets, facecolors[0:1], edgecolors)
@cleanup
def test_get_default_filename():
try:
test_dir = tempfile.mkdtemp()
plt.rcParams['savefig.directory'] = test_dir
fig = plt.figure()
canvas = FigureCanvasBase(fig)
filename = canvas.get_default_filename()
assert_equal(filename, 'image.png')
finally:
shutil.rmtree(test_dir)
@cleanup
def test_get_default_filename_already_exists():
# From #3068: Suggest non-existing default filename
try:
test_dir = tempfile.mkdtemp()
plt.rcParams['savefig.directory'] = test_dir
fig = plt.figure()
canvas = FigureCanvasBase(fig)
# create 'image.png' in figure's save dir
open(os.path.join(test_dir, 'image.png'), 'w').close()
filename = canvas.get_default_filename()
assert_equal(filename, 'image-1.png')
finally:
shutil.rmtree(test_dir)
if __name__ == "__main__":
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
aajtodd/zipline | tests/serialization_cases.py | 20 | 3818 | import datetime
import pytz
import nose.tools as nt
import pandas.util.testing as tm
import pandas as pd
from zipline.finance.blotter import Blotter, Order
from zipline.finance.commission import PerShare, PerTrade, PerDollar
from zipline.finance.performance.period import PerformancePeriod
from zipline.finance.performance.position import Position
from zipline.finance.performance.tracker import PerformanceTracker
from zipline.finance.performance.position_tracker import PositionTracker
from zipline.finance.risk.cumulative import RiskMetricsCumulative
from zipline.finance.risk.period import RiskMetricsPeriod
from zipline.finance.risk.report import RiskReport
from zipline.finance.slippage import (
FixedSlippage,
Transaction,
VolumeShareSlippage
)
from zipline.protocol import Account
from zipline.protocol import Portfolio
from zipline.protocol import Position as ProtocolPosition
from zipline.finance.trading import SimulationParameters
from zipline.utils import factory
def stringify_cases(cases, func=None):
# get better test case names
results = []
if func is None:
def func(case):
return case[0].__name__
for case in cases:
new_case = list(case)
key = func(case)
new_case.insert(0, key)
results.append(new_case)
return results
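# Hedged illustration (not part of the test matrix): stringify_cases prepends a
# readable key to every case, derived from the __name__ of the object in
# position 0 unless a custom key function is supplied.
def _stringify_cases_demo():
    cases = [(dict, (), {}, 'dict'), (list, (), {}, 'repr')]
    return stringify_cases(cases)
    # -> [['dict', dict, (), {}, 'dict'], ['list', list, (), {}, 'repr']]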
sim_params_daily = SimulationParameters(
datetime.datetime(2013, 6, 19, tzinfo=pytz.UTC),
datetime.datetime(2013, 6, 19, tzinfo=pytz.UTC),
10000,
emission_rate='daily')
sim_params_minute = SimulationParameters(
datetime.datetime(2013, 6, 19, tzinfo=pytz.UTC),
datetime.datetime(2013, 6, 19, tzinfo=pytz.UTC),
10000,
emission_rate='minute')
returns = factory.create_returns_from_list(
[1.0], sim_params_daily)
def object_serialization_cases(skip_daily=False):
# Wrapped in a function to recreate DI objects.
cases = [
(Blotter, (), {}, 'repr'),
(Order, (datetime.datetime(2013, 6, 19), 8554, 100), {}, 'dict'),
(PerShare, (), {}, 'dict'),
(PerTrade, (), {}, 'dict'),
(PerDollar, (), {}, 'dict'),
(PerformancePeriod,
(10000,), {'position_tracker': PositionTracker()}, 'to_dict'),
(Position, (8554,), {}, 'dict'),
(PositionTracker, (), {}, 'dict'),
(PerformanceTracker, (sim_params_minute,), {}, 'to_dict'),
(RiskMetricsCumulative, (sim_params_minute,), {}, 'to_dict'),
(RiskMetricsPeriod,
(returns.index[0], returns.index[0], returns), {}, 'to_dict'),
(RiskReport, (returns, sim_params_minute), {}, 'to_dict'),
(FixedSlippage, (), {}, 'dict'),
(Transaction,
(8554, 10, datetime.datetime(2013, 6, 19), 100, "0000"), {},
'dict'),
(VolumeShareSlippage, (), {}, 'dict'),
(Account, (), {}, 'dict'),
(Portfolio, (), {}, 'dict'),
(ProtocolPosition, (8554,), {}, 'dict')
]
if not skip_daily:
cases.extend([
(PerformanceTracker, (sim_params_daily,), {}, 'to_dict'),
(RiskMetricsCumulative, (sim_params_daily,), {}, 'to_dict'),
(RiskReport, (returns, sim_params_daily), {}, 'to_dict'),
])
return stringify_cases(cases)
def assert_dict_equal(d1, d2):
# check keys
nt.assert_is_instance(d1, dict)
nt.assert_is_instance(d2, dict)
nt.assert_set_equal(set(d1.keys()), set(d2.keys()))
for k in d1:
v1 = d1[k]
v2 = d2[k]
asserter = nt.assert_equal
if isinstance(v1, pd.DataFrame):
asserter = tm.assert_frame_equal
if isinstance(v1, pd.Series):
asserter = tm.assert_series_equal
try:
asserter(v1, v2)
except AssertionError:
raise AssertionError('{k} is not equal'.format(k=k))
| apache-2.0 |
Rossonero/bmlswp | ch05/utils.py | 24 | 7111 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
try:
import ujson as json # UltraJSON if available
except:
import json
from matplotlib import pylab
import numpy as np
from data import CHART_DIR
def fetch_data(filename, col=None, line_count=-1, only_questions=False):
count = 0
for line in open(filename, "r"):
count += 1
if line_count > 0 and count > line_count:
break
data = Id, ParentId, IsQuestion, IsAccepted, TimeToAnswer, Score, Text, NumTextTokens, NumCodeLines, LinkCount, MisSpelledFraction = line.split(
"\t")
IsQuestion = int(IsQuestion)
if only_questions and not IsQuestion:
continue
if col:
if col < 6:
val = int(data[col])
else:
val = data[col]
yield val
else:
Id = int(Id)
assert Id >= 0, line
ParentId = int(ParentId)
IsAccepted = int(IsAccepted)
assert not IsQuestion == IsAccepted == 1, "%i %i --- %s" % (
IsQuestion, IsAccepted, line)
assert (ParentId == -1 and IsQuestion) or (
ParentId >= 0 and not IsQuestion), "%i %i --- %s" % (ParentId, IsQuestion, line)
TimeToAnswer = int(TimeToAnswer)
Score = int(Score)
NumTextTokens = int(NumTextTokens)
NumCodeLines = int(NumCodeLines)
LinkCount = int(LinkCount)
MisSpelledFraction = float(MisSpelledFraction)
yield Id, ParentId, IsQuestion, IsAccepted, TimeToAnswer, Score, Text, NumTextTokens, NumCodeLines, LinkCount, MisSpelledFraction
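# Hedged usage sketch (the file name is an assumption): column indices follow
# the tab-separated layout parsed above, e.g. col=5 yields each row's integer
# Score; omitting col yields fully typed records.
def _fetch_data_demo(filename='data.tsv'):
    scores = list(fetch_data(filename, col=5, line_count=1000))
    first_record = next(iter(fetch_data(filename, line_count=1)))
    return scores, first_record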
def fetch_posts(filename, with_index=True, line_count=-1):
count = 0
for line in open(filename, "r"):
count += 1
if line_count > 0 and count > line_count:
break
Id, Text = line.split("\t")
Text = Text.strip()
if with_index:
yield int(Id), Text
else:
yield Text
def load_meta(filename):
meta = json.load(open(filename, "r"))
keys = list(meta.keys())
# JSON only allows string keys, changing that to int
for key in keys:
meta[int(key)] = meta[key]
del meta[key]
# post Id to index in vectorized
id_to_idx = {}
# and back
idx_to_id = {}
for PostId, Info in meta.items():
id_to_idx[PostId] = idx = Info['idx']
idx_to_id[idx] = PostId
return meta, id_to_idx, idx_to_id
def plot_roc(auc_score, name, fpr, tpr):
pylab.figure(num=None, figsize=(6, 5))
pylab.plot([0, 1], [0, 1], 'k--')
pylab.xlim([0.0, 1.0])
pylab.ylim([0.0, 1.0])
pylab.xlabel('False Positive Rate')
pylab.ylabel('True Positive Rate')
pylab.title('Receiver operating characteristic (AUC=%0.2f)\n%s' % (
auc_score, name))
pylab.legend(loc="lower right")
pylab.grid(True, linestyle='-', color='0.75')
pylab.fill_between(tpr, fpr, alpha=0.5)
pylab.plot(fpr, tpr, lw=1)
pylab.savefig(
os.path.join(CHART_DIR, "roc_" + name.replace(" ", "_") + ".png"))
def plot_pr(auc_score, name, precision, recall, label=None):
pylab.figure(num=None, figsize=(6, 5))
pylab.xlim([0.0, 1.0])
pylab.ylim([0.0, 1.0])
pylab.xlabel('Recall')
pylab.ylabel('Precision')
pylab.title('P/R (AUC=%0.2f) / %s' % (auc_score, label))
pylab.fill_between(recall, precision, alpha=0.5)
pylab.grid(True, linestyle='-', color='0.75')
pylab.plot(recall, precision, lw=1)
filename = name.replace(" ", "_")
pylab.savefig(os.path.join(CHART_DIR, "pr_" + filename + ".png"))
def show_most_informative_features(vectorizer, clf, n=20):
c_f = sorted(zip(clf.coef_[0], vectorizer.get_feature_names()))
top = list(zip(c_f[:n], c_f[:-(n + 1):-1]))
for (c1, f1), (c2, f2) in top:
print("\t%.4f\t%-15s\t\t%.4f\t%-15s" % (c1, f1, c2, f2))
def plot_feat_importance(feature_names, clf, name):
pylab.figure(num=None, figsize=(6, 5))
coef_ = clf.coef_
important = np.argsort(np.absolute(coef_.ravel()))
f_imp = feature_names[important]
coef = coef_.ravel()[important]
inds = np.argsort(coef)
f_imp = f_imp[inds]
coef = coef[inds]
xpos = np.array(list(range(len(coef))))
pylab.bar(xpos, coef, width=1)
pylab.title('Feature importance for %s' % (name))
ax = pylab.gca()
ax.set_xticks(np.arange(len(coef)))
labels = ax.set_xticklabels(f_imp)
for label in labels:
label.set_rotation(90)
filename = name.replace(" ", "_")
pylab.savefig(os.path.join(
CHART_DIR, "feat_imp_%s.png" % filename), bbox_inches="tight")
def plot_feat_hist(data_name_list, filename=None):
if len(data_name_list) > 1:
assert filename is not None
pylab.figure(num=None, figsize=(8, 6))
num_rows = int(1 + (len(data_name_list) - 1) / 2)
num_cols = int(1 if len(data_name_list) == 1 else 2)
pylab.figure(figsize=(5 * num_cols, 4 * num_rows))
for i in range(num_rows):
for j in range(num_cols):
pylab.subplot(num_rows, num_cols, 1 + i * num_cols + j)
x, name = data_name_list[i * num_cols + j]
pylab.title(name)
pylab.xlabel('Value')
pylab.ylabel('Fraction')
# the histogram of the data
max_val = np.max(x)
if max_val <= 1.0:
bins = 50
elif max_val > 50:
bins = 50
else:
bins = max_val
n, bins, patches = pylab.hist(
x, bins=bins, normed=1, alpha=0.75)
pylab.grid(True)
if not filename:
filename = "feat_hist_%s.png" % name.replace(" ", "_")
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_bias_variance(data_sizes, train_errors, test_errors, name, title):
pylab.figure(num=None, figsize=(6, 5))
pylab.ylim([0.0, 1.0])
pylab.xlabel('Data set size')
pylab.ylabel('Error')
pylab.title("Bias-Variance for '%s'" % name)
pylab.plot(
data_sizes, test_errors, "--", data_sizes, train_errors, "b-", lw=1)
pylab.legend(["test error", "train error"], loc="upper right")
pylab.grid(True, linestyle='-', color='0.75')
pylab.savefig(
os.path.join(CHART_DIR, "bv_" + name.replace(" ", "_") + ".png"), bbox_inches="tight")
def plot_k_complexity(ks, train_errors, test_errors):
pylab.figure(num=None, figsize=(6, 5))
pylab.ylim([0.0, 1.0])
pylab.xlabel('k')
pylab.ylabel('Error')
pylab.title('Errors for for different values of $k$')
pylab.plot(
ks, test_errors, "--", ks, train_errors, "-", lw=1)
pylab.legend(["test error", "train error"], loc="upper right")
pylab.grid(True, linestyle='-', color='0.75')
pylab.savefig(
os.path.join(CHART_DIR, "kcomplexity.png"), bbox_inches="tight")
| mit |
wasserfeder/lomap | examples/ijrr2014_rec_hor/view.py | 1 | 6104 | #! /usr/bin/env python
# Copyright (C) 2012-2015, Alphan Ulusoy ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import division
import itertools as it
from six.moves import zip as izip
import matplotlib as mpl
#mpl.use("agg")
import matplotlib.pyplot as plt
from matplotlib.transforms import Affine2D
from matplotlib import animation
class View(object):
def __init__(self, env, quad):
"""Creates a figure window and initializes view parameters
for the environment and the quadrotor.
"""
# Create the figure window
self.fig = plt.figure(figsize=(4*3.13,3*3.13))
self.ax = self.fig.gca()
self.ax.xaxis.set_ticklabels([])
self.ax.yaxis.set_ticklabels([])
self.ax.xaxis.set_ticks(list(range(-100,100)))
self.ax.yaxis.set_ticks(list(range(-100,100)))
self.margin = quad.sensing_range // 2 # integer division
# Scaled
plt.axis('scaled')
plt.subplots_adjust(left=0.01, right=0.99, top=0.99, bottom=0.01)
# Save pointer to env and quad
self.env = env
self.quad = quad
# Defines the quadrotor
self.define_quadrotor()
# Defines polygons for locally sensed requests
self.define_local()
# Draw the a priori known regions in the environment (called once only here)
self.draw_regions()
# Path line object
self.path_line = None
self.arrow = None
# Draw the quadrotor
self.draw_quad()
def define_quadrotor(self):
# Create the square cells for local sensing range of the quadrotor
# In quad_cells[i][j], 'cell' gives the object, 'text' gives the text on the cell
cell_cmd = "plt.Rectangle((0, 0), 1, 1, edgecolor = 'black', fill=False, linewidth = 0.5)"
self.quad_cells = [[dict() for y in range(0, self.quad.sensing_range)] for x in range(0, self.quad.sensing_range)]
for x,y in it.product(range(0, self.quad.sensing_range), repeat=2):
self.quad_cells[x][y] = {'cell': eval(cell_cmd), 'text': self.ax.text(0.5,0.5,'X',fontsize=10,ha='center',va='center',weight='bold')}
self.ax.add_artist(self.quad_cells[x][y]['cell'])
# Create circles for drawing the quad (0.20 radius)
blade_cmd = 'plt.Circle((0,0),0.20,fill=False,linewidth=1)'
self.quad_blades = [None]*4
for i in range(0,4):
self.quad_blades[i] = eval(blade_cmd)
self.ax.add_artist(self.quad_blades[i])
def get_vertices_of_cell(self, cell):
x, y = cell
lower_left = (x-0.5, y-0.5)
lower_right = (x+0.5, y-0.5)
upper_left = (x-0.5, y+0.5)
upper_right = (x+0.5, y+0.5)
return (lower_left, upper_left, upper_right, lower_right, lower_left)
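    # Worked illustration (comment only, added for clarity): for cell == (2, 3)
    # the method above returns the surrounding unit square, closed back on the
    # first vertex:
    # ((1.5, 2.5), (1.5, 3.5), (2.5, 3.5), (2.5, 2.5), (1.5, 2.5))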
def draw_regions(self):
"""Draws the regions
"""
global_reqs = self.env.global_reqs
# For setting axis ranges properly
min_x, max_x, min_y, max_y = (self.quad.x, self.quad.x, self.quad.y, self.quad.y)
for cell in global_reqs:
color = global_reqs[cell]['color']
vertices = self.get_vertices_of_cell(cell)
# x and y points of each vertex for matplotlib
x, y = list(zip(*vertices))
self.ax.fill(x,y,color,edgecolor=color)
# For proper limits
min_x = min(min_x, min(x))
min_y = min(min_y, min(y))
max_x = max(max_x, max(x))
max_y = max(max_y, max(y))
# Set appropriate limits
plt.axis((min_x-self.margin, max_x+self.margin, min_y-self.margin, max_y+self.margin))
self.ax.tight=True
def define_local(self):
"""Defines polygons for locally sensed requests
"""
local = self.env.local_reqs
self.local_polygons = dict()
for cell in local:
color = local[cell]['color']
vertices = self.get_vertices_of_cell(cell)
self.local_polygons[cell] = plt.Polygon(vertices, facecolor=color, edgecolor=color, zorder=0)
def draw_local(self):
"""Draws locally sensed requests
"""
for name in self.local_polygons:
artist = self.local_polygons[name]
if artist not in self.ax.get_children():
# Not child of axis
if self.env.local_reqs[name]['on']:
# Must be added
self.ax.add_artist(artist)
elif not self.env.local_reqs[name]['on']:
# Child of axis and must be removed
self.local_polygons[name].remove()
def draw_quad(self):
# Creates the local polygons
self.draw_local()
# Translations for quad blades (NW, NE, SE, SW)
txty = ((-0.20, 0.20),(0.20, 0.20),(0.20,-0.20),(-0.20,-0.20))
# Transform circles as needed (translation and optional rotation)
for blade,(tx,ty) in izip(self.quad_blades,txty):
trans = Affine2D().translate(tx,ty).translate(self.quad.x, self.quad.y) + self.ax.transData
blade.set_transform(trans)
# Translations and labels for quad sensing cells
for x, y in it.product(range(0, self.quad.sensing_range), repeat = 2):
# Center coords of cell x,y
cell_x, cell_y = self.quad.get_sensing_cell_global_coords((x,y))
cell_trans = Affine2D().translate(-0.5,-0.5).translate(cell_x, cell_y) + self.ax.transData
self.quad_cells[x][y]['cell'].set_transform(cell_trans)
self.quad_cells[x][y]['text'].set_transform(cell_trans)
props = self.quad.sensed[x][y]['local_reqs'] | self.quad.sensed[x][y]['global_reqs']
new_text = ','.join(props)
self.quad_cells[x][y]['text'].set_text(new_text)
# Remove path
if self.path_line in self.ax.get_children():
self.path_line.remove()
self.arrow.remove()
def draw_path(self, vertices):
xs, ys = list(zip(*vertices))
dx = (xs[-1]-xs[-2])/1.5
dy = (ys[-1]-ys[-2])/1.5
self.path_line = self.ax.plot(xs, ys, 'r-', lw=2)[0]
self.arrow = self.ax.arrow(xs[-2], ys[-2], dx, dy, head_width=0.5, head_length=0.5, fc='r', ec='w')
| gpl-2.0 |
jimako1989/coincheckpy | coincheckpy.py | 1 | 15044 | # -*- coding: utf-8 -*-
""" COINCHECK API wrapper """
"""
AUTHOR: @jimako1989
GITHUB: github.com/jimako1989/coincheckpy
LICENSE: MIT
"""
import json,time,hmac,hashlib,requests,datetime
import pandas as pd
from decimal import *
getcontext().prec = 8
"""EndpointsMixin provides a mixin for the API instance """
class EndpointsMixin(object):
"""Public API"""
def ticker(self, **params):
""" Get a tick
Docs: https://coincheck.com/documents/exchange/api#ticker
"""
endpoint = 'api/ticker'
return self.request(endpoint, auth=False, params=params)
def public_trades(self, **params):
""" Get public trades
Docs: https://coincheck.com/documents/exchange/api#public-trades
"""
endpoint = 'api/trades'
return self.request(endpoint, auth=False, params=params)
def order_book(self, **params):
""" Get order books
Docs: https://coincheck.com/documents/exchange/api#order-book
"""
endpoint = 'api/order_books'
return self.request(endpoint, auth=False, params=params)
def order_rate(self, order_type, pair, **params):
""" Get order rate
Docs: https://coincheck.com/documents/exchange/api#order-rate
"""
params['order_type'] = order_type
params['pair'] = pair
endpoint = 'api/exchange/orders/rate'
return self.request(endpoint, auth=False, params=params)
def buy_rate(self, pair, **params):
""" Get buy rate
Docs: https://coincheck.com/documents/exchange/api#buy-rate
"""
params['pair'] = pair
endpoint = 'api/rate/' + pair
return self.request(endpoint, auth=False, params=params)
""" Private API """
""" Order """
def order_new(self, pair, order_type, **params):
""" Create a new order
Docs: https://coincheck.com/documents/exchange/api#order-new
"""
params['pair'] = pair
params['order_type'] = order_type
if (order_type == "buy" or order_type == "sell") and 'rate' not in params and 'amount' not in params:
raise InputError(['rate', 'amount'], order_type)
if order_type == "market_buy" and 'market_buy_amount' not in params:
raise InputError('market_buy_amount', order_type)
if (order_type == "market_sell" or order_type == "leverage_buy" or order_type == "leverage_sell") and 'amount' not in params:
raise InputError('amount', order_type)
if (order_type == "close_long" or order_type == "close_short") and 'amount' not in params and 'positions' not in params:
raise InputError(['amount', 'positions'], order_type)
endpoint = 'api/exchange/orders'
return self.request(endpoint, method='POST', params=params)
def order_opens(self, **params):
""" Get open orders
Docs: https://coincheck.com/documents/exchange/api#order-opens
"""
endpoint = 'api/exchange/orders/opens'
return self.request(endpoint, params=params)
def order_cancel(self, order_id, **params):
""" Cancel an order
Docs: https://coincheck.com/documents/exchange/api#order-cancel
"""
params['id'] = order_id
endpoint = 'api/exchange/orders/'+str(order_id)
return self.request(endpoint, method='DELETE', params=params)
def order_transactions(self, **params):
""" Get your transactions
Docs: https://coincheck.com/documents/exchange/api#order-transactions
"""
endpoint = 'api/exchange/orders/transactions'
return self.request(endpoint, params=params)
def order_positions(self, **params):
""" Get your leverage position list
Docs: https://coincheck.com/documents/exchange/api#order-positions
"""
endpoint = 'api/exchange/leverage/positions'
return self.request(endpoint ,params=params)
""" Account """
def account_balance(self, **params):
""" Check your balance
Docs: https://coincheck.com/documents/exchange/api#account-balance
"""
endpoint = 'api/accounts/balance'
return self.request(endpoint, params=params)
def account_leverage_balance(self, **params):
""" Check your leverage balance
Docs: https://coincheck.com/documents/exchange/api#account-leverage-balance
"""
endpoint = 'api/accounts/leverage_balance'
return self.request(endpoint, params=params)
def account_sendmoney(self, address, amount, **params):
""" Send money
Docs: https://coincheck.com/documents/exchange/api#account-sendmoney
"""
params['address'] = address
params['amount'] = amount
endpoint = 'api/send_money'
return self.request(endpoint,method='POST', params=params)
def account_sends(self, currency, **params):
""" Get the history of sent money
Docs: https://coincheck.com/documents/exchange/api#account-sends
"""
params['currency'] = currency
endpoint = 'api/send_money'
return self.request(endpoint, params=params)
def account_deposits(self, currency, **params):
""" Get the history of deposit money
Docs: https://coincheck.com/documents/exchange/api#account-deposits
"""
params['currency'] = currency
endpoint = 'api/deposit_money'
return self.request(endpoint, params=params)
def account_deposits_fast(self, order_id, **params):
""" Fast withdrawal
Docs: https://coincheck.com/documents/exchange/api#account-deposits-fast
"""
params['id'] = order_id
endpoint = 'api/deposit_money/'+str(order_id)+'/fast'
return self.request(endpoint, method='POST', params=params)
def account_info(self, **params):
""" Get account info.
Docs: https://coincheck.com/documents/exchange/api#account-info
"""
endpoint = 'api/accounts'
return self.request(endpoint, params=params)
""" Withdrawal """
def bank_accounts(self, **params):
""" Get bank accounts
Docs: https://coincheck.com/documents/exchange/api#bank-accounts
"""
endpoint = 'api/bank_accounts'
return self.request(endpoint, params=params)
def bank_accounts_create(self, bank_name, branch_name, bank_account_type, number, name, **params):
""" Create a bank account
Docs: https://coincheck.com/documents/exchange/api#bank-accounts-create
"""
endpoint = 'api/bank_accounts'
return self.request(endpoint, method='POST', params=params)
def bank_accounts_destroy(self, bank_id, **params):
""" Destroy a bank account
Docs: https://coincheck.com/documents/exchange/api#bank-accounts-destroy
"""
endpoint = 'api/bank_accounts/'+str(bank_id)
return self.request(endpoint, method='DELETE', params=params)
def withdraws(self, **params):
""" Get the history of withdraws
Docs: https://coincheck.com/documents/exchange/api#withdraws
"""
endpoint = 'api/withdraws'
return self.request(endpoint, params=params)
def withdraws_create(self, bank_account_id, amount, currency, is_fast=False, **params):
""" Apply the withdrawal
Docs: https://coincheck.com/documents/exchange/api#withdraws-create
"""
params['bank_account_id'] = bank_account_id
params['amount'] = amount
params['currency'] = currency
params['is_fast'] = is_fast
endpoint = 'api/withdraws'
return self.request(endpoint, method='POST', params=params)
def withdraws_destroy(self, withdrawal_id, **params):
""" Destroy a withdrawal
Docs: https://coincheck.com/documents/exchange/api#withdraws-destroy
"""
params['id'] = withdrawal_id
endpoint = 'api/withdraws'+str(withdrawal_id)
return self.request(endpoint, method='DELETE', params=params)
""" Borrow """
def create_borrow(self, amount, currency, **params):
""" Create a borrow
Docs: https://coincheck.com/documents/exchange/api#create-borrow
"""
params['amount'] = amount
params['currency'] = currency
endpoint = 'api/lending/borrows'
return self.request(endpoint, method='POST', params=params)
def read_borrow_matches(self, **params):
""" Get the list of borrows
Docs: https://coincheck.com/documents/exchange/api#read-borrow-matches
"""
endpoint = 'api/lending/borrows/matches'
return self.request(endpoint, params=params)
def create_repay(self, repay_id, **params):
""" Repayment
Docs: https://coincheck.com/documents/exchange/api#create-repay
"""
params['id'] = repay_id
endpoint = 'api/lending/borrows/'+str(repay_id)+'/repay'
return self.request(endpoint, method='POST', params=params)
""" Transfer """
def transfers_to_leverage(self, amount, currency, **params):
""" Transfers to your leverage account from your spot account.
Docs: https://coincheck.com/documents/exchange/api#transfers-to-leverage
"""
params['amount'] = amount
params['currency'] = currency
endpoint = 'api/exchange/transfers/to_leverage'
return self.request(endpoint, method='POST', params=params)
def transfers_from_leverage(self, amount, currency, **params):
""" Transfers to your spot account from your leverage account.
Docs: https://coincheck.com/documents/exchange/api#transfers-from-leverage
"""
params['amount'] = amount
params['currency'] = currency
endpoint = 'api/exchange/transfers/from_leverage'
return self.request(endpoint, method='POST', params=params)
""" Get the historical prices of JPY/BTC """
def get_prices(self, term):
response = requests.get("https://coincheck.com/exchange/chart.json?line=true&term="+str(term)).json()['chart']
datetimeIndex = [pd.Timestamp(datetime.datetime.fromtimestamp(r[0]/1000.0)) for r in response]
rates = [int(r[1]) for r in response]
series = pd.Series(rates, index=datetimeIndex)
return(series)
""" Get the depth of BTC trading """
def get_depth(self):
response = requests.get("https://coincheck.com/exchange/depth_chart.json").json()['chart']
return(response['buy'], response['sell'])
""" Provides functionality for access to core COINCHECK API calls """
class API(EndpointsMixin, object):
def __init__(self, environment='live', key=None, secret_key=None):
""" Instantiates an instance of CoincheckPy's API wrapper """
if environment == 'live':
self.api_url = 'https://coincheck.com'
else:
# for future, access to a demo account.
pass
self.key = key
self.secret_key = bytes(secret_key, 'ascii')
self.client = requests.Session()
def request(self, endpoint, method='GET', auth=True, params=None):
""" Returns dict of response from Coincheck's open API """
self.nonce = str(int(time.time() * 10000))
url = '%s/%s' % ( self.api_url, endpoint)
request_args = {}
method = method.lower()
params = params or {}
if 'amount' in params:
params['amount'] = str(params['amount'])
if 'rate' in params:
params['rate'] = str(params['rate'])
request_args['headers'] = params
if method == 'get' or method == 'post':
if type(params) is dict:
url_endpoint = "?"
for (key, value) in params.items():
url_endpoint += str(key) + "=" + str(value) + "&"
url += url_endpoint[:-1]
elif method == 'delete':
pass
if auth:
message = bytes(self.nonce + url, 'ascii')
signature = hmac.new(self.secret_key, msg=message, digestmod=hashlib.sha256).hexdigest()
headers = {
"Content-Type":"application/json",\
"ACCESS-KEY":self.key,\
"ACCESS-NONCE":self.nonce,\
"ACCESS-SIGNATURE":signature
}
request_args['headers'].update(headers)
func = getattr(self.client, method)
        try:
            response = func(url, **request_args)
        except requests.RequestException as e:
            print(str(e))
            raise
content = response.json()
# error message
if response.status_code >= 400:
raise CoincheckError(response.status_code,content)
return content
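# A minimal, hypothetical usage sketch of the wrapper above (key and secret
# are placeholders, not real credentials; endpoints as defined in the mixin):
#
#     coincheck = API(key="YOUR-API-KEY", secret_key="YOUR-API-SECRET")
#     tick = coincheck.ticker()              # public endpoint, no signature
#     balance = coincheck.account_balance()  # private endpoint, HMAC-SHA256 signed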
"""HTTPS Streaming"""
class Streamer():
""" Provides functionality for HTTPS Streaming """
def __init__(self, environment='live', heartbeat=1.0):
""" Instantiates an instance of CoincheckPy's streaming API wrapper. """
if environment == 'live':
self.api_url = 'https://coincheck.com/api/ticker'
else:
# for future, access to a demo account.
pass
self.heartbeat = heartbeat
self.client = requests.Session()
def start(self, **params):
""" Starts the stream with the given parameters """
self.connected = True
request_args = {}
content_ = {'last':None,'bid':None,'volume':None,'ask':None,'low':None,'high':None}
while self.connected:
response = self.client.get(self.api_url, **request_args)
content = response.content.decode('ascii')
content = json.loads(content)
if response.status_code != 200:
self.on_error(content)
self.on_success(content)
time.sleep(self.heartbeat)
def on_success(self, content):
""" Called when data is successfully retrieved from the stream """
print(content)
return True
def on_error(self, content):
""" Called when stream returns non-200 status code
Override this to handle your streaming data.
"""
self.connected = False
return
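# The Streamer is intended to be subclassed; a hypothetical override of
# on_success could look like this (sketch only, field names taken from the
# ticker payload listed above):
#
#     class PrintBidAsk(Streamer):
#         def on_success(self, content):
#             print(content['bid'], content['ask'])
#             return True
#
#     PrintBidAsk(heartbeat=2.0).start()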
""" Contains COINCHECK exception """
class CoincheckError(Exception):
""" Generic error class, catches coincheck response errors
"""
def __init__(self, status_code, error_response):
msg = "COINCHECK API returned error code %s (%s) " % (status_code, error_response['error'])
super(CoincheckError, self).__init__(msg)
""" Parameter input exception """
class InputError(Exception):
def __init__(self, must_parameters, order_type):
self.must_parameters = must_parameters
self.order_type = order_type
    def __str__(self):
        if type(self.must_parameters) == str:
            return "Parameter: %s is needed if the order type is %s." % (self.must_parameters, self.order_type)
        elif type(self.must_parameters) == list:
            return "Parameters: %s are needed if the order type is %s." % (str(self.must_parameters), self.order_type)
| mit |
teonlamont/mne-python | examples/inverse/plot_compute_mne_inverse_volume.py | 40 | 1748 | """
=======================================================================
Compute MNE-dSPM inverse solution on evoked data in volume source space
=======================================================================
Compute dSPM inverse solution on MNE evoked dataset in a volume source
space and stores the solution in a nifti file for visualisation.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
from nilearn.plotting import plot_stat_map
from nilearn.image import index_img
from mne.datasets import sample
from mne import read_evokeds
from mne.minimum_norm import apply_inverse, read_inverse_operator
print(__doc__)
data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-vol-7-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Load data
evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
src = inverse_operator['src']
# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method)
stc.crop(0.0, 0.2)
# Export result as a 4D nifti object
img = stc.as_volume(src,
mri_resolution=False) # set True for full MRI resolution
# Save it as a nifti file
# nib.save(img, 'mne_%s_inverse.nii.gz' % method)
t1_fname = data_path + '/subjects/sample/mri/T1.mgz'
# Plotting with nilearn ######################################################
plot_stat_map(index_img(img, 61), t1_fname, threshold=8.,
title='%s (t=%.1f s.)' % (method, stc.times[61]))
plt.show()
| bsd-3-clause |
ilyes14/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 230 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count twice as much as those of the `KNeighborsClassifier` classifier
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
xwolf12/scikit-learn | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
wsmorgan/782 | basis/solve.py | 1 | 6421 | #!/usr/bin/python
from basis import msg
from basis.hamiltonian import Hamiltonian
import numpy as np
import matplotlib.pyplot as plt
def _solve_system(potcfg, n_basis, n_solutions, xl = None, xr = None, plot_f = None, outfile=None):
"""Solves the system for the given potential and the desired number of
basis functions. Output is written to file.
Args:
potcfg (str): The path to the `pot.cfg` file.
n_basis (int): The number of basis functions to use in the solution.
n_solutions (int): The number of solutions to be returned.
xl (float, optional): The left most edge of the potential if different
from that stored in the `pot.cfg` file.
xr (float, optional): The right most edge of the potential if different
from that stored in the `pot.cfg` file.
plot_f (bool, optional): True if the system is going to be plotted.
outfile (str, optional): The path to the desired output file.
Returns:
Output is saved to a csv file `1D_potential_sol.csv". If plot_f =
True a plot window is returned.
"""
ham = Hamiltonian(potcfg, n_basis, xl, xr)
eigen_vals = ham.eigenvals[:n_solutions]
eigen_vecs = np.transpose(ham.eigenvecs[:n_solutions])
with open(outfile,"w+") as outf:
outf.write("Eigenval Eigenvec\n")
for i in range(n_solutions):
temp = [str(eigen_vals[i])+" "]
for j in range(len(eigen_vecs)):
temp.append(str(eigen_vecs[i][j]))
outf.write(" ".join(temp)+"\n")
L = abs(ham.domain[1] - ham.domain[0])
if plot_f == "pot": # pragma: no cover
xs = np.arange(ham.domain[0],ham.domain[1],0.01)
Vs = list(map(ham.pot,xs))
plt.plot(xs,Vs)
plt.savefig('pot.pdf')
elif plot_f == "waves": # pragma: no cover
for i in [0,9]:
wave = eigen_vecs[i]
xs = np.arange(ham.domain[0],ham.domain[1],0.01)
psi_x = []
env = []
for x in xs:
sin_x = 0
for n in range(len(wave)):
sin_x += wave[n]*np.sqrt(2./L)*np.sin((n+1)*np.pi*x/L)
env.append(np.sin(x*np.pi*(i+1)/L))
psi_x.append(sin_x)
plt.plot(xs,psi_x)
plt.plot(xs,env)
plt.savefig('waves.pdf')
elif plot_f =="en": # pragma: no cover
ens = []
ks = []
squ_well = []
for n in range(35):
ens.append(eigen_vals[n]/(np.pi**2))
ks.append(n/L)
squ_well.append(n*n/(L**2))
plt.plot(ks[:-1],ens[:-1],'ro')
plt.plot(ks,squ_well)
plt.xlim((0.,3.))
plt.ylim((0.,10.))
plt.savefig('energy.pdf')
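# A hypothetical direct call of the solver above (file name and parameter
# values are illustrative; `pot.cfg` must follow the format expected by
# basis.hamiltonian.Hamiltonian):
#
#     _solve_system("pot.cfg", n_basis=200, n_solutions=10,
#                   outfile="output.dat")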
def examples():
"""Prints examples of using the script to the console using colored output.
"""
script = "BASIS: 1D quantum potential solver using basis expansion."
explain = ("For simple 1D potentials such as the infinite square well, "
"kronig-penny, ect. This code produces a numerical solution "
"using a bisis expansion.")
contents = [(("Solve the potential in `kp.cfg` using 200 basis functions."),
"solve.py 200 -potential kp.cfg",
"This saves the solution to the default 'output.dat'."
"file in the current directory.")]
required = ("REQUIRED: potential config file `pot.cfg`.")
output = ("RETURNS: plot window if `-plot` is specified; solution "
"output is written to file.")
details = ("The plotting uses `matplotlib` with the default configured "
"backend. If you want a different backend, set the rc config "
"for `matplotlib` using online documentation. However, many "
"backends don't play well with the animation (depending on OS "
"type and version, etc., etc.; so use carefully.")
outputfmt = ("")
msg.example(script, explain, contents, required, output, outputfmt, details)
script_options = {
"N": dict(default=100, type=int,
help=("Specifies the number of basis function to be used.")),
"-plot": dict(help=("Plot the potential (pot), the wave functions (wave), "
"the energies (en).")),
"-potential": dict(help=("Path to the file that has the potential parameters.")),
"-outfile": dict(default="output.dat",
help="Override the default output file nome."),
"-solutions": dict(default = 10, type=int,
help="The number of solutions to be written to file."),
"-left_edge": dict(default = None, type=float,
help="Override the left most edge of the potential "
"that has diffined in potential file."),
"-right_edge": dict(default = None, type=float,
help="Override the right most edge of the potential "
"that has diffined in potential file.")
}
"""dict: default command-line arguments and their
:meth:`argparse.ArgumentParser.add_argument` keyword arguments.
"""
def _parser_options():
"""Parses the options and arguments from the command line."""
#We have two options: get some of the details from the config file,
import argparse
from basis import base
pdescr = "1D Quantum Potential Solver."
parser = argparse.ArgumentParser(parents=[base.bparser], description=pdescr)
for arg, options in script_options.items():
parser.add_argument(arg, **options)
args = base.exhandler(examples, parser)
if args is None:
return
if args["plot"] != None:
args["plot"] = args["plot"].lower()
return args
def run(args):
if not args["potential"]:
raise KeyError("A potential file must be provided using the -potential flag.")
elif args["plot"]: # pragma: no cover
_solve_system(args["potential"], args["N"], args["solutions"], xl=args["left_edge"]
,xr=args["right_edge"], outfile = args["outfile"], plot_f = args["plot"])
else:
_solve_system(args["potential"], args["N"], args["solutions"], xl=args["left_edge"]
,xr=args["right_edge"], outfile = args["outfile"])
if __name__ == '__main__': # pragma: no cover
run(_parser_options())
| mit |
gkunter/coquery | coquery/tables.py | 1 | 21550 | # -*- coding: utf-8 -*-
"""
tables.py is part of Coquery.
Copyright (c) 2016-2018 Gero Kunter ([email protected])
Coquery is released under the terms of the GNU General Public License (v3).
For details, see the file LICENSE that you should have received along
with Coquery. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import unicode_literals
import collections
import pandas as pd
import re
import sys
from .defines import SQL_MYSQL, SQL_SQLITE
from .unicode import utf8
def varchar(n, not_null=True):
"""
Returns a string that can be used in an SQL table definition to specify
VARCHAR field types.
"""
return "VARCHAR({}){}".format(n, " NOT NULL" if not_null else "")
def mediumint(n, unsigned=True, not_null=True):
"""
Returns a string that can be used in an SQL table definition to specify
MEDIUMINT field types.
"""
return "MEDIUMINT({}){}{}".format(n,
" UNSIGNED" if unsigned else "",
" NOT NULL" if not_null else "")
def smallint(n, unsigned=True, not_null=True):
"""
Returns a string that can be used in an SQL table definition to specify
SMALLINT field types.
"""
return "SMALLINT({}){}{}".format(n,
" UNSIGNED" if unsigned else "",
" NOT NULL" if not_null else "")
def enum(*values, not_null=True):
"""
Returns a string that can be used in an SQL table definition to specify
ENUM field types.
"""
return "ENUM({}){}".format(
",".join(
["'{}'".format(s.replace("'", "''")) for s in values]),
" NOT NULL" if not_null else "")
class Column(object):
""" Define an object that stores the description of a column in one
MySQL table."""
is_identifier = False
key = False
def __init__(self, name, data_type, index_length=None):
"""
Initialize the column
Parameters
----------
name : str
The name of the column
data_type : str
A MySQL data type description
index_length : int or None
The length of the index for this column. If None, the index length
will be determined automatically, which can take quite some time
for larger corpora.
"""
self._name = name
self._data_type = data_type
self.index_length = index_length
self.unique = False
self.create = True
def __repr__(self):
return "Column({}, {}, {})".format(self._name,
self._data_type,
self.index_length)
@property
def name(self):
return self._name
@name.setter
def name(self, new_name):
self._name = new_name
@property
def data_type(self):
"""
Return the data type of the column.
Returns
-------
data_type : string
The data type of the column in the same form as used by the
MySQL CREATE TABLE command.
"""
return self._data_type
@property
def base_type(self):
"""
Return the base type of the column.
This function does not return the field length, but only the base
data type, i.e. VARCHAR, MEDIUMINT, etc.
Use data_type for the full column specification.
Returns
-------
base_type : string
A MySQL base data type.
"""
return self._data_type.split()[0].partition("(")[0].upper()
@data_type.setter
def data_type(self, new_type):
self._data_type = new_type
def is_numeric(self):
return (self.base_type.endswith("INT") or
self.base_type in ("FLOAT", "REAL", "DECIMAL",
"NUMERIC", "DOUBLE"))
class Identifier(Column):
""" Define a Column class that acts as the primary key in a table."""
is_identifier = True
def __init__(self, name, data_type, unique=True, index_length=None):
super(Identifier, self).__init__(name, data_type, index_length)
self.unique = unique
def __repr__(self):
return ("Identifier(name='{}', data_type='{}', unique={}, "
"index_length={})").format(self._name, self._data_type,
self.unique, self.index_length)
@property
def name(self):
return self._name
@property
def alias(self):
if self.unique:
return self.name
else:
return "{}_primary".format(self.name)
class Link(Column):
""" Define a Column class that links a table to another table. In SQL
terms, this acts like a foreign key."""
key = True
def __init__(self, name, table_name, create=True):
super(Link, self).__init__(name, "", True)
self._link = table_name
self.create = create
def __repr__(self):
return "Link(name='{}', '{}', data_type='{}')".format(
self._name, self._link, self._data_type)
def get_dtype(self, tables):
"""
Look up the data type of the primary key of the linked table.
"""
for tab in tables:
if tab.name == self._link:
return tab.primary.data_type
raise ValueError("No corresponding table found for {}".format(self))
class Table(object):
""" Define a class that is used to store table definitions."""
def __init__(self, name):
self._name = name
self.columns = list()
self.primary = None
self._current_id = 0
self._row_order = []
self._add_cache = list()
# The defaultdict _add_lookup will store the index of rows in this
# table. It uses the trick described at http://ikigomu.com/?p=186
# to achieve an O(1) lookup. When looking up a row as in
#
# x = self._add_lookup[tuple([row[x] for x in self._row_order])]
#
# the returned value is the length of the lookup table at the time
# the entry was created. In other words, this is the row id of that
# row.
self._add_lookup = collections.defaultdict(
lambda: len(self._add_lookup) + 1)
self._commited = {}
self._col_names = None
self._engine = None
self._max_cache = 0
self._line_counter = 0
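    # The O(1) lookup trick described above, in isolation (hypothetical
    # row tuples):
    #
    #     lookup = collections.defaultdict(lambda: len(lookup) + 1)
    #     lookup[("the", "DET")]   # 1  -> first row gets id 1
    #     lookup[("dog", "NOUN")]  # 2  -> second row gets id 2
    #     lookup[("the", "DET")]   # 1  -> existing row keeps its id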
@property
def name(self):
return self._name
@name.setter
def name(self, s):
self._name = s
def setDB(self, db):
self._DB = db
def set_max_cache(self, new):
self._max_cache = new
def commit(self):
"""
Commit the table content to the data base.
This table commits the unsaved content of the table to the data base.
As this method is usually called after a file has been processed,
this ensures that all new table rows are commited, while at the same
time preserving some memory space.
"""
if self._add_cache:
df = pd.DataFrame(self._add_cache).fillna("")
try:
df.columns = self._get_field_order()
except ValueError as e:
raise ValueError("{}: {}".format(self.name, e))
# make sure that all strings are unicode, even under
# Python 2.7:
if sys.version_info < (3, 0):
for column in df.columns[df.dtypes == object]:
try:
df[column] = df[column].apply(utf8)
except TypeError:
pass
# apply unicode normalization:
for column in df.columns[df.dtypes == object]:
try:
df[column] = df[column].str.normalize("NFKC")
except TypeError:
pass
if not self.primary.unique:
if self._DB.db_type == SQL_SQLITE:
df[self.primary.alias] = range(
self._line_counter,
self._line_counter + len(df))
self._line_counter += len(df)
df.to_sql(self.name, self._DB.engine, if_exists="append",
index=False)
self._add_cache = list()
def add(self, values):
"""
Store the 'values' dictionary in the add cache of the table. If
necessary, a valid primary key is added to the values.
"""
lst = [values[x] for x in self._row_order]
if self.primary.name not in self._row_order:
self._current_id += 1
self._add_cache.append(tuple([self._current_id] + lst))
else:
# A few installers appear to depend on this, but actually, I
# can't see how this will ever get executed.
# Installers that pass entry IDs in the values:
# CELEX, GABRA, OBC2, SWITCHBOARD
self._current_id = values[self.primary.name]
self._add_cache.append(tuple(lst))
self._add_lookup[tuple(lst)] = self._current_id
if self._max_cache and len(self._add_cache) > self._max_cache:
self.commit()
# FIXME:
# this comparison may be optimized by increasing an int counter for
# each item that is added, and comparing the counter to
# self._max_cache instead of using len(self._add_cache)
return self._current_id
def add_with_id(self, values):
"""
Store the 'values' dictionary in the add cache of the table. The
primary key is assumed to be included in the values.
"""
tup = tuple([values[x] for x in [self.primary.name] + self._row_order])
self._current_id = values[self.primary.name]
self._add_cache.append(tup)
self._add_lookup[tup] = self._current_id
if self._max_cache and len(self._add_cache) > self._max_cache:
self.commit()
return self._current_id
def get_or_insert(self, values, case=False):
"""
Returns the id of the first entry matching the values from the table.
If there is no entry matching the values in the table, a new entry is
        added to the table based on the values.
Parameters
----------
values : dict
A dictionary with column names as keys, and the entry content
as values.
Returns
-------
id : int
The id of the entry, as it is stored in the SQL table.
"""
key = tuple([values[x] for x in self._row_order])
if key in self._add_lookup:
return self._add_lookup[key]
else:
return self.add(values)
def _get_field_order(self):
if self.primary.name not in self._row_order:
return [self.primary.name] + self._row_order
else:
return self._row_order
def find(self, values):
"""
Return the first row that matches the values, or None
otherwise.
"""
x = self._DB.find(self.name, values, [self.primary.name])
if x:
return x[0]
else:
return None
def get_column_order(self):
return self._row_order
def add_column(self, column):
self.columns.append(column)
if column.name in self._row_order:
if not column.key:
raise ValueError("Duplicate column: {}, {}".format(
self._row_order, column.name))
else:
return
if column.is_identifier:
self.primary = column
if not column.unique:
self._row_order.append(column.name)
else:
self._row_order.append(column.name)
def get_column(self, name):
"""
Return the specified column by name.
Parameters
----------
name : string
The name of the column
Returns
-------
col : object or NoneType
The Column object matching the name, or None.
"""
for x in self.columns:
if x.name == name:
return x
return None
def suggest_data_type(self, name):
"""
Return an SQL data type that may be optimal in terms of storage space.
For INT types, the optimal data type is the smallest integer type that
is large enough to store the integer.
For CHAR and TEXT types, the optimal data type is VARCHAR(max), where
max is the maximum number of characters for the column.
FOR DECIMAL and NUMERIC types, the optimal type is changed to FLOAT
on MySQL and to REAL on SQLite3.
For FLOAT, DOUBLE, and REAL types, the optimal type is not changed on
MySQL, but changed to REAL on SQLite3.
Parameters
----------
name : string
The name of the column
Returns
-------
S : string
A string containing the suggested data type
"""
sql_int = [
(0, 255, "TINYINT UNSIGNED"),
(-128, 127, "TINYINT"),
(0, 65535, "SMALLINT UNSIGNED"),
(-32768, 32767, "SMALLINT"),
(0, 16777215, "MEDIUMINT UNSIGNED"),
(-8388608, 8388607, "MEDIUMINT"),
(0, 4294967295, "INT UNSIGNED"),
(-2147483648, 2147483647, "INT")]
if self._DB.db_type == SQL_SQLITE:
func_length = "length"
elif self._DB.db_type == SQL_MYSQL:
func_length = "CHAR_LENGTH"
else:
func_length = "UNDEFINED"
col = self.get_column(name)
# test if column contains NULL
S = "SELECT MAX({0} IS NULL) FROM {1}".format(col.name, self.name)
with self._DB.engine.connect() as connection:
has_null = connection.execute(S).fetchone()[0]
# In an empty table, the previous check returns NULL. In this case,
# the original data type will be returned.
if has_null is None:
dt_type = col.data_type
# integer data types:
elif col.base_type.endswith("INT"):
S = ("SELECT MIN({0}), MAX({0}) FROM {1} WHERE {0} IS NOT NULL"
.format(col.name, self.name))
with self._DB.engine.connect() as connection:
v_min, v_max = connection.execute(S).fetchone()
for dt_min, dt_max, dt_label in sql_int:
if v_min >= dt_min and v_max <= dt_max:
dt_type = dt_label
break
else:
if v_min >= 0:
dt_type = "BIGINT UNSIGNED"
else:
dt_type = "BIGINT"
# character data types:
elif col.base_type.endswith(("CHAR", "TEXT")):
S = "SELECT MAX({2}(RTRIM({0}))) FROM {1}".format(
col.name, self.name, func_length)
with self._DB.engine.connect() as connection:
max_len = connection.execute(S).fetchone()[0]
dt_type = "VARCHAR({})".format(max_len + 1)
# fixed-point types:
elif col.base_type in ["DECIMAL", "NUMERIC"]:
if self._DB.db_type == SQL_SQLITE:
dt_type = "REAL"
else:
dt_type = col.data_type.replace(col.base_type, "FLOAT")
# float and decimal data types:
elif col.base_type in ["FLOAT", "DOUBLE", "REAL"]:
if self._DB.db_type == SQL_SQLITE:
dt_type = "REAL"
else:
dt_type = col.data_type
S = ("SELECT MIN({0}), MAX({0}) FROM {1} WHERE {0} IS NOT NULL"
.format(col.name, self.name))
with self._DB.engine.connect() as connection:
v_min, _ = connection.execute(S).fetchone()
# all other data types:
else:
dt_type = col.data_type
if has_null == 0 and "NOT NULL" not in dt_type:
dt_type = "{} NOT NULL".format(dt_type)
return dt_type
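    # Worked example for suggest_data_type() with hypothetical column
    # contents: an INT column holding values 0..200 and no NULLs is narrowed
    # to "TINYINT UNSIGNED NOT NULL"; a TEXT column whose longest trimmed
    # entry has 15 characters becomes "VARCHAR(16) NOT NULL".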
def _get_create_string_MySQL(self, tables, index_gen):
col_defs = []
for column in self.columns:
if not column.create:
continue
dtype = column.data_type
if column.key:
dtype = column.get_dtype(tables)
if not column.is_identifier:
col_defs.append("`{}` {}".format(column.name, dtype))
else:
if not column.unique:
# add surrogate key
# do not add AUTO_INCREMENT to strings or ENUMs:
s = "`{}_primary` INT NOT NULL AUTO_INCREMENT"
col_defs.insert(0, s.format(column.name))
col_defs.insert(1, "`{}` {}".format(column.name, dtype))
else:
# do not add AUTO_INCREMENT to strings or ENUMs:
if column.data_type.upper().startswith(
("ENUM", "VARCHAR", "TEXT")):
pattern = "`{}` {}"
else:
pattern = "`{}` {} AUTO_INCREMENT"
col_defs.append(pattern.format(column.name, dtype))
# add generated index column for next token?
#if index_gen:
#if "mariadb" in self._DB.version.lower():
#kwd = "PERSISTENT"
#else:
#kwd = "STORED"
# FIXME: GENERATED is available only in MySQL 5.7.5
# onward. There has to be a check for version.
#col_defs.append("Next{id} INT NOT NULL GENERATED ALWAYS AS ({id} + 1) {kwd}".format(
#id=column.name, kwd=kwd))
#col_defs.append("INDEX {id}Next{id} ({id}, Next{id})".format(
#id=column.name))
if self.primary.unique:
s = "PRIMARY KEY (`{}`)".format(self.primary.name)
else:
s = "PRIMARY KEY (`{}_primary`)".format(self.primary.name)
col_defs.append(s)
return ",\n\t".join(col_defs)
def _get_create_string_SQLite(self, tables, index_gen):
col_defs = []
for column in self.columns:
if not column.create:
continue
# SQLite doesn't support the ENUM data type. ENUM columns are
# therefore converted to VARCHAR columns:
            match = re.match(r"^\s*enum\((.+)\)(.*)$",
column.data_type, re.IGNORECASE)
if match:
max_len = 0
for x in match.group(1).split(","):
max_len = max(max_len, len(x.strip(" '\"")))
dtype = "VARCHAR({max_len}) {spec}".format(
max_len=max_len, spec=match.group(2))
else:
dtype = column.data_type
if column.key:
dtype = column.get_dtype(tables)
if not column.is_identifier:
col_defs.append("{} {}".format(column.name, dtype))
else:
if not column.unique:
# add surrogate key
col_defs.insert(0, ("{}_primary INT NOT NULL PRIMARY KEY"
.format(column.name)))
col_defs.insert(1, ("{} {}".format(column.name, dtype)))
else:
col_defs.append(("{} {} PRIMARY KEY"
.format(column.name, dtype)))
# make SQLite columns case-insensitive by default
for i, x in enumerate(list(col_defs)):
field_type = x.split()[1]
if ("VARCHAR" in field_type.upper() or
"TEXT" in field_type.upper()):
col_defs[i] = "{} COLLATE NOCASE".format(x)
table_str = ",\n\t".join(col_defs)
table_str = re.sub(r"\s*UNSIGNED", "", table_str)
return table_str
def get_create_string(self, db_type, tables, index_gen=False):
"""
Generates the SQL command required to create the table.
Parameters
----------
db_type : str
A string representing the SQL engine, either "mysql" or "sqlite"
index_gen : bool
A boolean variable that indicates whether a generated indexed
column should be created for this table.
If `index_gen` is False, no generated index column will be
generated. If it is True, an generated column named `Next{}` will
will be generated with the primary index name inserted into the
string. This column will contain the value of the
primary key + 1.
At the moment, this is only available in MySQL databases.
tables : list of Table objects
A list of Table objects that is used to resolve links between
tables.
Returns
-------
S : str
A string that can be sent to the SQL engine in order to create
the table according to the specifications.
"""
if db_type == SQL_SQLITE:
table_str = self._get_create_string_SQLite(tables, index_gen)
else:
table_str = self._get_create_string_MySQL(tables, index_gen)
return table_str
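if __name__ == "__main__":
    # Minimal, self-contained sketch (not part of the original module): build
    # a toy table definition and print its SQLite CREATE string. Because of
    # the relative imports above, run it as a module, e.g.
    # `python -m coquery.tables` (module path assumed).
    tab = Table("Lexicon")
    tab.add_column(Identifier("WordId", "MEDIUMINT(7) UNSIGNED"))
    tab.add_column(Column("Word", "VARCHAR(40)"))
    print(tab.get_create_string(SQL_SQLITE, [tab]))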
| gpl-3.0 |
mizzao/ggplot | ggplot/tests/test_theme.py | 12 | 2707 | from nose.tools import assert_equal, assert_true, assert_raises
from numpy import linspace
from pandas import DataFrame
from ggplot.tests import image_comparison, cleanup
from ggplot import *
df = DataFrame({"x": linspace(999, 1111, 9),
"y": linspace(999, 1111, 9)})
simple_gg = ggplot(aes(x="x", y="y"), data=df) + geom_line()
def _test_theme1():
gg = ggplot(aes(x='date', y='beef'), data=meat) + \
geom_point(color='lightblue') + \
stat_smooth(span=.15, color='black', se=True) + \
xlab("Date") + \
ylab("Head of Cattle Slaughtered")
gg_mpl1 = gg + theme_matplotlib()
gg.rcParams["foo"] = "bar"
gg_mpl2 = gg + theme_matplotlib()
assert_equal(gg_mpl1.rcParams, gg_mpl2.rcParams)
def _test_theme2():
gg = ggplot(aes(x='date', y='beef'), data=meat)
gg_g1 = gg + theme_gray()
gg.post_plot_callbacks.append("foo")
gg_g2 = gg + theme_gray()
assert_equal(gg_g1.post_plot_callbacks, gg_g2.post_plot_callbacks)
def test_theme3():
tg = theme_gray()
assert_true(tg.complete)
@image_comparison(["red_text"], tol=13)
def test_theme4():
    # Incomplete theme should have the default theme plus additional theme
# elements.
print(simple_gg + theme(axis_text=element_text(color="red", size=50, angle=90)))
def test_theme5():
# complete theme t2 replaces partial theme t2
t1 = theme_gray()
t2 = theme(text=element_text())
t3 = t2 + t1
assert_true(t3.complete)
def test_theme6():
# partial theme t2 is combined with complete theme t1
t1 = theme_gray()
t2 = theme(text=element_text())
t3 = t1 + t2
assert_equal(t3.element_themes, t2.element_themes)
def test_theme7():
# partial themes should be combined for later application to a complete
# theme
t1 = theme(text=element_text())
t2 = theme(axis_text=element_text())
t3 = t1 + t2
assert_equal(t3.element_themes, t1.element_themes + t2.element_themes)
# based on examples from http://docs.ggplot2.org/current/theme.html
gg = ggplot(aes(x='mpg', y='wt'), data=mtcars) + geom_point()
@image_comparison(["general_first"], tol=13)
def test_theme8():
print(simple_gg +
theme(text=element_text(color="red", size=50, angle=45)) +
theme(axis_text_y=element_text(color="green")) +
theme(axis_title=element_text(color="blue")))
@image_comparison(["general_last"], tol=13)
def test_theme9():
print(simple_gg +
theme(axis_text_y=element_text(color="green")) +
theme(axis_title=element_text(color="blue")) +
theme(text=element_text(color="red", size=50, angle=-45)))
def test_theme10():
assert_raises(TypeError, lambda: theme() + gg)
| bsd-2-clause |
ebilionis/py-best | demo/test_treed_gp.py | 1 | 1557 | """Test the KOSolver class.
Author:
Ilias Bilionis
Date:
12/2/2012
"""
if __name__ == '__main__':
import fix_path
from examples.ko import KOSolver
from best.gp import TreedMultioutputGaussianProcess
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
# Initialize the solver
solver = KOSolver(k=2, T=[0, 1], n_t=32)
# Initialize the treed GP
tmgp = TreedMultioutputGaussianProcess(solver=solver)
tmgp.num_xi_init = 10
tmgp.num_xi_test = 100
tmgp.num_max = 100
tmgp.num_elm_max = 20
tmgp.verbose = True
tmgp.model.sample_g = True
tmgp.model.num_mcmc = 1
tmgp.model.num_init = 100
# Initialial hyper-parameters
init_hyp = np.array([.1, .1, .1, 1e-1, 1e-1])
tmgp.init_hyp = init_hyp
tmgp.num_mcmc = 100
# Train
tmgp.train()
# Print the tree
print str(tmgp.tree)
# A fine scale solver to test our predictions
fine_solver = KOSolver(k=solver.k_of[0], n_t=50)
# Make predictions
for i in range(10):
xi = np.random.rand(1, solver.k_of[0])
X = [xi] + fine_solver.X_fixed
H = tmgp.mean_model(X)
n = np.prod([x.shape[0] for x in X])
Yp = np.ndarray((n, solver.q), order='F')
Vp = np.ndarray((n, solver.q), order='F')
tmgp(X, H, Yp, Vp)
Y = fine_solver(xi[0, :])
plt.plot(fine_solver.X_fixed[0], Y)
E = 2. * np.sqrt(Vp)
for i in range(solver.q):
plt.errorbar(fine_solver.X_fixed[0], Yp[:, i], yerr=E[:, i])
plt.show() | lgpl-3.0 |
ycaihua/scikit-learn | examples/decomposition/plot_incremental_pca.py | 244 | 1878 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
c=c, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best")
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
dalek7/Algorithms | Optimization/tensorflow-linear_regression/04-minimizing_cost_gradient_update.py | 1 | 1302 | # Lab 3 Minimizing Cost
import tensorflow as tf
import matplotlib.pyplot as plt
tf.set_random_seed(777) # for reproducibility
x_data = [1, 2, 3]
y_data = [1, 2, 3]
# Try to find values for W and b to compute y_data = W * x_data + b
# We know that W should be 1 and b should be 0
# But let's use TensorFlow to figure it out
W = tf.Variable(tf.random_normal([1]), name='weight')
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
# Our hypothesis for linear model X * W
hypothesis = X * W
# cost/loss function
cost = tf.reduce_mean(tf.square(hypothesis - Y))
# Minimize: Gradient Descent using derivative: W -= learning_rate * derivative
learning_rate = 0.1
gradient = tf.reduce_mean((W * X - Y) * X)
descent = W - learning_rate * gradient
update = W.assign(descent)
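# Why this update works (derivation, not part of the original lab code):
# cost = mean((W*x - y)^2), so d(cost)/dW = 2 * mean((W*x - y) * x).
# The constant factor 2 is dropped in `gradient` above because it merely
# rescales the learning rate; `descent` therefore implements
# W <- W - learning_rate * d(cost)/dW.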
# Launch the graph in a session.
sess = tf.Session()
# Initializes global variables in the graph.
sess.run(tf.global_variables_initializer())
w_history = []
steps =[]
for step in range(21):
sess.run(update, feed_dict={X: x_data, Y: y_data})
curr_cost = sess.run(cost, feed_dict={X: x_data, Y: y_data})
curr_W = sess.run(W)[0]
w_history.append(curr_W)
steps.append(step)
print(step, curr_cost, curr_W)
# Show the cost function
plt.plot( steps, w_history)
plt.title('W', fontsize=10)
plt.show() | mit |
vshtanko/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 248 | 6359 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P.J.Rousseuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1]))
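# With the settings above, the MCD breakdown point quoted in the docstring is
# (n_samples - n_features - 1) / 2 = (80 - 5 - 1) / 2 = 37 outliers, so the
# largest contamination levels tested (up to n_samples / 2 = 40) approach and
# exceed what the robust estimator can absorb.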
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size / 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size / 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size / 5 + 1)],
label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size / 5):(x_size / 2 - 1)],
err_cov_emp_full.mean(1)[(x_size / 5):(x_size / 2 - 1)], color='green',
ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/matplotlib/hatch.py | 10 | 7132 | """
Contains a classes for generating hatch patterns.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import numpy as np
from matplotlib.path import Path
class HatchPatternBase:
"""
The base class for a hatch pattern.
"""
pass
class HorizontalHatch(HatchPatternBase):
def __init__(self, hatch, density):
self.num_lines = int((hatch.count('-') + hatch.count('+')) * density)
self.num_vertices = self.num_lines * 2
def set_vertices_and_codes(self, vertices, codes):
steps, stepsize = np.linspace(0.0, 1.0, self.num_lines, False,
retstep=True)
steps += stepsize / 2.
vertices[0::2, 0] = 0.0
vertices[0::2, 1] = steps
vertices[1::2, 0] = 1.0
vertices[1::2, 1] = steps
codes[0::2] = Path.MOVETO
codes[1::2] = Path.LINETO
class VerticalHatch(HatchPatternBase):
def __init__(self, hatch, density):
self.num_lines = int((hatch.count('|') + hatch.count('+')) * density)
self.num_vertices = self.num_lines * 2
def set_vertices_and_codes(self, vertices, codes):
steps, stepsize = np.linspace(0.0, 1.0, self.num_lines, False,
retstep=True)
steps += stepsize / 2.
vertices[0::2, 0] = steps
vertices[0::2, 1] = 0.0
vertices[1::2, 0] = steps
vertices[1::2, 1] = 1.0
codes[0::2] = Path.MOVETO
codes[1::2] = Path.LINETO
class NorthEastHatch(HatchPatternBase):
def __init__(self, hatch, density):
self.num_lines = int((hatch.count('/') + hatch.count('x') +
hatch.count('X')) * density)
if self.num_lines:
self.num_vertices = (self.num_lines + 1) * 2
else:
self.num_vertices = 0
def set_vertices_and_codes(self, vertices, codes):
steps = np.linspace(-0.5, 0.5, self.num_lines + 1, True)
vertices[0::2, 0] = 0.0 + steps
vertices[0::2, 1] = 0.0 - steps
vertices[1::2, 0] = 1.0 + steps
vertices[1::2, 1] = 1.0 - steps
codes[0::2] = Path.MOVETO
codes[1::2] = Path.LINETO
class SouthEastHatch(HatchPatternBase):
def __init__(self, hatch, density):
self.num_lines = int((hatch.count('\\') + hatch.count('x') +
hatch.count('X')) * density)
self.num_vertices = (self.num_lines + 1) * 2
if self.num_lines:
self.num_vertices = (self.num_lines + 1) * 2
else:
self.num_vertices = 0
def set_vertices_and_codes(self, vertices, codes):
steps = np.linspace(-0.5, 0.5, self.num_lines + 1, True)
vertices[0::2, 0] = 0.0 + steps
vertices[0::2, 1] = 1.0 + steps
vertices[1::2, 0] = 1.0 + steps
vertices[1::2, 1] = 0.0 + steps
codes[0::2] = Path.MOVETO
codes[1::2] = Path.LINETO
class Shapes(HatchPatternBase):
filled = False
def __init__(self, hatch, density):
if self.num_rows == 0:
self.num_shapes = 0
self.num_vertices = 0
else:
self.num_shapes = ((self.num_rows // 2 + 1) * (self.num_rows + 1) +
(self.num_rows // 2) * (self.num_rows))
self.num_vertices = (self.num_shapes *
len(self.shape_vertices) *
(self.filled and 1 or 2))
def set_vertices_and_codes(self, vertices, codes):
offset = 1.0 / self.num_rows
shape_vertices = self.shape_vertices * offset * self.size
if not self.filled:
inner_vertices = shape_vertices[::-1] * 0.9
shape_codes = self.shape_codes
shape_size = len(shape_vertices)
cursor = 0
for row in xrange(self.num_rows + 1):
if row % 2 == 0:
cols = np.linspace(0.0, 1.0, self.num_rows + 1, True)
else:
cols = np.linspace(offset / 2.0, 1.0 - offset / 2.0,
self.num_rows, True)
row_pos = row * offset
for col_pos in cols:
vertices[cursor:cursor + shape_size] = (shape_vertices +
(col_pos, row_pos))
codes[cursor:cursor + shape_size] = shape_codes
cursor += shape_size
if not self.filled:
vertices[cursor:cursor + shape_size] = (inner_vertices +
(col_pos, row_pos))
codes[cursor:cursor + shape_size] = shape_codes
cursor += shape_size
class Circles(Shapes):
def __init__(self, hatch, density):
path = Path.unit_circle()
self.shape_vertices = path.vertices
self.shape_codes = path.codes
Shapes.__init__(self, hatch, density)
class SmallCircles(Circles):
size = 0.2
def __init__(self, hatch, density):
self.num_rows = (hatch.count('o')) * density
Circles.__init__(self, hatch, density)
class LargeCircles(Circles):
size = 0.35
def __init__(self, hatch, density):
self.num_rows = (hatch.count('O')) * density
Circles.__init__(self, hatch, density)
class SmallFilledCircles(SmallCircles):
size = 0.1
filled = True
def __init__(self, hatch, density):
self.num_rows = (hatch.count('.')) * density
Circles.__init__(self, hatch, density)
class Stars(Shapes):
size = 1.0 / 3.0
filled = True
def __init__(self, hatch, density):
self.num_rows = (hatch.count('*')) * density
path = Path.unit_regular_star(5)
self.shape_vertices = path.vertices
self.shape_codes = np.ones(len(self.shape_vertices)) * Path.LINETO
self.shape_codes[0] = Path.MOVETO
Shapes.__init__(self, hatch, density)
_hatch_types = [
HorizontalHatch,
VerticalHatch,
NorthEastHatch,
SouthEastHatch,
SmallCircles,
LargeCircles,
SmallFilledCircles,
Stars
]
def get_path(hatchpattern, density=6):
"""
    Given a hatch specifier, *hatchpattern*, generates a Path to render
the hatch in a unit square. *density* is the number of lines per
unit square.
"""
density = int(density)
patterns = [hatch_type(hatchpattern, density)
for hatch_type in _hatch_types]
num_vertices = sum([pattern.num_vertices for pattern in patterns])
if num_vertices == 0:
return Path(np.empty((0, 2)))
vertices = np.empty((num_vertices, 2))
codes = np.empty((num_vertices,), np.uint8)
cursor = 0
for pattern in patterns:
if pattern.num_vertices != 0:
vertices_chunk = vertices[cursor:cursor + pattern.num_vertices]
codes_chunk = codes[cursor:cursor + pattern.num_vertices]
pattern.set_vertices_and_codes(vertices_chunk, codes_chunk)
cursor += pattern.num_vertices
return Path(vertices, codes)
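# --- Illustrative sketch (not part of the original module) -----------------
# A minimal, hedged demonstration of how get_path can be exercised directly;
# the hatch string and density below are arbitrary choices.
def _demo_get_path():
    # '/' contributes north-east lines and 'o' small circles; density is the
    # number of lines per unit square, as documented above.
    path = get_path('/o', density=6)
    # The returned Path pairs one vertex array with one code array of equal
    # length, sized by whichever hatch types matched the specifier.
    return path.vertices.shape, path.codes.shape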
| gpl-2.0 |
xubenben/scikit-learn | sklearn/cluster/mean_shift_.py | 96 | 15434 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Martino Sorbaro <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
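# --- Illustrative sketch (not part of the original module) -----------------
# A hedged example of calling estimate_bandwidth on synthetic data; the
# sample size and quantile are arbitrary demo values and no exact bandwidth
# is asserted, since it depends on the random data.
def _demo_estimate_bandwidth():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 2)
    # Subsampling keeps the quadratic cost manageable on larger inputs.
    return estimate_bandwidth(X, quantile=0.3, n_samples=100)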
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None, n_jobs=1):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
        Points used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None,\
got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy \
or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
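# --- Illustrative sketch (not part of the original module) -----------------
# Minimal, hedged usage of the functional interface. bin_seeding=True keeps
# the number of seeds small; the bandwidth is left to the built-in heuristic.
# The blob layout is an arbitrary demo choice.
def _demo_mean_shift():
    rng = np.random.RandomState(42)
    # Two well-separated blobs in 2-D.
    X = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 10.0])
    centers, labels = mean_shift(X, bin_seeding=True)
    return centers.shape, np.unique(labels)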
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
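# --- Illustrative sketch (not part of the original module) -----------------
# get_bin_seeds snaps points onto a grid of the given bin size and keeps only
# sufficiently populated bins; the coordinates below are arbitrary demo values.
def _demo_get_bin_seeds():
    X = np.array([[1.0, 1.1], [1.05, 1.0], [5.0, 5.0]])
    # With bin_size=1.0 the two nearby points fall into the same bin, so two
    # seeds come back instead of three.
    return get_bin_seeds(X, bin_size=1.0, min_bin_freq=1)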
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
        Bandwidth used in the flat kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity tends
to O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=1):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
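# --- Illustrative sketch (not part of the original module) -----------------
# The estimator interface mirrors the functional one above: fit() stores
# cluster_centers_ and labels_, and predict() assigns new points to the
# nearest centre. All numbers here are arbitrary demo values.
def _demo_mean_shift_estimator():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 8.0])
    ms = MeanShift(bin_seeding=True).fit(X)
    new_labels = ms.predict(np.array([[0.0, 0.0], [8.0, 8.0]]))
    return ms.cluster_centers_, new_labels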
| bsd-3-clause |
DiamondLightSource/FTIR-Filtering-Experimental | Mark FTIR1/src/ft_Opus_Equivalent.py | 1 | 13162 | import h5py
import numpy as np
import matplotlib.pyplot as plt
import cmath as m
from scipy import signal
import pylab
from myClasses import fft
# ft = fft()
#
#
# f = h5py.File("/home/flb41892/data/Nexus different parameters/NLC on Res2 ZF4 HFL7899 PR32.0.nxs","r")
# s = f["entry1/instrument/interferometer/sample_interferogram_scan"][...] #signal on which to perform FFT
# #ref = f["entry1/instrument/interferometer/reference_scan"][...] #noise signal
# highfold = f['/entry1/instrument/interferometer/opus_parameters/instrument/high_folding_limit'][...]
#
# zerofill = f['/entry1/instrument/interferometer/opus_parameters/ft/zero_filling_factor'][...]
# zerofill =np.asarray(zerofill, int)
# refer = f['/entry1/instrument/interferometer/reference_scan'][...] #reference scan
# renergy = f['/entry1/instrument/interferometer/reference_energy'][...] #reference energy
# rint = f['/entry1/instrument/interferometer/reference_interferogram_scan'][...] #reference scan
# com = f["entry1/instrument/interferometer/sample_scan"][...]# this is the FT of the same file, as performed by opus
#
# axis = f["entry1/instrument/interferometer/sample_energy"][...] #signal on which to perform FFT
#
# ymax = f['/entry1/instrument/interferometer/opus_parameters/sample_data_interferogram/y_maximum'][...]
# yscaling = f['/entry1/instrument/interferometer/opus_parameters/sample_data_interferogram/y_scaling_factor'][...]
# ymaxspect = f['/entry1/instrument/interferometer/opus_parameters/sample_data/y_maximum'][...]
#n = 13 #choose the index of the interferogram you want to analyse
ft = fft()
f = h5py.File("/home/flb41892/data/markmapping /4 scan 3 00um.0.hdf5","r") # load you nexus file here
s = f["Data/SampleInterferogram/yfolded"][...] #signal on which to perform FFT
s = s[20,20,:]
com = f["/Data/Sample/yfolded"][...]# this is the FT of the same file, as performed by opus
com = com[1,2,:]
highfold = f['/Parameters/Instrument/High Folding Limit'][...]
zerofill = f['/Parameters/FT/Zero Filling Factor'][...]
zerofill =np.asarray(zerofill, float)
refer = f['/Data/ReferenceInterferogram/yfolded'][...] #reference scan
refer = refer[40,30,:]
klaser = f['/Parameters/Instrument/Laser Wavenumber'][...]
#renergy = f["entry1/instrument/interferometer/reference_energy"][...] # energy axis of reference scan
#absenergy = f["entry1/instrument/interferometer/ratio_absorbance_energy"][...] # energy axis of reference scan
ymax = f['/Parameters/SampleDataInterferogram/Y - Maximum'][...] #max amplitude of interferogram processed by Opus
yscaling = f['/Parameters/SampleDataInterferogram/Y - Scaling Factor'][...] #scaling factor that Opus applies to each intererigram before processing it.
ymaxspect = f['/Parameters/SampleData/Y - Maximum'][...]#scaling factor that Opus applies to the final spectrum before plotting it.
axis = f["/Data/Sample/x"][...] #energy axis from Opus
s = refer
single = s[0:int(0.5*s.size)] #in case of birefringent interferogram, take only one peak to analyse (avoids sinusoidal modulations)
#zero filling(pad until 16,384 if array is below this number and up to 65536 points if array is larger)
single = single - np.mean(single) # eliminate offset of interferogram
single = single*yscaling/(s.max()/ymax)
if highfold < 3950.0:
if 16384<single.size < 32768:
if zerofill < 4:
single = np.concatenate((single,np.zeros(32768-single.size)))
if zerofill == 4:
single = np.concatenate((single,np.zeros(65536-single.size)))
if 8192<single.size < 16384:
if zerofill < 4:
single = np.concatenate((single,np.zeros(16384-single.size)))
if zerofill == 4:
single = np.concatenate((single,np.zeros(32768-single.size)))
if 4096<single.size < 8192:
if zerofill < 4:
single = np.concatenate((single,np.zeros(8192-single.size)))
if zerofill == 4:
single = np.concatenate((single,np.zeros(16384-single.size)))
if 2048<single.size < 4096:
if zerofill < 4:
single = np.concatenate((single,np.zeros(4096-single.size)))
if zerofill == 4:
single = np.concatenate((single,np.zeros(8192-single.size)))
if 1024<single.size < 2048:
if zerofill < 4:
single = np.concatenate((single,np.zeros(2048-single.size)))
if zerofill == 4:
single = np.concatenate((single,np.zeros(4096-single.size)))
if single.size < 1024:
if zerofill < 4:
single = np.concatenate((single,np.zeros(1024-single.size)))
if zerofill == 4:
single = np.concatenate((single,np.zeros(2048-single.size)))
single = single*4
if 3950.0<highfold <7900.0:
if 16384<single.size < 32768:
if zerofill < 4:
single = np.concatenate((single,np.zeros(32768-single.size)))
if zerofill == 4:
single = np.concatenate((single,np.zeros(65536-single.size)))
if 8192<single.size < 16384:
if zerofill < 4:
single = np.concatenate((single,np.zeros(16384-single.size)))
if zerofill == 4:
single = np.concatenate((single,np.zeros(32768-single.size)))
if 4096<single.size < 8192:
if zerofill < 4:
single = np.concatenate((single,np.zeros(8192-single.size)))
if zerofill == 4:
single = np.concatenate((single,np.zeros(16384-single.size)))
if 2048<single.size < 4096:
if zerofill < 4:
single = np.concatenate((single,np.zeros(4096-single.size)))
if zerofill == 4:
single = np.concatenate((single,np.zeros(8192-single.size)))
if single.size < 2048:
if zerofill < 4:
single = np.concatenate((single,np.zeros(2048-single.size)))
if zerofill == 4:
single = np.concatenate((single,np.zeros(4096-single.size)))
single = single*2
if 7900.0<highfold <15800.0:
if 16384<single.size < 32768:
if zerofill < 4:
single = np.concatenate((single,np.zeros(32768-single.size)))
if zerofill == 4:
single = np.concatenate((single,np.zeros(65536-single.size)))
if 8192<single.size < 16384:
if zerofill < 4:
single = np.concatenate((single,np.zeros(16384-single.size)))
if zerofill == 4:
single = np.concatenate((single,np.zeros(32768-single.size)))
if 4096<single.size < 8192:
if zerofill < 4:
single = np.concatenate((single,np.zeros(8192-single.size)))
if zerofill == 4:
single = np.concatenate((single,np.zeros(16384-single.size)))
if 2048<single.size < 4096:
if zerofill < 4:
single = np.concatenate((single,np.zeros(4096-single.size)))
if zerofill == 4:
single = np.concatenate((single,np.zeros(8192-single.size)))
if single.size < 2048:
if zerofill < 4:
single = np.concatenate((single,np.zeros(2048-single.size)))
if zerofill == 4:
single = np.concatenate((single,np.zeros(4096-single.size)))
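# --- Illustrative sketch (not part of the original script) -----------------
# The padding ladder above can be expressed more compactly. This helper is a
# hedged sketch for readability only and is never called, so the original
# behaviour is unchanged; sizes that are exact powers of two are treated
# slightly differently than in the branches above.
def _zero_fill_sketch(interferogram, highfold_limit, zerofill_factor):
    # Pad to the next power of two, or one step further when the zero
    # filling factor is 4, then apply the same scale factors as above.
    n = interferogram.size
    target = 1
    while target < n:
        target *= 2
    if zerofill_factor == 4:
        target *= 2
    padded = np.concatenate((interferogram, np.zeros(target - n)))
    if highfold_limit < 3950.0:
        scale = 4
    elif highfold_limit < 7900.0:
        scale = 2
    else:
        scale = 1
    return padded * scale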
#phase correction -Mertz method
n = 256 # number of points to select for phase correction about ZPD point
zeros = np.zeros(2*n)#make array of zeros of same length as the signal to be analysed
zeros[:] = single[np.argmax(single)-n:np.argmax(single)+n]
#ramp function (ramp is old triangular fcn, better to use the Black Harris 3 step fct w[t] )
ramp = np.zeros(2*n)
ramp[0:n] = np.linspace(0,1,n,endpoint=False)
ramp[n:] = np.linspace(1,0,n)
N = 2*n
w = np.zeros(N)
w2 = np.zeros(N)
for j in range(0,N):
w[j] = 0.44959-.49364*np.cos(2*m.pi*j/N)+.05677*np.cos(4*m.pi*j/N)
w2[j] = 0.42323-.49755*np.cos(2*m.pi*j/N)+.07922*np.cos(4*m.pi*j/N)
zeros = zeros*ramp #multiply zeros array by ramp fcn to prepare array for phase correction
#rearrange data, so that right side of data(including peak) is moved to front of array and left hand side
#is moved to the back
#rotate the 512 long array
interf = []
interf[0:n] = zeros[np.argmax(zeros):zeros.size]
interf[n:]=zeros[0:np.argmax(zeros)]
ones = np.ones(np.size(interf))
ones[25:60] = np.linspace(0.5,0,35, endpoint = False)
ones[460:500] = np.linspace(0,0.5, 40)
interf1 = interf * ones
#frequency axis
lmda = 1./highfold#cm
k = np.arange(np.size(single))
v = np.divide(2.0*k,lmda*np.size(single)) # f = k/(N*lambda) where k is range of values from zero to array size,
kbig = np.arange(np.size(single))
vbig = np.divide(kbig,lmda*np.size(single))
#N is number of points in interferogram
#fourier transform
output_axis= np.size(interf)
trans= np.fft.fft(interf, output_axis)
#reff= np.fft.rfft(ref,output_axis)
#decompose into real and imaginary parts of fourier spectrum
freal= np.real(trans)
fim= np.imag(trans)
#reffr = np.abs(reff)#do same with reference set
#calculate phase angle
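# Note (added for clarity): np.arctan of the ratio recovers the phase only
# modulo pi; np.arctan2(fim, freal) would resolve the correct quadrant and is
# the more robust choice when freal can change sign.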
phi = np.arctan(np.divide(fim,freal))
cphi = np.cos(phi)
sphi = np.sin(phi)
pw = np.sqrt(np.add(np.square(freal) , np.square(fim)))
frealp = freal*pw
fimp = fim*pw
#apodization using a Black Harris 3 term fcn
apodf = np.zeros(single.size) #61dB
apodf2 = np.zeros(single.size) #67 dB
for j in range(0,single.size):
apodf[j] = 0.44959-.49364*np.cos(2*m.pi*j/single.size)+.05677*np.cos(4*m.pi*j/single.size)
apodf2[j] = 0.42323-.49755*np.cos(2*m.pi*j/single.size)+.07922*np.cos(4*m.pi*j/single.size)
ins = ((np.size(single)-np.argmax(single)) - (np.argmax(single)))/2
single = np.insert(single,0,np.zeros(ins))
single = single[:np.size(single)-ins]
single = single *apodf2
apod_singler = np.zeros(np.size(single))
apod_singler[0:single.size-np.argmax(single)] = single[np.argmax(single):single.size]
apod_singler[single.size-np.argmax(single):]=single[0:np.argmax(single)]
apod_singler2 = apod_singler
#apod_singler2[0:1500] =np.zeros(1500)
#implement black harris inverse filter
blh = np.ones(100)-np.blackman(100)
c = 100
m = 0.0
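# Note (added for clarity): this rebinding of `m` shadows the `cmath as m`
# import above, and np.insert below returns a new array rather than modifying
# `blh` in place, so its result is discarded as written.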
np.insert(blh,np.argmin(blh),np.multiply(m,np.ones(c)))
#apod_singler2[-1500:] =np.zeros(1500)
exp = np.linspace(0,np.size(apod_singler2),np.size(apod_singler2))
exp[0:np.size(exp)] = np.exp(-exp/150)
f = exp[::-1]
exp[np.size(exp)/2:] = f[np.size(exp)/2:]
l = 300
exp[np.size(exp)/2-l:np.size(exp)/2+l] = np.ones(2*l)
#smoothen out middle of pass filter using gaussian fcn
d = signal.gaussian(200,33)
exp[np.argmin(exp):np.argmin(exp)+100]= d[0:np.size(d)/2]
exp[np.argmin(exp)-100:np.argmin(exp)] = d[np.size(d)/2:]
apod_singler2 = np.multiply(apod_singler2,exp)
#can zerofill most on interferogram (useful to determine where the secondary peaks are)
#output_axis1 = single.size
#c = 300
#apod_singler2[c:np.size(apod_singler2)-c] = np.zeros(np.size(apod_singler2)-2*c)
#FFT the interferogram which was previously apodized and rotated
#extend phase arrays to match interferogram arrays(interpolation)
xp = np.arange(0,2*n)
x = np.arange(0,2*n,512./single.size)
cphi2 = np.interp(x,xp,cphi)
sphi2 = np.interp(x,xp,sphi)
#power spectrum
output_axis1 = np.size(apod_singler)
apodfi = np.fft.fft(apod_singler, output_axis1)
apodr = np.real(apodfi)
apodi = np.imag(apodfi)
#see difference between non eliminated and eliminated secondary fringes
#with fringe elim:
apodfi2 = np.fft.fft(apod_singler2, output_axis1)
apodr2 = np.real(apodfi2)
apodi2 = np.imag(apodfi2)
#multiply the complex fourier transform by the complex phase correction factor
finalr = np.multiply(apodr,cphi2)
finali = np.multiply(apodi,sphi2)
finalr2 = np.multiply(apodr2,cphi2)
finali2 = np.multiply(apodi2,sphi2)
final = np.add(finalr,finali)
final2 = np.add(finalr2,finali2)
#average two sided interferogram results
schannel = ft.singleChannel(s[int(0.5*s.size):], highfold,zerofill,ymax,s.max(),yscaling,ymaxspect)
#final = np.add(final,schannel[0])
#final = np.true_divide(final,2)
a = np.add(v,-axis[0])
a = np.abs(a)
a = a.argmin()
b = np.add(v,-axis[axis.size-1])
b = np.abs(b)
b = b.argmin()
full = final
final = final[a-1:b+1]
final = np.multiply(final,com.max()/final.max())
refer = ft.singleChannel2(refer,highfold,zerofill,ymax,s.max(),yscaling,ymaxspect)
absorbance = -np.log10(schannel[0]/refer[0])
#normalisation wrt. highest peak
#plt.plot(axis,final*ymaxspect/final.max())
#plt.plot(axis,final)
#plt.plot(axis,com)
plt.plot(sphi,label ='sine of phase angle at position (40,30)')
pylab.legend(loc = 'upper right')
plt.show()
#comparison with Opus FFT
#dif = np.add(final_transf,-com)
#transmission calculation
#t=trans/reffr
#absorption
#a = -np.log10(t)
#plot graphs
#f,(ax1,ax2) = plt.subplots(2)
#ax1.plot(single)
#ax1.set_title("Initial Signal Slice")
#ax2.plot(frealp,'g',fimp,'r')
#ax2.set_title("FFT of Initial Signal Slice")
#pylab.plot(apodf2, label = 'Original Oscillation Plagued Spectrum')
# pylab.xlabel("cell index ", fontsize = 17 )
# pylab.ylabel("BH3 Filter", fontsize = 17)
#pylab.legend(loc = 'upper right')
'''
Created on 17 Jun 2015
@author: flb41892
'''
| apache-2.0 |
jacob-ruth/RuthNote | midi_to_numpy.py | 1 | 7112 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 17 23:27:58 2017
@author: danjr
"""
'''
Script to turn a midi file into a piano roll-like numpy array.
It'll likely handle multiple simultaneous notes within a single midi track
incorrectly.
'''
import mido
import numpy as np
import matplotlib.pyplot as plt
# midi file to read
filepath = r'bach\chorales\01ausmei.mid'
midi_file = mido.MidiFile(filepath)
'''
Initialize the piano roll matrix
'''
num_notes = 100
# find all the tempos in the piece, and as the worst-case scenario assume the overall tempo is the fastest one
tempos = list(map(lambda x: x.tempo, filter(lambda message: message.type=='set_tempo', [item for sublist in midi_file.tracks for item in sublist])))
tempo = min(tempos)
#pretty sure this function just multiplies all these numbers together but oh well
num_ticks = int(mido.second2tick(midi_file.length,midi_file.ticks_per_beat, tempo))
piano_roll = np.zeros([num_notes,num_ticks*2])
for ti,track in enumerate(midi_file.tracks):
num_messages = len(track)
'''
Look for note_on/note_off pairs, and turn "on" the corresponding values in
piano_roll.
'''
for mi in range(num_messages):
message = track[mi]
if (message.type=='note_on') and (message.velocity!=0):
'''
This message designates the start of a note. Once the message
designating the end of the note is found, the piano roll will be
updated to include the note.
'''
on_message = message
'''
Find the tick marker for the start of the note. This is the sum of
all the previous messages' timings, plus this timing.
'''
times_before_on = list(map(lambda message: message.time,filter(lambda message: message.type=='note_on' or message.type=='note_off',track[0:mi+1])))
start_tick = np.sum(times_before_on)
# look for the corresponding "note_off" message in the subsequent messages
found_note_off = False
mii = mi + 1 # start by looking at the next message; this will likely be the "off" message
while found_note_off == False:
# check to see if this message is the "off" message
if ((track[mii].type == 'note_off') | (hasattr(track[mii], 'velocity') and track[mii].velocity==0)) and (track[mii].note == message.note):
'''
The corresponding "off" message has been found.
Find the duration of the note (the starting and ending
ticks) and update piano_roll.
'''
found_note_off = True
off_message = track[mii]
'''
The note's off tick is the sum of all previous messages'
ticks plus this timing.
'''
times_before_off = list(map(lambda message: message.time,filter(lambda message: message.type=='note_on' or message.type=='note_off',track[0:mii+1])))
end_tick = np.sum(times_before_off)
piano_roll[on_message.note,start_tick:end_tick] = ti # for now, set the value to the track number so different tracks are distinguishable in the matrix plot
else:
'''
This is not the corresponding "off" message. Update the
index to look at the next message.
'''
mii = mii+1
if mii == num_messages:
'''
In case it finds an "orphan" note_on message -- for now,
just ignore it.
'''
found_note_off = True # just so it stops looking
print('Warning, did not find an "off" message for the following message:')
print(message)
'''
Remove the initial empty columns -- why do these exist?
'''
print('Removing initial empty columns.')
sum_cols = np.sum(piano_roll,axis=0)
first_non_empty = np.nonzero(sum_cols)[0][0]
piano_roll = piano_roll[:,first_non_empty::]
print('The piano roll now has shape:')
print(piano_roll.shape)
'''
Remove the trailing empty columns. Shouldn't be an issue once dynamic sizing
is implemented.
'''
print('Removing trailing empty columns.')
sum_cols = np.sum(piano_roll,axis=0)
last_non_empty = np.nonzero(sum_cols)[0][-1]
piano_roll = piano_roll[:,0:last_non_empty]
print('The piano roll now has shape:')
print(piano_roll.shape)
# plot the raw piano roll
fig = plt.figure()
ax_orig = fig.add_subplot(111)
ax_orig.imshow(piano_roll,aspect='auto',origin='bottom')
ax_orig.set_title('Original piano roll')
'''
See which columns are duplicates of the previous one.
'''
is_dup = np.zeros(piano_roll.shape[1])
for tick in range(piano_roll.shape[1]):
if tick>0 and np.array_equal(piano_roll[:,tick],piano_roll[:,tick-1]):
is_dup[tick] = 1
'''
Find the minimum spacing between columns that aren't unique. This will define
the maximum amount of down-sampling possible without losing information.
'''
new_cols = np.where(is_dup==0) # indices of columns that aren't just duplicates of the previous
new_cols_diff = np.diff(new_cols)
resample_to = 30
# plot the distribution of spacings
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(new_cols_diff.transpose(),bins=np.arange(np.max(new_cols_diff)))
ax.set_ylabel('Number of occurances')
ax.set_xlabel('Spacing between non-duplicate columns in piano_roll')
'''
Downsample
'''
def any_notes_on(chunk):
'''
given a vertical slice of a piano roll, return which notes are on for any
portion of the slice
'''
note_occurances = np.sum(chunk,axis=1)
on_notes = np.array(list(map(lambda x: 1 if x>0 else 0, note_occurances)))
return on_notes
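# --- Illustrative sketch (not part of the original script) -----------------
# Tiny self-contained check of any_notes_on: a 3-note by 3-tick chunk in
# which note 1 sounds on the first tick only and notes 0 and 2 never sound.
_demo_chunk = np.array([[0, 0, 0],
                        [1, 0, 0],
                        [0, 0, 0]])
# any_notes_on(_demo_chunk) -> array([0, 1, 0])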
# with resample_to defined, get the divisions of the "chunks" to downsample
spacings = np.arange(0,np.shape(piano_roll)[1],resample_to,dtype=int)
end_idxs = spacings[1::]
start_idxs = spacings[:-1]
# illustrate the chunks on the original matrix plot of the piano roll.
# this can be used to check visually that they downsampling is in the right place
[ax_orig.axvline(s) for s in spacings]
# for each chunk generate the resampled array, and add to the resampled roll
piano_roll_resampled = np.zeros([num_notes,len(start_idxs)])
for i,start_idx in enumerate(start_idxs):
end_idx = end_idxs[i]
chunk = piano_roll[:,start_idx:end_idx-1]
on_notes = any_notes_on(chunk)
piano_roll_resampled[:,i] = on_notes
print('The downsampled piano roll has shape:')
print(piano_roll_resampled.shape)
# Plot the resampled piano roll.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(piano_roll_resampled,aspect='auto',origin='bottom')
ax.set_title('Resampled piano roll') | mit |
kdebrab/pandas | pandas/conftest.py | 1 | 9488 | import os
import importlib
import pytest
import pandas
import numpy as np
import pandas as pd
from pandas.compat import PY3
import pandas.util._test_decorators as td
def pytest_addoption(parser):
parser.addoption("--skip-slow", action="store_true",
help="skip slow tests")
parser.addoption("--skip-network", action="store_true",
help="skip network tests")
parser.addoption("--run-high-memory", action="store_true",
help="run high memory tests")
parser.addoption("--only-slow", action="store_true",
help="run only slow tests")
parser.addoption("--strict-data-files", action="store_true",
help="Fail if a test is skipped for missing data file.")
def pytest_runtest_setup(item):
if 'slow' in item.keywords and item.config.getoption("--skip-slow"):
pytest.skip("skipping due to --skip-slow")
if 'slow' not in item.keywords and item.config.getoption("--only-slow"):
pytest.skip("skipping due to --only-slow")
if 'network' in item.keywords and item.config.getoption("--skip-network"):
pytest.skip("skipping due to --skip-network")
if 'high_memory' in item.keywords and not item.config.getoption(
"--run-high-memory"):
pytest.skip(
"skipping high memory test since --run-high-memory was not set")
# Configurations for all tests and all test modules
@pytest.fixture(autouse=True)
def configure_tests():
pd.set_option('chained_assignment', 'raise')
# For running doctests: make np and pd names available
@pytest.fixture(autouse=True)
def add_imports(doctest_namespace):
doctest_namespace['np'] = np
doctest_namespace['pd'] = pd
@pytest.fixture(params=['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'])
def spmatrix(request):
from scipy import sparse
return getattr(sparse, request.param + '_matrix')
@pytest.fixture
def ip():
"""
Get an instance of IPython.InteractiveShell.
Will raise a skip if IPython is not installed.
"""
pytest.importorskip('IPython', minversion="6.0.0")
from IPython.core.interactiveshell import InteractiveShell
return InteractiveShell()
@pytest.fixture(params=[True, False, None])
def observed(request):
""" pass in the observed keyword to groupby for [True, False]
This indicates whether categoricals should return values for
values which are not in the grouper [False / None], or only values which
    appear in the grouper [True]. [None] is supported for future compatibility
if we decide to change the default (and would need to warn if this
parameter is not passed)"""
return request.param
_all_arithmetic_operators = ['__add__', '__radd__',
'__sub__', '__rsub__',
'__mul__', '__rmul__',
'__floordiv__', '__rfloordiv__',
'__truediv__', '__rtruediv__',
'__pow__', '__rpow__',
'__mod__', '__rmod__']
if not PY3:
_all_arithmetic_operators.extend(['__div__', '__rdiv__'])
@pytest.fixture(params=_all_arithmetic_operators)
def all_arithmetic_operators(request):
"""
Fixture for dunder names for common arithmetic operations
"""
return request.param
@pytest.fixture(params=['__eq__', '__ne__', '__le__',
'__lt__', '__ge__', '__gt__'])
def all_compare_operators(request):
"""
Fixture for dunder names for common compare operations
* >=
* >
* ==
* !=
* <
* <=
"""
return request.param
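# --- Illustrative note (not part of the original conftest) -----------------
# A test consumes a parametrized fixture simply by naming it as an argument;
# pytest then runs the test once per parameter. Hypothetical example (the
# test name and assertion are illustrative only):
#
#     def test_compare_dispatch(all_compare_operators):
#         op = getattr(pd.Series([1, 2]), all_compare_operators)
#         assert op(pd.Series([2, 1])).dtype == bool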
@pytest.fixture(params=[None, 'gzip', 'bz2', 'zip',
pytest.param('xz', marks=td.skip_if_no_lzma)])
def compression(request):
"""
Fixture for trying common compression types in compression tests
"""
return request.param
@pytest.fixture(params=['gzip', 'bz2', 'zip',
pytest.param('xz', marks=td.skip_if_no_lzma)])
def compression_only(request):
"""
Fixture for trying common compression types in compression tests excluding
uncompressed case
"""
return request.param
@pytest.fixture(params=[True, False])
def writable(request):
"""
Fixture that an array is writable
"""
return request.param
@pytest.fixture(scope='module')
def datetime_tz_utc():
from datetime import timezone
return timezone.utc
@pytest.fixture(params=['inner', 'outer', 'left', 'right'])
def join_type(request):
"""
Fixture for trying all types of join operations
"""
return request.param
@pytest.fixture
def datapath(request):
"""Get the path to a data file.
Parameters
----------
path : str
Path to the file, relative to ``pandas/tests/``
Returns
-------
path : path including ``pandas/tests``.
Raises
------
ValueError
If the path doesn't exist and the --strict-data-files option is set.
"""
BASE_PATH = os.path.join(os.path.dirname(__file__), 'tests')
def deco(*args):
path = os.path.join(BASE_PATH, *args)
if not os.path.exists(path):
if request.config.getoption("--strict-data-files"):
msg = "Could not find file {} and --strict-data-files is set."
raise ValueError(msg.format(path))
else:
msg = "Could not find {}."
pytest.skip(msg.format(path))
return path
return deco
@pytest.fixture
def iris(datapath):
"""The iris dataset as a DataFrame."""
return pandas.read_csv(datapath('data', 'iris.csv'))
@pytest.fixture(params=['nlargest', 'nsmallest'])
def nselect_method(request):
"""
Fixture for trying all nselect methods
"""
return request.param
@pytest.fixture(params=['left', 'right', 'both', 'neither'])
def closed(request):
"""
Fixture for trying all interval closed parameters
"""
return request.param
@pytest.fixture(params=[None, np.nan, pd.NaT, float('nan'), np.float('NaN')])
def nulls_fixture(request):
"""
Fixture for each null type in pandas
"""
return request.param
nulls_fixture2 = nulls_fixture # Generate cartesian product of nulls_fixture
TIMEZONES = [None, 'UTC', 'US/Eastern', 'Asia/Tokyo', 'dateutil/US/Pacific',
'dateutil/Asia/Singapore']
@td.parametrize_fixture_doc(str(TIMEZONES))
@pytest.fixture(params=TIMEZONES)
def tz_naive_fixture(request):
"""
Fixture for trying timezones including default (None): {0}
"""
return request.param
@td.parametrize_fixture_doc(str(TIMEZONES[1:]))
@pytest.fixture(params=TIMEZONES[1:])
def tz_aware_fixture(request):
"""
Fixture for trying explicit timezones: {0}
"""
return request.param
UNSIGNED_INT_DTYPES = ["uint8", "uint16", "uint32", "uint64"]
SIGNED_INT_DTYPES = [int, "int8", "int16", "int32", "int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
FLOAT_DTYPES = [float, "float32", "float64"]
COMPLEX_DTYPES = [complex, "complex64", "complex128"]
STRING_DTYPES = [str, 'str', 'U']
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = ALL_REAL_DTYPES + COMPLEX_DTYPES + STRING_DTYPES
@pytest.fixture(params=STRING_DTYPES)
def string_dtype(request):
"""Parametrized fixture for string dtypes.
* str
* 'str'
* 'U'
"""
return request.param
@pytest.fixture(params=FLOAT_DTYPES)
def float_dtype(request):
"""
Parameterized fixture for float dtypes.
* float32
* float64
"""
return request.param
@pytest.fixture(params=COMPLEX_DTYPES)
def complex_dtype(request):
"""
Parameterized fixture for complex dtypes.
* complex64
* complex128
"""
return request.param
@pytest.fixture(params=SIGNED_INT_DTYPES)
def sint_dtype(request):
"""
Parameterized fixture for signed integer dtypes.
* int8
* int16
* int32
* int64
"""
return request.param
@pytest.fixture(params=UNSIGNED_INT_DTYPES)
def uint_dtype(request):
"""
Parameterized fixture for unsigned integer dtypes.
* uint8
* uint16
* uint32
* uint64
"""
return request.param
@pytest.fixture(params=ALL_INT_DTYPES)
def any_int_dtype(request):
"""
Parameterized fixture for any integer dtypes.
* int8
* uint8
* int16
* uint16
* int32
* uint32
* int64
* uint64
"""
return request.param
@pytest.fixture(params=ALL_REAL_DTYPES)
def any_real_dtype(request):
"""
Parameterized fixture for any (purely) real numeric dtypes.
* int8
* uint8
* int16
* uint16
* int32
* uint32
* int64
* uint64
* float32
* float64
"""
return request.param
@pytest.fixture(params=ALL_NUMPY_DTYPES)
def any_numpy_dtype(request):
"""
Parameterized fixture for all numpy dtypes.
* int8
* uint8
* int16
* uint16
* int32
* uint32
* int64
* uint64
* float32
* float64
* complex64
* complex128
* str
* 'str'
* 'U'
"""
return request.param
@pytest.fixture
def mock():
"""
Fixture providing the 'mock' module.
Uses 'unittest.mock' for Python 3. Attempts to import the 3rd party 'mock'
package for Python 2, skipping if not present.
"""
if PY3:
return importlib.import_module("unittest.mock")
else:
return pytest.importorskip("mock")
| bsd-3-clause |
mattgiguere/scikit-learn | sklearn/tree/tests/test_export.py | 9 | 2889 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = "digraph Tree {\n" \
"0 [label=\"X[0] <= 0.0000\\ngini = 0.5\\n" \
"samples = 6\", shape=\"box\"] ;\n" \
"1 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 3. 0.]\", shape=\"box\"] ;\n" \
"0 -> 1 ;\n" \
"2 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 0. 3.]\", shape=\"box\"] ;\n" \
"0 -> 2 ;\n" \
"}"
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = "digraph Tree {\n" \
"0 [label=\"feature0 <= 0.0000\\ngini = 0.5\\n" \
"samples = 6\", shape=\"box\"] ;\n" \
"1 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 3. 0.]\", shape=\"box\"] ;\n" \
"0 -> 1 ;\n" \
"2 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 0. 3.]\", shape=\"box\"] ;\n" \
"0 -> 2 ;\n" \
"}"
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0)
contents1 = out.getvalue()
contents2 = "digraph Tree {\n" \
"0 [label=\"X[0] <= 0.0000\\ngini = 0.5\\n" \
"samples = 6\", shape=\"box\"] ;\n" \
"1 [label=\"(...)\", shape=\"box\"] ;\n" \
"0 -> 1 ;\n" \
"2 [label=\"(...)\", shape=\"box\"] ;\n" \
"0 -> 2 ;\n" \
"}"
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
tseaver/google-cloud-python | videointelligence/docs/conf.py | 2 | 11993 | # -*- coding: utf-8 -*-
#
# google-cloud-video-intelligence documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
__version__ = "0.1.0"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.6.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_flags = ["members"]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# Allow markdown includes (so releases.md can include CHANGELOG.md)
# http://www.sphinx-doc.org/en/master/markdown.html
source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"google-cloud-video-intelligence"
copyright = u"2017, Google"
author = u"Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for Python",
"github_user": "googleapis",
"github_repo": "google-cloud-python",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-video-intelligence-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"google-cloud-video-intelligence.tex",
u"google-cloud-video-intelligence Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"google-cloud-video-intelligence",
u"google-cloud-video-intelligence Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"google-cloud-video-intelligence",
u"google-cloud-video-intelligence Documentation",
author,
"google-cloud-video-intelligence",
"GAPIC library for the {metadata.shortName} v1 service",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("http://python.readthedocs.org/en/latest/", None),
"gax": ("https://gax-python.readthedocs.org/en/latest/", None),
"google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
"google-gax": ("https://gax-python.readthedocs.io/en/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None),
"grpc": ("https://grpc.io/grpc/python/", None),
"requests": ("https://requests.kennethreitz.org/en/master/", None),
"fastavro": ("https://fastavro.readthedocs.io/en/stable/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| apache-2.0 |
soulmachine/scikit-learn | sklearn/datasets/base.py | 3 | 17761 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets: dictionary-like object that
exposes its keys as attributes."""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
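# --- Illustrative note (not part of the original scikit-learn source) ---
# Because ``self.__dict__ = self`` makes the instance its own attribute
# dictionary, key access and attribute access are interchangeable:
#
#     b = Bunch(data=[1, 2, 3], target_names=['a', 'b', 'c'])
#     assert b['data'] is b.data
#     assert b.target_names[0] == 'a'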
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
    variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
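# --- Illustrative behaviour (paths shown are examples only) ---
#     get_data_home()                      # '~/scikit_learn_data' expanded to the user's home
#     get_data_home('/tmp/sklearn_cache')  # '/tmp/sklearn_cache', created if it does not exist
# Setting the SCIKIT_LEARN_DATA environment variable overrides the default location.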
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = [open(filename, 'rb').read() for filename in filenames]
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
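# --- Illustrative usage sketch (hypothetical path, not part of the original source) ---
#     dataset = load_files('/path/to/container_folder', encoding='utf-8')
#     dataset.target_names          # one entry per sub-folder (category)
#     len(dataset.data) == len(dataset.target) == len(dataset.filenames)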
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
descr = open(join(module_path, 'descr', 'digits.rst')).read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
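# --- Illustrative shapes, as documented above (442 samples, 10 features) ---
#     d = load_diabetes()
#     d.data.shape     # (442, 10)
#     d.target.shape   # (442,)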
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
data_file = csv.reader(open(join(module_path, 'data',
'boston_house_prices.csv')))
fdescr = open(join(module_path, 'descr', 'boston_house_prices.rst'))
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=fdescr.read())
def load_sample_images():
"""Load sample images for image manipulation.
    Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |
waltervh/BornAgain | Examples/python/simulation/ex06_Reflectometry/BeamAngularDivergence.py | 2 | 3185 | """
An example of taking into account beam angular divergence
and beam footprint correction in reflectometry calculations
with BornAgain.
"""
import numpy as np
import bornagain as ba
from os import path
# input parameters
wavelength = 1.54 * ba.angstrom
alpha_i_min = 0.0 * ba.deg # min incident angle, deg
alpha_i_max = 2.0 * ba.deg # max incident angle, deg
beam_sample_ratio = 0.01 # beam-to-sample size ratio
# convolution parameters
d_ang = 0.01 * ba.deg # spread width for incident angle
n_sig = 3 # number of sigmas to convolve over
n_points = 25 # number of points to convolve over
# substrate (Si)
si_sld_real = 2.0704e-06 # \AA^{-2}
# layer parameters
n_repetitions = 10
# Ni
ni_sld_real = 9.4245e-06 # \AA^{-2}
d_ni = 70 * ba.angstrom
# Ti
ti_sld_real = -1.9493e-06 # \AA^{-2}
d_ti = 30 * ba.angstrom
def get_sample():
# defining materials
m_air = ba.MaterialBySLD("Air", 0.0, 0.0)
m_ni = ba.MaterialBySLD("Ni", ni_sld_real, 0.0)
m_ti = ba.MaterialBySLD("Ti", ti_sld_real, 0.0)
m_substrate = ba.MaterialBySLD("SiSubstrate", si_sld_real, 0.0)
air_layer = ba.Layer(m_air)
ni_layer = ba.Layer(m_ni, d_ni)
ti_layer = ba.Layer(m_ti, d_ti)
substrate_layer = ba.Layer(m_substrate)
multi_layer = ba.MultiLayer()
multi_layer.addLayer(air_layer)
for i in range(n_repetitions):
multi_layer.addLayer(ti_layer)
multi_layer.addLayer(ni_layer)
multi_layer.addLayer(substrate_layer)
return multi_layer
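# Note (illustrative, not part of the original example): the periodic part of
# the stack built above consists of n_repetitions Ti/Ni bilayers, so its total
# thickness is n_repetitions * (d_ti + d_ni) = 10 * (30 + 70) angstrom = 1000 angstrom,
# sandwiched between the semi-infinite air and Si substrate layers.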
def create_real_data():
"""
Loading data from genx_angular_divergence.dat
"""
filepath = path.join(path.dirname(path.realpath(__file__)),
"genx_angular_divergence.dat.gz")
ax_values, real_data = np.loadtxt(filepath,
usecols=(0, 1), skiprows=3, unpack=True)
    # translating axis values from double incident angle to incident angle
ax_values *= 0.5
return ax_values, real_data
def get_simulation(scan_size=500):
"""
Returns a specular simulation with beam and detector defined.
"""
footprint = ba.FootprintFactorSquare(beam_sample_ratio)
alpha_distr = ba.RangedDistributionGaussian(n_points, n_sig)
scan = ba.AngularSpecScan(wavelength, scan_size, alpha_i_min, alpha_i_max)
scan.setFootprintFactor(footprint)
scan.setAbsoluteAngularResolution(alpha_distr, d_ang)
simulation = ba.SpecularSimulation()
simulation.setScan(scan)
return simulation
def run_simulation():
"""
Runs simulation and returns it.
"""
sample = get_sample()
simulation = get_simulation()
simulation.setSample(sample)
simulation.runSimulation()
return simulation.result()
def plot(results):
"""
:param results:
:return:
"""
from matplotlib import pyplot as plt
ba.plot_simulation_result(results, postpone_show=True)
genx_axis, genx_values = create_real_data()
plt.semilogy(genx_axis, genx_values, 'ko', markevery=300)
plt.legend(['BornAgain',
'GenX'],
loc='upper right')
plt.show()
if __name__ == '__main__':
results = run_simulation()
plot(results)
| gpl-3.0 |
meteoswiss-mdr/precipattractor | pyscripts/growthdecay/plotting_functions.py | 1 | 7844 | #!/usr/bin/env python
from __future__ import division
from __future__ import print_function
import os
import sys
import matplotlib as mpl
mpl.use('Agg')
from PIL import Image
from mpl_toolkits.axes_grid1 import make_axes_locatable
import gis_base as gis
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import stat_tools_attractor as st
import data_tools_attractor as dt
import io_tools_attractor as io
import maple_dataload
geo = maple_dataload.generate_geo()
cmaps = maple_dataload.generate_colormaps()
def colorbar(mappable,r0,label=r' mm h$^{-1}$',labelsize=13,ticklabelsize=10):
ax = mappable.axes
fig = ax.figure
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = fig.colorbar(mappable, cax=cax, ticks=r0.clevs, spacing='uniform', norm=r0.norm, extend='max', orientation='vertical')
cbar.ax.tick_params(labelsize=labelsize)
cbar.set_ticklabels(r0.clevsStr, update_ticks=True)
    cax.text(1,0.5,label,fontsize=labelsize,transform=cax.transAxes,ha='left',va='top',rotation='vertical')
def set_axis_off(a):
a.set_xticks([])
a.set_yticks([])
a.set_xticklabels([])
a.set_yticklabels([])
def plot_frames_paper(startStr,t,r0,data,f,g,minR=0.01, plotmember=[0], gd_field=None, figsize=(16,5),labelsize=14):
plt.close()
# extract radar masks
nanmask = f.getmask
alphav = 0.5
proj4stringWGS84 = "+proj=longlat +ellps=WGS84 +datum=WGS84"
proj4stringCH = "+proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 \
+k_0=1 +x_0=600000 +y_0=200000 +ellps=bessel +towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs"
dem_path = '/store/mch/msrad/radar/precip_attractor/gis_data/dem/ccs4.png'
dem = Image.open(dem_path)
dem = dt.extract_middle_domain_img(dem, 512, 512)
dem = dem.convert('P')
nMembers = len(plotmember)
numberPlots = 2*nMembers+1 # nwc with gd, without gd, observations, gd field
if gd_field is not None:
numberPlots+=1
figmatrix = dt.optimal_size_subplot(numberPlots)
nrRows = figmatrix[0]
nrCols = figmatrix[1]
ratioWidthHeight = nrCols/nrRows
if nrRows == nrCols:
figsize = (11, 9.5)
else:
figsize = (13, 13/ratioWidthHeight)
# figmatrix = [1,3]
fig,axes = plt.subplots(nrows=nrRows, ncols=nrCols, figsize=figsize)
axes_stack = []
# OBS
a = plt.subplot(nrRows, nrCols, 1)
axes_stack.append(a)
fplot = data.obs[t,0,:,:]
fplot[fplot<=minR]=np.nan
obsmask = np.ones(fplot.shape)
obsmask[data.mask==1] = np.nan
a.imshow(dem, extent=r0.extent, vmin=100, vmax=3000, cmap = plt.get_cmap('gray'), alpha=0.8)
obsIm = a.imshow(fplot,extent = r0.extent, interpolation='nearest', cmap=r0.cmap, norm=r0.norm)
a.imshow(obsmask,extent=r0.extent,interpolation='nearest',cmap=r0.cmapMask,alpha=alphav)
plt.text(0,.99,'radar QPE', fontsize=labelsize,transform=a.transAxes, ha = 'left', va='top')
set_axis_off(a)
gis.read_plot_shapefile(geo.fileNameShapefile, geo.proj4stringCH, geo.proj4stringCH, ax=a, linewidth=0.75, alpha=0.5)
a.text(1,0,data.timestamps[t].strftime('%Y-%m-%d %H:%M' ), fontsize=labelsize,transform=a.transAxes, ha = 'right', va='bottom')
# FX
for m in range(0, nMembers):
# NOWCAST without GD
a = plt.subplot(nrRows, nrCols, 2+m*nMembers)
axes_stack.append(a)
if plotmember[m]<0: # ensemble mean
fplot = np.nanmean(f.getnowcast, axis=0)
idxmask = 0
else:
fplot = f.getnowcast[plotmember[m],:,:].copy()
idxmask = plotmember[m]
fplot[fplot<=minR]=np.nan
a.imshow(dem, extent=r0.extent, vmin=100, vmax=3000, cmap = plt.get_cmap('gray'), alpha=0.8)
bayIm = a.imshow(fplot, extent = r0.extent, interpolation='nearest', cmap=r0.cmap, norm=r0.norm)
# a.imshow(nanmask[idxmask,:,:],extent=r0.extent,interpolation='nearest',cmap=r0.cmapMask,alpha=alphav)
plt.text(0.01,.99,'+ growth/decay', fontsize=labelsize,transform=a.transAxes, ha = 'left', va='top', color='r')
if plotmember[m]<0:
plt.text(0.99,.99,'ensemble mean', fontsize=labelsize,transform=a.transAxes, ha = 'right', va='top')
else:
plt.text(0.99,.99,'member ' + str(m), fontsize=labelsize,transform=a.transAxes, ha = 'right', va='top')
set_axis_off(a)
gis.read_plot_shapefile(geo.fileNameShapefile, geo.proj4stringCH, geo.proj4stringCH, ax=a, linewidth=0.75, alpha=0.5)
a.text(1,0,'+%i min' % (t*10), fontsize=labelsize,transform=a.transAxes, ha = 'right', va='bottom')
# NOWCAST with GD
a = plt.subplot(nrRows, nrCols, 3+m*nMembers)
axes_stack.append(a)
if plotmember[m]<0: # ensemble mean
fplot = np.nanmean(g.getnowcast,axis=0)
idxmask = 0
else:
fplot = g.getnowcast[plotmember[m],:,:].copy()
idxmask = plotmember[m]
fplot[fplot<=minR]=np.nan
a.imshow(dem, extent=r0.extent, vmin=100, vmax=3000, cmap = plt.get_cmap('gray'), alpha=0.8)
bayIm = a.imshow(fplot,extent = r0.extent, interpolation='nearest', cmap=r0.cmap, norm=r0.norm)
# a.imshow(nanmask[idxmask,:,:],extent=r0.extent,interpolation='nearest',cmap=r0.cmapMask,alpha=alphav)
plt.text(0.01,.99,'just extrapolation', fontsize=labelsize,transform=a.transAxes, ha = 'left', va='top', color='b')
if plotmember[m]<0:
plt.text(0.99,.99,'ensemble mean', fontsize=labelsize,transform=a.transAxes, ha = 'right', va='top')
else:
plt.text(0.99,.99,'member ' + str(m), fontsize=labelsize,transform=a.transAxes, ha = 'right', va='top')
set_axis_off(a)
gis.read_plot_shapefile(geo.fileNameShapefile, geo.proj4stringCH, geo.proj4stringCH, ax=a, linewidth=0.75, alpha=0.5)
a.text(1,0,'+%i min' % (t*10), fontsize=labelsize,transform=a.transAxes, ha = 'right', va='bottom')
# Plot GD
if gd_field is not None:
a = plt.subplot(nrRows, nrCols, 2+m+nMembers+1)
axes_stack.append(a)
gdIm = plt.imshow(gd_field, extent=r0.extent, cmap=cmaps.cmapLog, norm=cmaps.normLog, interpolation='nearest')
gis.read_plot_shapefile(geo.fileNameShapefile, geo.proj4stringCH, geo.proj4stringCH, ax=a, linewidth=0.75, alpha=0.5)
set_axis_off(a)
plt.text(0.01,.99,'growth/decay field', fontsize=labelsize,transform=a.transAxes, ha = 'left', va='top')
# # Colorbar
# divider = make_axes_locatable(ax)
# cax = divider.append_axes("right", size="5%", pad=0.05)
# clb = fig.colorbar(im, cax=cax, ticks=clevs, spacing='uniform', norm=norm, extend='both')
# cbTitle = ' dB'
# clb.ax.set_title(cbTitle, fontsize=18)
# clb.ax.tick_params(labelsize=17)
plt.tight_layout()
# Rainfall colormap
if numberPlots <= 4:
xpos = 0.95
else:
xpos = 0.93
fig.subplots_adjust(right=xpos)
cax = fig.add_axes([xpos, 0.52, 0.015, 0.45])
cbar = fig.colorbar(bayIm, cax=cax, ticks=r0.clevs, spacing='uniform', norm=r0.norm, extend='max')
cbar.ax.tick_params(labelsize=10)
cbar.set_ticklabels(r0.clevsStr, update_ticks=True)
cbar.ax.set_title('mm h$^{-1}$',fontsize=11) #transform=axes[0, nrCols-1].transAxes
# GD colormap
fig.subplots_adjust(right=xpos)
cax = fig.add_axes([xpos, 0.02, 0.015, 0.45])
cbar = fig.colorbar(gdIm, cax=cax, cmap=cmaps.cmapLog, ticks=cmaps.clevsLog, norm=cmaps.normLog, extend='both')
cbar.ax.tick_params(labelsize=10)
cbar.ax.set_title('dB',fontsize=11)
figname = 'tmp/fig_frame_' + startStr + '_' + str(t).zfill(2) + '.png'
return(figname, axes_stack)
| gpl-3.0 |
youssef-emad/shogun | examples/undocumented/python_static/graphical/util.py | 22 | 1225 | """ Utilities for matplotlib examples """
import pylab
from numpy import ones, array, meshgrid, linspace, concatenate, ravel, min, max
from numpy.random import randn
QUITKEY='q'
NUM_EXAMPLES=200
DISTANCE=2
def quit (event):
if event.key==QUITKEY or event.key==QUITKEY.upper():
pylab.close()
def set_title (title):
quitmsg=" (press '"+QUITKEY+"' to quit)"
complete=title+quitmsg
manager=pylab.get_current_fig_manager()
# now we have to wrap the toolkit
if hasattr(manager, 'window'):
if hasattr(manager.window, 'setCaption'): # QT
manager.window.setCaption(complete)
if hasattr(manager.window, 'set_title'): # GTK
manager.window.set_title(complete)
elif hasattr(manager.window, 'title'): # TK
manager.window.title(complete)
def get_traindata():
return concatenate(
(randn(2, NUM_EXAMPLES)+DISTANCE, randn(2, NUM_EXAMPLES)-DISTANCE),
axis=1)
def get_meshgrid(traindata):
x1=linspace(1.2*min(traindata), 1.2*max(traindata), 50)
x2=linspace(1.2*min(traindata), 1.2*max(traindata), 50)
return meshgrid(x1,x2)
def get_testdata(x, y):
return array((ravel(x), ravel(y)))
def get_labels(raw=False):
return concatenate(
(-ones([1, NUM_EXAMPLES]), ones([1, NUM_EXAMPLES])),
axis=1)[0]
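# --- Illustrative shapes (assuming NUM_EXAMPLES=200 and DISTANCE=2 as above) ---
#     traindata = get_traindata()          # (2, 400): two Gaussian blobs shifted by +/- DISTANCE
#     labels    = get_labels()             # (400,): -1 for the first blob, +1 for the second
#     x, y      = get_meshgrid(traindata)  # two 50x50 grids spanning 1.2x the data range
#     testdata  = get_testdata(x, y)       # (2, 2500): flattened grid points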
| gpl-3.0 |
armandosrz/UdacityNanoMachine | boston_housing/visuals.py | 14 | 4878 | ###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
###########################################
import matplotlib.pyplot as pl
import numpy as np
import sklearn.learning_curve as curves
from sklearn.tree import DecisionTreeRegressor
from sklearn.cross_validation import ShuffleSplit, train_test_split
def ModelLearning(X, y):
""" Calculates the performance of several models with varying sizes of training data.
The learning and testing scores for each model are then plotted. """
# Create 10 cross-validation sets for training and testing
cv = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.2, random_state = 0)
# Generate the training set sizes increasing by 50
train_sizes = np.rint(np.linspace(1, X.shape[0]*0.8 - 1, 9)).astype(int)
# Create the figure window
fig = pl.figure(figsize=(10,7))
# Create three different models based on max_depth
for k, depth in enumerate([1,3,6,10]):
# Create a Decision tree regressor at max_depth = depth
regressor = DecisionTreeRegressor(max_depth = depth)
# Calculate the training and testing scores
sizes, train_scores, test_scores = curves.learning_curve(regressor, X, y, \
cv = cv, train_sizes = train_sizes, scoring = 'r2')
# Find the mean and standard deviation for smoothing
train_std = np.std(train_scores, axis = 1)
train_mean = np.mean(train_scores, axis = 1)
test_std = np.std(test_scores, axis = 1)
test_mean = np.mean(test_scores, axis = 1)
# Subplot the learning curve
ax = fig.add_subplot(2, 2, k+1)
ax.plot(sizes, train_mean, 'o-', color = 'r', label = 'Training Score')
ax.plot(sizes, test_mean, 'o-', color = 'g', label = 'Testing Score')
ax.fill_between(sizes, train_mean - train_std, \
train_mean + train_std, alpha = 0.15, color = 'r')
ax.fill_between(sizes, test_mean - test_std, \
test_mean + test_std, alpha = 0.15, color = 'g')
# Labels
ax.set_title('max_depth = %s'%(depth))
ax.set_xlabel('Number of Training Points')
ax.set_ylabel('Score')
ax.set_xlim([0, X.shape[0]*0.8])
ax.set_ylim([-0.05, 1.05])
# Visual aesthetics
ax.legend(bbox_to_anchor=(1.05, 2.05), loc='lower left', borderaxespad = 0.)
fig.suptitle('Decision Tree Regressor Learning Performances', fontsize = 16, y = 1.03)
fig.tight_layout()
fig.show()
def ModelComplexity(X, y):
""" Calculates the performance of the model as model complexity increases.
The learning and testing errors rates are then plotted. """
# Create 10 cross-validation sets for training and testing
cv = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.2, random_state = 0)
# Vary the max_depth parameter from 1 to 10
max_depth = np.arange(1,11)
# Calculate the training and testing scores
train_scores, test_scores = curves.validation_curve(DecisionTreeRegressor(), X, y, \
param_name = "max_depth", param_range = max_depth, cv = cv, scoring = 'r2')
# Find the mean and standard deviation for smoothing
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
# Plot the validation curve
pl.figure(figsize=(7, 5))
pl.title('Decision Tree Regressor Complexity Performance')
pl.plot(max_depth, train_mean, 'o-', color = 'r', label = 'Training Score')
pl.plot(max_depth, test_mean, 'o-', color = 'g', label = 'Validation Score')
pl.fill_between(max_depth, train_mean - train_std, \
train_mean + train_std, alpha = 0.15, color = 'r')
pl.fill_between(max_depth, test_mean - test_std, \
test_mean + test_std, alpha = 0.15, color = 'g')
# Visual aesthetics
pl.legend(loc = 'lower right')
pl.xlabel('Maximum Depth')
pl.ylabel('Score')
pl.ylim([-0.05,1.05])
pl.show()
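# --- Illustrative usage sketch (hypothetical names; `features` and `prices` are
# assumed to be the Boston housing feature matrix and target vector) ---
#     ModelLearning(features, prices)      # 2x2 grid of learning curves for max_depth in {1, 3, 6, 10}
#     ModelComplexity(features, prices)    # training/validation curves over max_depth 1..10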
def PredictTrials(X, y, fitter, data):
""" Performs trials of fitting and predicting data. """
# Store the predicted prices
prices = []
for k in range(10):
# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, \
test_size = 0.2, random_state = k)
# Fit the data
reg = fitter(X_train, y_train)
# Make a prediction
pred = reg.predict([data[0]])[0]
prices.append(pred)
# Result
print "Trial {}: ${:,.2f}".format(k+1, pred)
# Display price range
print "\nRange in prices: ${:,.2f}".format(max(prices) - min(prices)) | apache-2.0 |
jpn--/larch | larch/util/figures.py | 1 | 35574 |
from matplotlib import pyplot as plt
import pandas, numpy
from .plotting import plot_as_svg_xhtml
def pseudo_bar_data(x_bins, y, gap=0):
"""
Parameters
----------
x_bins : array-like, shape=(N+1,)
The bin boundaries
y : array-like, shape=(N,)
The bar heights
Returns
-------
x, y
"""
# midpoints = (x_bins[1:] + x_bins[:-1]) / 2
# widths = x_bins[1:] - x_bins[:-1]
if gap:
x_doubled = numpy.zeros(((x_bins.shape[0] - 1) * 4), dtype=numpy.float)
x_doubled[::4] = x_bins[:-1]
x_doubled[1::4] = x_bins[:-1]
x_doubled[2::4] = x_bins[1:] - gap
x_doubled[3::4] = x_bins[1:] - gap
y_doubled = numpy.zeros(((y.shape[0]) * 4), dtype=y.dtype)
y_doubled[1::4] = y
y_doubled[2::4] = y
else:
x_doubled = numpy.zeros((x_bins.shape[0] - 1) * 2, dtype=x_bins.dtype)
x_doubled[::2] = x_bins[:-1]
x_doubled[1::2] = x_bins[1:]
y_doubled = numpy.zeros((y.shape[0]) * 2, dtype=y.dtype)
y_doubled[::2] = y
y_doubled[1::2] = y
return x_doubled, y_doubled
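# A minimal worked example (illustrative): with gap=0 each bin edge and bar
# height is simply doubled so that plot()/fill_between() trace a step outline.
#
#     x_bins = numpy.array([0., 1., 2.])
#     y = numpy.array([3., 5.])
#     pseudo_bar_data(x_bins, y)
#     # -> (array([0., 1., 1., 2.]), array([3., 3., 5., 5.]))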
def distribution_figure(
x,
probability,
choices=None,
availability=None,
xlabel=None,
ylabel='Relative Frequency',
style='hist',
bins=None,
pct_bins=20,
range=None,
prob_label="Modeled",
obs_label="Observed",
bw_method=None,
discrete=None,
ax=None,
format='ax',
accumulator=False,
xscale=None,
xmajorticks=None,
xminorticks=None,
coincidence_ratio=False,
**kwargs,
):
"""
Generate a figure of observed and modeled choices over a range of variable values.
Parameters
----------
x : array-like
An array giving values for some variable.
probability : array-like
The pre-calculated probability array for all cases in this analysis.
Must be the same shape as `x`.
choices : array-like, optional
The observed choices array for all cases in this analysis. If provided,
must be the same shape as `x`.
availability : array-like, optional
The availability array for all cases in this analysis. If provided,
must be the same shape as `x`.
xlabel : str, optional
A label to use for the x-axis of the resulting figure. If not given,
the value of `x.name` is used if it is defined. Set to `False` to omit the
x-axis label even if `x.name` is defined.
ylabel : str, default "Relative Frequency"
A label to use for the y-axis of the resulting figure.
style : {'hist', 'kde'}
The style of figure to produce, either a histogram or a kernel density plot.
    bins : int, optional
        The number of bins to use, only applicable to histogram style.
range : 2-tuple, optional
A range to truncate the figure.
prob_label : str, default "Modeled"
A label to put in the legend for the modeled probabilities
obs_label : str, default "Observed"
A label to put in the legend for the observed choices
ax : matplotlib.Axes, optional
If given, the figure will be drawn on these axes and they will be returned,
otherwise new blank axes are used to draw the figure.
    format : {'ax', 'svg', 'png'}, default 'ax'
        How to return the result if it is a figure. The default is to return
        the raw matplotlib Axes instance. Change this to `svg` to get an SVG
        rendering as an xmle.Elem.
accumulator : bool, default False
        Add a net cumulative trend on the bottom.
Returns
-------
Elem or Axes
Returns `ax` if given as an argument, otherwise returns a rendering as an Elem
"""
_coincidence_ratio = None
if xlabel is None:
try:
xlabel = x.name
except AttributeError:
pass
discrete_values = None
if discrete:
discrete_values = numpy.unique(x)
elif discrete is None:
from .histograms import seems_like_discrete_data
discrete, discrete_values = seems_like_discrete_data(numpy.asarray(x).reshape(-1), return_uniques=True)
x_discrete_labels = None if discrete_values is None else [str(i) for i in discrete_values]
if bins is None:
if x_discrete_labels is not None:
# Discrete bins using defined labels
bins = numpy.arange(len(x_discrete_labels)+1)
if isinstance(x.dtype, pandas.CategoricalDtype):
# Discrete bins using implied labels
discrete_values = numpy.arange(len(x_discrete_labels))
bins = numpy.arange(len(x_discrete_labels)+1)
x = x.cat.codes
else:
x_ = x
if availability is not None:
x_ = x[availability != 0]
low_pctile = 0
high_pctile = 100
if range:
import scipy.stats
if range[0] is not None:
low_pctile = scipy.stats.percentileofscore(x_, range[0])
if range[1] is not None:
high_pctile = scipy.stats.percentileofscore(x_, range[1])
if isinstance(pct_bins, int):
bins = numpy.percentile(x_, numpy.linspace(low_pctile, high_pctile, pct_bins + 1))
else:
bins = numpy.percentile(x_, pct_bins)
elif isinstance(bins, int) and availability is not None:
# Equal width bin generation using only available alternatives
x_ = x[availability != 0]
if range:
range_low, range_high = range
if range_low is None:
range_low = x_.min()
if range_high is None:
range_high = x_.max()
else:
range_low = x_.min()
range_high = x_.max()
bins = numpy.linspace(range_low, range_high, bins + 1)
model_result = probability
model_choice = choices
if style == 'kde':
import scipy.stats
kernel_result = scipy.stats.gaussian_kde(x, bw_method=bw_method, weights=model_result.reshape(-1))
common_bw = kernel_result.covariance_factor()
if model_choice is not None:
kernel_choice = scipy.stats.gaussian_kde(x, bw_method=common_bw, weights=model_choice.reshape(-1))
else:
kernel_choice = None
if not range:
x_ = x
if availability is not None:
x_ = x[availability != 0]
range = (x_.min(), x_.max())
x_points = numpy.linspace(*range, 250)
y_points_1 = kernel_result(x_points)
y_points_2 = kernel_choice(x_points)
else:
shift = 0.4 if discrete else 0
gap = 0.2 if discrete else 0
if range:
range_low, range_high = range
if range_low is None:
range_low = x.min()
if range_high is None:
range_high = x.max()
range = (range_low, range_high)
y1, x1 = numpy.histogram(
x,
weights=model_result.reshape(-1),
bins=bins,
range=range,
density=True,
)
x_points, y_points_1 = pseudo_bar_data(x1 - shift, y1, gap=gap)
if model_choice is not None:
y2, x2 = numpy.histogram(
x,
weights=model_choice.reshape(-1),
bins=x1,
density=True,
)
x_points, y_points_2 = pseudo_bar_data(x1 - shift, y2, gap=gap)
if coincidence_ratio:
_coincidence_ratio = numpy.minimum(y1, y2).sum() / numpy.maximum(y1, y2).sum()
if xlabel is False:
xlabel = None
if accumulator and model_choice is not None:
fig, (ax, ax2) = plt.subplots(2, 1, gridspec_kw={'height_ratios': [3, 1], 'hspace':0}, sharex='col')
cum_sum = (y1 - y2).cumsum()
x1plus = (x1[:-1] + x1[1:])/2
ax2.plot(x1plus, cum_sum, color='k', lw=0.5)
ax2.fill_between(x1plus, numpy.fmin(cum_sum, 0), 0, facecolor='#ffa200', label=f"{prob_label} Ahead")
ax2.fill_between(x1plus, numpy.fmax(cum_sum, 0), 0, facecolor='#1f77b4', label=f"{obs_label} Ahead")
ax2.set_yticks([])
ax2.set_ylabel("Net Cum.")
ax2.legend()
elif ax is None:
fig, ax = plt.subplots()
else:
fig = None
if _coincidence_ratio:
ax.text(
0.5, 0.98,
f'Coincidence Ratio = {_coincidence_ratio:0.4f}',
horizontalalignment='center',
verticalalignment = 'top',
transform = ax.transAxes,
)
ax.bins = bins
ax.plot(x_points, y_points_1, label=prob_label, lw=1.5)
if model_choice is not None:
ax.fill_between(x_points, y_points_2, label=obs_label, step=None, facecolor='#ffbe4d', edgecolor='#ffa200', lw=1.5)
ax.legend()
if not discrete:
ax.set_xlim(x_points[0], x_points[-1])
if xscale:
if isinstance(xscale, str):
ax.set_xscale(xscale)
elif isinstance(xscale, dict):
ax.set_xscale(**xscale)
else:
raise ValueError(f"xscale must be str or dict, not {type(xscale)}")
if xmajorticks is not None:
ax.set_xticks(xmajorticks)
ax.set_xticklabels(xmajorticks)
if xminorticks is not None:
ax.set_xticks(xminorticks, minor=True)
if x_discrete_labels is not None:
ax.set_xticks(numpy.arange(len(x_discrete_labels)))
ax.set_xticklabels(x_discrete_labels)
if accumulator and model_choice is not None:
ax2.set_xlabel(xlabel)
else:
ax.set_xlabel(xlabel)
ax.set_yticks([])
ax.set_ylabel(ylabel)
if fig is None:
return ax
fig.tight_layout(pad=0.5)
if format == 'svg':
result = plot_as_svg_xhtml(fig, **kwargs)
fig.clf()
plt.close(fig)
elif format == 'png':
from .png import make_png
result = make_png(fig, **kwargs)
fig.clf()
plt.close(fig)
else:
result = ax
return result
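# --- Illustrative sketch with synthetic data (hypothetical names, not part of
# the original larch source): compare a modeled distribution against observed
# choices over a single variable using the function above. ---
#
#     rng = numpy.random.default_rng(0)
#     x = rng.normal(size=1000)
#     pr = rng.uniform(size=1000)                       # modeled probability per case
#     ch = (rng.uniform(size=1000) < pr).astype(float)  # simulated observed choices
#     ax = distribution_figure(x, pr, choices=ch, xlabel='x', style='hist')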
def distribution_on_idca_variable(
model,
x,
xlabel=None,
ylabel='Relative Frequency',
style='hist',
bins=None,
pct_bins=20,
range=None,
xlim=None,
prob_label="Modeled",
obs_label="Observed",
subselector=None,
probability=None,
bw_method=None,
discrete=None,
ax=None,
format='ax',
**kwargs,
):
"""
Generate a figure of observed and modeled choices over a range of variable values.
Parameters
----------
model : Model
The discrete choice model to analyze.
x : str or array-like
The name of an `idca` variable, or an array giving its values. If this name exactly
matches that of an `idca` column in the model's loaded `dataframes`, then
those values are used, otherwise the variable is loaded from the model's
`dataservice`.
xlabel : str, optional
A label to use for the x-axis of the resulting figure. If not given,
the value of `x` is used if it is a string. Set to `False` to omit the
x-axis label.
ylabel : str, default "Relative Frequency"
A label to use for the y-axis of the resulting figure.
style : {'hist', 'kde'}
The style of figure to produce, either a histogram or a kernel density plot.
    bins : int, optional
        The number of bins to use, only applicable to histogram style.
range : 2-tuple, optional
A range to truncate the figure. (alias `xlim`)
prob_label : str, default "Modeled"
A label to put in the legend for the modeled probabilities
obs_label : str, default "Observed"
A label to put in the legend for the observed choices
subselector : str or array-like, optional
A filter to apply to cases. If given as a string, this is loaded from the
model's `dataservice` as an `idco` variable.
probability : array-like, optional
The pre-calculated probability array for all cases in this analysis.
If not given, the probability array is calculated at the current parameter
values.
ax : matplotlib.Axes, optional
If given, the figure will be drawn on these axes and they will be returned,
otherwise new blank axes are used to draw the figure.
    format : {'ax', 'svg', 'png'}, default 'ax'
        How to return the result if it is a figure. The default is to return
        the raw matplotlib Axes instance. Change this to `svg` to get an SVG
        rendering as an xmle.Elem.
Other Parameters
----------------
header : str, optional
A header to attach to the figure. The header is not generated using
matplotlib, but instead is prepended to the xml output with a header tag before the
rendered svg figure.
Returns
-------
Elem or Axes
Returns `ax` if given as an argument, otherwise returns a rendering as an Elem
"""
if model is None:
return lambda mdl: distribution_on_idca_variable(
mdl,
xlabel=xlabel,
ylabel=ylabel,
style=style,
bins=bins,
pct_bins=pct_bins,
range=range,
xlim=xlim,
prob_label=prob_label,
obs_label=obs_label,
subselector=subselector,
probability=probability,
bw_method=bw_method,
discrete=discrete,
ax=ax,
format=format,
**kwargs,
)
if xlim is not None and range is None:
range = xlim
if isinstance(x, str):
x_label = x
if model.dataframes and model.dataframes.data_ca_or_ce is not None and x in model.dataframes.data_ca_or_ce:
x = model.dataframes.data_ca_or_ce[x].values.reshape(-1)
else:
x = model.dataservice.make_dataframes({'ca': [x]}, explicit=True).array_ca().reshape(-1)
else:
try:
x_label = x.name
except AttributeError:
x_label = ''
# if model.dataframes and model.dataframes.data_ca is not None and continuous_variable in model.dataframes.data_ca:
# cv = model.dataframes.data_ca[continuous_variable].values.reshape(-1)
# else:
# cv = model.dataservice.make_dataframes({'ca': [continuous_variable]}, explicit=True).array_ca().reshape(-1)
discrete_values = None
if discrete:
discrete_values = numpy.unique(x)
elif discrete is None:
from .histograms import seems_like_discrete_data
discrete, discrete_values = seems_like_discrete_data(numpy.asarray(x).reshape(-1), return_uniques=True)
x_discrete_labels = None if discrete_values is None else [str(i) for i in discrete_values]
if bins is None:
if x_discrete_labels is not None:
# Discrete bins using defined labels
bins = numpy.arange(len(x_discrete_labels)+1)
if isinstance(x.dtype, pandas.CategoricalDtype):
# Discrete bins using implied labels
discrete_values = numpy.arange(len(x_discrete_labels))
bins = numpy.arange(len(x_discrete_labels)+1)
x = x.cat.codes
else:
x_ = x
if model.dataframes.data_av is not None and model.dataframes.data_ca is not None:
x_ = x[model.dataframes.data_av.values.reshape(-1) != 0]
low_pctile = 0
high_pctile = 100
if range is not None:
import scipy.stats
if range[0] is not None:
low_pctile = scipy.stats.percentileofscore(x_, range[0])
if range[1] is not None:
high_pctile = scipy.stats.percentileofscore(x_, range[1])
if isinstance(pct_bins, int):
bins = numpy.percentile(x_, numpy.linspace(low_pctile, high_pctile, pct_bins + 1))
else:
bins = numpy.percentile(x_, pct_bins)
elif isinstance(bins, int) and model.dataframes.data_av is not None and model.dataframes.data_ca is not None:
# Equal width bin generation using only available alternatives
x_ = x[model.dataframes.data_av.values.reshape(-1) != 0]
if range is not None:
range_low, range_high = range
if range_low is None:
range_low = x_.min()
if range_high is None:
range_high = x_.max()
else:
range_low = x_.min()
range_high = x_.max()
bins = numpy.linspace(range_low, range_high, bins + 1)
if probability is None:
probability = model.probability()
model_result = probability[:, :model.dataframes.n_alts]
model_choice = model.dataframes.data_ch.values
if model.dataframes.data_wt is not None:
model_result = model_result.copy()
model_result *= model.dataframes.data_wt.values.reshape(-1,1)
model_choice = model_choice.copy()
model_choice *= model.dataframes.data_wt.values.reshape(-1,1)
if subselector is not None:
if isinstance(subselector, str):
subselector = model.dataservice.make_dataframes({'co': [subselector]}, explicit=True).array_co(dtype=bool).reshape(-1)
x = numpy.asarray(x).reshape(*model_result.shape)[subselector].reshape(-1)
model_result = model_result[subselector]
model_choice = model_choice[subselector]
if style == 'kde':
import scipy.stats
kernel_result = scipy.stats.gaussian_kde(x, bw_method=bw_method, weights=model_result.reshape(-1))
common_bw = kernel_result.covariance_factor()
kernel_choice = scipy.stats.gaussian_kde(x, bw_method=common_bw, weights=model_choice.reshape(-1))
if range is None:
x_ = x
if model.dataframes.data_av is not None and model.dataframes.data_ca is not None:
x_ = x[model.dataframes.data_av.values.reshape(-1) != 0]
range = (x_.min(), x_.max())
x_points = numpy.linspace(*range, 250)
y_points_1 = kernel_result(x_points)
y_points_2 = kernel_choice(x_points)
else:
if range is not None:
range_low, range_high = range
if range_low is None:
range_low = x.min()
if range_high is None:
range_high = x.max()
range = (range_low, range_high)
y_points_1, x1 = numpy.histogram(
x,
weights=model_result.reshape(-1),
bins=bins,
range=range,
density=True,
)
y_points_2, x2 = numpy.histogram(
x,
weights=model_choice.reshape(-1),
bins=x1,
density=True,
)
shift = 0.4 if discrete else 0
gap = 0.2 if discrete else 0
x_points, y_points_1 = pseudo_bar_data(x1 - shift, y_points_1, gap=gap)
x_points, y_points_2 = pseudo_bar_data(x1 - shift, y_points_2, gap=gap)
if xlabel is None:
xlabel = x_label
if xlabel is False:
xlabel = None
if ax is None:
fig, ax = plt.subplots()
else:
fig = None
ax.bins = bins
ax.plot(x_points, y_points_1, label=prob_label, lw=1.5)
ax.fill_between(x_points, y_points_2, label=obs_label, step=None, facecolor='#ffbe4d', edgecolor='#ffa200', lw=1.5)
ax.legend()
if not discrete:
ax.set_xlim(x_points[0], x_points[-1])
if x_discrete_labels is not None:
ax.set_xticks(numpy.arange(len(x_discrete_labels)))
ax.set_xticklabels(x_discrete_labels)
ax.set_xlabel(xlabel)
ax.set_yticks([])
ax.set_ylabel(ylabel)
if fig is None:
return ax
fig.tight_layout(pad=0.5)
if format == 'svg':
result = plot_as_svg_xhtml(fig, **kwargs)
fig.clf()
plt.close(fig)
elif format == 'png':
from .png import make_png
result = make_png(fig, **kwargs)
fig.clf()
plt.close(fig)
else:
result = ax
return result
from .. import Model
Model.distribution_on_idca_variable = distribution_on_idca_variable
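# Attaching the function to the Model class makes it available as a bound method,
# so (illustratively, with `m` an estimated larch Model whose idca data includes a
# variable named 'tottime' -- a hypothetical name) one can write:
#     m.distribution_on_idca_variable('tottime', bins=25, style='hist')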
def share_figure(
x,
probability,
choices=None,
weights=1,
xlabel=None,
bins=None,
pct_bins=20,
figsize=(12, 4),
style='stacked',
discrete=None,
xlim=None,
xscale=None,
xmajorticks=None,
xminorticks=None,
include_nests=False,
exclude_alts=None,
format='figure',
**kwargs,
):
"""
Generate a figure of variables over a range of variable values.
Parameters
----------
x : array-like, 1-d
An array giving values for some variable.
probability : array-like, 2-d
The pre-calculated probability array for all cases in this analysis.
First dimension must be the same shape as `x`. The second dimension
represents the alternatives (or similar).
choices : array-like, optional
The observed choices array for all cases in this analysis. If provided,
the first dimension must be the same shape as `x`. The second dimension
represents the alternatives (or similar).
weights : array-like, 1-d, optional
The case weights for all cases in this analysis. If provided,
the shape must be the same shape as `x`.
xlabel : str, optional
A label to use for the x-axis of the resulting figure. If not given,
the value of `x.name` is used if it exists. Set to `False` to omit the
x-axis label.
bins : int, optional
The number of equal-sized bins to use.
pct_bins : int or array-like, default 20
The number of equal-mass bins to use.
style : {'stacked', 'dataframe', 'many'}
The type of output to generate.
discrete : bool, default False
Whether to treat the data values explicitly as discrete (vs continuous)
data. This will change the styling and automatic bin generation. If
there are very few unique values, the data will be assumed to be
discrete anyhow.
xlim : 2-tuple, optional
Explicitly set the range of values shown on the x axis of generated
figures. This can truncate long tails. The actual histogram bins
are not changed.
include_nests : bool, default False
Whether to include nests in the figure.
exclude_alts : Collection, optional
Alternatives to exclude from the figure.
    format : {'figure', 'svg', 'png'}, default 'figure'
        How to return the result if it is a figure. The default is to return
        the raw matplotlib Figure instance, or set to `svg` to get an SVG
        rendering as an xmle.Elem.
Returns
-------
Figure, DataFrame, or Elem
"""
if style not in {'stacked', 'dataframe', 'many'}:
raise ValueError("style must be in {'stacked', 'dataframe', 'many'}")
if include_nests and style == 'stacked' and exclude_alts is None:
import warnings
warnings.warn("including nests in a stacked figure is likely to give "
"misleading results unless constituent alternatives are omitted")
if exclude_alts is None:
exclude_alts = set()
if xlabel is None:
try:
xlabel = x.name
except AttributeError:
pass
filter_ = slice(None)
h_pr = {}
h_ch = {}
discrete_values = None
if discrete:
discrete_values = numpy.unique(x)
elif discrete is None:
from .histograms import seems_like_discrete_data
discrete, discrete_values = seems_like_discrete_data(x, return_uniques=True)
pr = numpy.asarray(probability)
if choices is not None:
ch = numpy.asarray(choices)
else:
ch = None
wt = numpy.asarray(weights)
x_discrete_labels = None if discrete_values is None else [str(i) for i in discrete_values]
if bins is None:
if isinstance(x.dtype, pandas.CategoricalDtype):
discrete_values = numpy.arange(len(x_discrete_labels))
bins = numpy.arange(len(x_discrete_labels)+1)
x = x.cat.codes
elif isinstance(pct_bins, int):
bins = numpy.percentile(x, numpy.linspace(0, 100, pct_bins + 1))
else:
bins = numpy.percentile(x, pct_bins)
try:
columns = probability.columns
except AttributeError:
columns = None
else:
columns = dict(enumerate(columns))
# check for correct array shapes, raise helpful message if not compatible
pr_w_shape = numpy.broadcast_shapes(pr[:, 0].shape, wt.shape)
if x.shape != pr_w_shape:
raise ValueError(
f"incompatible shapes, "
f"x.shape={x.shape}, "
f"pr.shape={pr.shape}, "
f"wt.shape={wt.shape}, "
f"(pr[:,i]*wt).shape={pr_w_shape}"
)
if ch is not None:
ch_w_shape = numpy.broadcast_shapes(ch[:, 0].shape, wt.shape)
if x.shape != ch_w_shape:
raise ValueError(
f"incompatible shapes, "
f"x.shape={x.shape}, "
f"ch.shape={ch.shape}, "
f"wt.shape={wt.shape}, "
f"(ch[:,i]*wt).shape={ch_w_shape}"
)
for i in range(pr.shape[1]):
h_pr[i], _ = numpy.histogram(
x,
weights=pr[:, i] * wt,
bins=bins,
)
if ch is not None:
h_ch[i], _ = numpy.histogram(
x,
weights=ch[:, i] * wt,
bins=bins,
)
h_pr = pandas.DataFrame(h_pr)
h_pr.index = pandas.IntervalIndex.from_breaks(bins) # bins[:-1]
h_pr.rename(columns=columns, inplace=True)
_denominator, _ = numpy.histogram(
x,
weights=pr.sum(1) * wt,
bins=bins,
)
h_pr_share = (h_pr / _denominator.reshape(-1, 1))
if ch is not None:
_denominator_ch, _ = numpy.histogram(
x,
weights=ch.sum(1) * wt,
bins=bins,
)
h_ch = pandas.DataFrame(h_ch)
h_ch.index = h_pr.index
h_ch.rename(columns=columns, inplace=True)
h_ch_share = (h_ch / _denominator_ch.reshape(-1, 1))
else:
h_ch_share = None
if discrete:
x_placement = numpy.arange(len(bins)-1)
x_alignment = 'center'
bin_widths = 0.8
else:
x_placement = bins[:-1]
x_alignment = 'edge'
bin_widths = bins[1:] - bins[:-1]
if xlabel is False:
xlabel = None
if xlim is None:
xlim = (bins[0], bins[-1])
if style == 'dataframe':
if ch is not None:
result = pandas.concat({
'Modeled Shares': h_pr_share,
'Observed Shares': h_ch_share,
}, axis=1, sort=False)
else:
result = pandas.concat({
'Modeled Shares': h_pr_share,
}, axis=1, sort=False)
result['Count', '*'] = h_pr.sum(1)
if xlabel:
result.index.name = xlabel
elif style == 'stacked':
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=figsize)
bottom0 = 0
bottom1 = 0
for i in h_pr_share.columns:
ax0.bar(
x_placement,
height=h_pr_share[i],
bottom=bottom0,
width=bin_widths,
align=x_alignment,
label=i,
)
bottom0 = h_pr_share[i].fillna(0).values + bottom0
ax1.bar(
x_placement,
height=h_ch_share[i],
bottom=bottom1,
width=bin_widths,
align=x_alignment,
)
bottom1 = h_ch_share[i].fillna(0).values + bottom1
ax0.set_ylim(0, 1)
if not discrete:
ax0.set_xlim(*xlim)
if xscale:
if isinstance(xscale, str):
ax0.set_xscale(xscale)
elif isinstance(xscale, dict):
ax0.set_xscale(**xscale)
else:
raise ValueError(f"xscale must be str or dict, not {type(xscale)}")
if xmajorticks is not None:
ax0.set_xticks(xmajorticks)
ax0.set_xticklabels(xmajorticks)
if xminorticks is not None:
ax0.set_xticks(xminorticks, minor=True)
if x_discrete_labels is not None:
ax0.set_xticks(numpy.arange(len(x_discrete_labels)))
ax0.set_xticklabels(x_discrete_labels)
ax0.set_title('Modeled Shares')
ax1.set_ylim(0, 1)
if not discrete:
ax1.set_xlim(*xlim)
if xscale:
if isinstance(xscale, str):
ax1.set_xscale(xscale)
elif isinstance(xscale, dict):
ax1.set_xscale(**xscale)
else:
raise ValueError(f"xscale must be str or dict, not {type(xscale)}")
if xmajorticks is not None:
ax1.set_xticks(xmajorticks)
ax1.set_xticklabels(xmajorticks)
if xminorticks is not None:
ax1.set_xticks(xminorticks, minor=True)
if x_discrete_labels is not None:
ax1.set_xticks(numpy.arange(len(x_discrete_labels)))
ax1.set_xticklabels(x_discrete_labels)
ax1.set_title('Observed Shares')
if xlabel:
ax0.set_xlabel(xlabel)
ax1.set_xlabel(xlabel)
fig.legend(
loc='center right',
)
# fig.tight_layout(pad=0.5)
if format == 'svg':
result = plot_as_svg_xhtml(fig, **kwargs)
fig.clf()
plt.close(fig)
elif format == 'png':
from .png import make_png
result = make_png(fig, **kwargs)
fig.clf()
plt.close(fig)
else:
result = fig
else:
fig, axes = plt.subplots(len(h_pr_share.columns), 1, figsize=figsize)
shift = 0.4 if discrete else 0
for n,i in enumerate(h_pr_share.columns):
x_, y_ = pseudo_bar_data(bins-shift, h_pr_share[i], gap=0.2 if discrete else 0)
axes[n].plot(x_, y_, label='Modeled' if n==0 else None, lw=1.5)
x_ch_, y_ch_ = pseudo_bar_data(bins-shift, h_ch_share[i], gap=0.2 if discrete else 0)
axes[n].fill_between(
x_ch_, y_ch_, label='Observed' if n==0 else None, step=None,
facecolor='#ffbe4d', edgecolor='#ffa200',
lw=1.5,
)
if not discrete:
axes[n].set_xlim(*xlim)
if xscale:
if isinstance(xscale, str):
axes[n].set_xscale(xscale)
elif isinstance(xscale, dict):
axes[n].set_xscale(**xscale)
else:
raise ValueError(f"xscale must be str or dict, not {type(xscale)}")
if xmajorticks is not None:
axes[n].set_xticks(xmajorticks)
axes[n].set_xticklabels(xmajorticks)
if xminorticks is not None:
axes[n].set_xticks(xminorticks, minor=True)
if x_discrete_labels is not None:
axes[n].set_xticks(numpy.arange(len(x_discrete_labels)))
axes[n].set_xticklabels(x_discrete_labels)
axes[n].set_ylabel(i)
# axes[n].legend(
# # loc='center right',
# )
legnd = axes[0].legend(
loc='lower center',
ncol=2,
borderaxespad=0,
bbox_to_anchor=(0.5, 1.08)
)
if xlabel:
axes[-1].set_xlabel(xlabel)
#fig.tight_layout(pad=0.5)
if format == 'svg':
result = plot_as_svg_xhtml(fig, bbox_extra_artists=[legnd], **kwargs)
fig.clf()
plt.close(fig)
elif format == 'png':
from .png import make_png
result = make_png(fig, **kwargs)
fig.clf()
plt.close(fig)
else:
result = fig
return result
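# --- Illustrative sketch with synthetic data (hypothetical alternative names,
# not part of the original larch source) ---
#
#     rng = numpy.random.default_rng(0)
#     x = rng.normal(size=1000)
#     alts = ['car', 'bus', 'walk']
#     pr = pandas.DataFrame(rng.dirichlet([1., 1., 1.], size=1000), columns=alts)
#     ch = pandas.DataFrame(numpy.eye(3)[rng.integers(0, 3, size=1000)], columns=alts)
#     fig = share_figure(x, pr, choices=ch, xlabel='x', style='stacked')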
def distribution_on_idco_variable(
model,
x,
xlabel=None,
bins=None,
pct_bins=20,
figsize=(12, 4),
style='stacked',
discrete=None,
xlim=None,
include_nests=False,
exclude_alts=None,
filter=None,
format='figure',
**kwargs,
):
"""
Generate a figure of variables over a range of variable values.
Parameters
----------
model : Model
The discrete choice model to analyze.
x : str or array-like
The name of an `idco` variable, or an array giving its values. If this name exactly
matches that of an `idco` column in the model's loaded `dataframes`, then
those values are used, otherwise the variable is loaded from the model's
`dataservice`.
xlabel : str, optional
A label to use for the x-axis of the resulting figure. If not given,
the value of `x` is used if it is a string. Set to `False` to omit the
x-axis label.
bins : int, optional
The number of equal-sized bins to use.
pct_bins : int or array-like, default 20
The number of equal-mass bins to use.
style : {'stacked', 'dataframe', 'many'}
The type of output to generate.
discrete : bool, default False
Whether to treat the data values explicitly as discrete (vs continuous)
data. This will change the styling and automatic bin generation. If
there are very few unique values, the data will be assumed to be
discrete anyhow.
xlim : 2-tuple, optional
Explicitly set the range of values shown on the x axis of generated
figures. This can truncate long tails. The actual histogram bins
are not changed.
include_nests : bool, default False
Whether to include nests in the figure.
exclude_alts : Collection, optional
Alternatives to exclude from the figure.
filter : str, optional
A filter that will be used to select only a subset of cases.
    format : {'figure', 'svg', 'png'}, default 'figure'
        How to return the result if it is a figure. The default is to return
        the raw matplotlib Figure instance, or set to `svg` to get an SVG
        rendering as an xmle.Elem.
Returns
-------
Figure, DataFrame, or Elem
"""
if style not in {'stacked', 'dataframe', 'many'}:
raise ValueError("style must be in {'stacked', 'dataframe', 'many'}")
if include_nests and style == 'stacked' and exclude_alts is None:
import warnings
warnings.warn("including nests in a stacked figure is likely to give "
"misleading results unless constituent alternatives are omitted")
if exclude_alts is None:
exclude_alts = set()
if isinstance(x, str):
x_label = x
if model.dataframes and model.dataframes.data_co is not None and x in model.dataframes.data_co:
x = model.dataframes.data_co[x].values.reshape(-1)
else:
x = model.dataservice.make_dataframes({'co': [x]}, explicit=True).array_co().reshape(-1)
else:
try:
x_label = x.name
except AttributeError:
x_label = ''
if filter:
_ds = model.dataservice if model.dataservice is not None else model.dataframes
filter_ = _ds.make_dataframes(
{'co': [filter]},
explicit=True,
float_dtype=bool,
).array_co().reshape(-1)
x = x[filter_]
else:
filter_ = slice(None)
h_pr = {}
h_ch = {}
discrete_values = None
if discrete:
discrete_values = numpy.unique(x)
elif discrete is None:
from .histograms import seems_like_discrete_data
discrete, discrete_values = seems_like_discrete_data(x, return_uniques=True)
pr = model.probability(
return_dataframe='names',
include_nests=bool(include_nests),
).loc[filter_,:]
if include_nests:
ch = model.dataframes.data_ch_cascade(model.graph).loc[filter_,:]
else:
ch = model.dataframes.data_ch.loc[filter_,:]
if model.dataframes.data_wt is None:
wt = 1
else:
wt = model.dataframes.data_wt.values.reshape(-1)[filter_]
x_discrete_labels = None if discrete_values is None else [str(i) for i in discrete_values]
if bins is None:
if isinstance(x.dtype, pandas.CategoricalDtype):
discrete_values = numpy.arange(len(x_discrete_labels))
bins = numpy.arange(len(x_discrete_labels)+1)
x = x.cat.codes
elif isinstance(pct_bins, int):
bins = numpy.percentile(x, numpy.linspace(0, 100, pct_bins + 1))
else:
bins = numpy.percentile(x, pct_bins)
n_alts = model.graph.n_elementals()
columns = {}
for i in range(pr.shape[1]):
columns[i] = pr.columns[i]
if i < n_alts or include_nests is True or model.graph.standard_sort[i] in include_nests:
if model.graph.standard_sort[i] == model.graph.root_id:
continue
if model.graph.standard_sort[i] in exclude_alts:
continue
h_pr[i], _ = numpy.histogram(
x,
weights=pr.iloc[:, i] * wt,
bins=bins,
)
h_ch[i], _ = numpy.histogram(
x,
weights=ch.iloc[:, i] * wt,
bins=bins,
)
h_pr = pandas.DataFrame(h_pr)
h_pr.index = pandas.IntervalIndex.from_breaks(bins) # bins[:-1]
h_pr.rename(columns=columns, inplace=True)
_denominator, _ = numpy.histogram(
x,
weights=numpy.ones_like(pr.iloc[:, -1]) * wt,
bins=bins,
)
h_pr_share = (h_pr / _denominator.reshape(-1, 1))
h_ch = pandas.DataFrame(h_ch)
h_ch.index = h_pr.index
h_ch.rename(columns=columns, inplace=True)
h_ch_share = (h_ch / _denominator.reshape(-1, 1))
if discrete:
x_placement = numpy.arange(len(bins)-1)
x_alignment = 'center'
bin_widths = 0.8
else:
x_placement = bins[:-1]
x_alignment = 'edge'
bin_widths = bins[1:] - bins[:-1]
if xlabel is None:
xlabel = x_label
if xlabel is False:
xlabel = None
if xlim is None:
xlim = (bins[0], bins[-1])
if style == 'dataframe':
result = pandas.concat({
'Modeled Shares': h_pr_share,
'Observed Shares': h_ch_share,
}, axis=1, sort=False)
result['Count', '*'] = h_pr.sum(1)
if x_label:
result.index.name = xlabel
elif style == 'stacked':
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=figsize)
bottom0 = 0
bottom1 = 0
for i in h_pr_share.columns:
ax0.bar(
x_placement,
height=h_pr_share[i],
bottom=bottom0,
width=bin_widths,
align=x_alignment,
label=i,
)
bottom0 = h_pr_share[i].values + bottom0
ax1.bar(
x_placement,
height=h_ch_share[i],
bottom=bottom1,
width=bin_widths,
align=x_alignment,
)
bottom1 = h_ch_share[i].values + bottom1
ax0.set_ylim(0, 1)
if not discrete:
ax0.set_xlim(*xlim)
if x_discrete_labels is not None:
ax0.set_xticks(numpy.arange(len(x_discrete_labels)))
ax0.set_xticklabels(x_discrete_labels)
ax0.set_title('Modeled Shares')
ax1.set_ylim(0, 1)
if not discrete:
ax1.set_xlim(*xlim)
if x_discrete_labels is not None:
ax1.set_xticks(numpy.arange(len(x_discrete_labels)))
ax1.set_xticklabels(x_discrete_labels)
ax1.set_title('Observed Shares')
if xlabel:
ax0.set_xlabel(xlabel)
ax1.set_xlabel(xlabel)
fig.legend(
loc='center right',
)
# fig.tight_layout(pad=0.5)
if format == 'svg':
result = plot_as_svg_xhtml(fig, **kwargs)
fig.clf()
plt.close(fig)
elif format == 'png':
from .png import make_png
result = make_png(fig, **kwargs)
fig.clf()
plt.close(fig)
else:
result = fig
else:
fig, axes = plt.subplots(len(h_pr_share.columns), 1, figsize=figsize)
shift = 0.4 if discrete else 0
for n,i in enumerate(h_pr_share.columns):
x_, y_ = pseudo_bar_data(bins-shift, h_pr_share[i], gap=0.2 if discrete else 0)
axes[n].plot(x_, y_, label='Modeled' if n==0 else None, lw=1.5)
x_ch_, y_ch_ = pseudo_bar_data(bins-shift, h_ch_share[i], gap=0.2 if discrete else 0)
axes[n].fill_between(
x_ch_, y_ch_, label='Observed' if n==0 else None, step=None,
facecolor='#ffbe4d', edgecolor='#ffa200',
lw=1.5,
)
if not discrete:
axes[n].set_xlim(*xlim)
if x_discrete_labels is not None:
axes[n].set_xticks(numpy.arange(len(x_discrete_labels)))
axes[n].set_xticklabels(x_discrete_labels)
axes[n].set_ylabel(i)
# axes[n].legend(
# # loc='center right',
# )
legnd = axes[0].legend(
loc='lower center',
ncol=2,
borderaxespad=0,
bbox_to_anchor=(0.5, 1.08)
)
if xlabel:
axes[-1].set_xlabel(xlabel)
#fig.tight_layout(pad=0.5)
if format == 'svg':
result = plot_as_svg_xhtml(fig, bbox_extra_artists=[legnd], **kwargs)
fig.clf()
plt.close(fig)
elif format == 'png':
from .png import make_png
result = make_png(fig, **kwargs)
fig.clf()
plt.close(fig)
else:
result = fig
return result
Model.distribution_on_idco_variable = distribution_on_idco_variable
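# A minimal usage sketch (added for illustration; the model instance `m` and the
# idco column name 'INCOME' are assumptions, not part of this module):
#
#     fig = m.distribution_on_idco_variable(
#         'INCOME',            # idco variable to bin on
#         pct_bins=10,         # ten equal-mass bins
#         style='stacked',     # or 'dataframe' / 'many'
#         format='figure',
#     )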
| gpl-3.0 |
vincent-noel/libSigNetSim | setup.py | 1 | 3221 | #!/usr/bin/env python
from setuptools import find_packages
from distutils.core import setup, Extension
from os.path import dirname, join
setup(name='libsignetsim',
version=open(join(dirname(__file__), 'VERSION')).read(),
description='Python library designed for building, adjusting and analyzing quantitative biological models.',
author='Vincent Noel',
author_email='[email protected]',
url='',
packages=find_packages(),
include_package_data=True,
install_requires=[
'matplotlib',
'python-libsbml',
'python-libnuml',
'python-libsedml',
'sympy<1.2',
'numpy',
'scipy',
'pydstool',
'jinja2',
'bioservices',
'lxml',
'coveralls',
'future'
],
ext_modules=[
Extension(
'libsignetsim.lib.integrate.integrate',
sources=[
'libsignetsim/lib/integrate/src/shared.c',
'libsignetsim/lib/integrate/src/events.c',
'libsignetsim/lib/integrate/src/ode.c',
'libsignetsim/lib/integrate/src/dae.c',
'libsignetsim/lib/integrate/src/integrate.c',
'libsignetsim/lib/integrate/src/realtype_math.c',
],
include_dirs=[
"/usr/include/cvode/",
"/usr/include/ida/",
"/usr/include/nvector"
"/usr/include/sundials",
],
libraries=['sundials_cvode', 'sundials_nvecserial', 'sundials_ida', 'm', 'lapack', 'atlas', 'blas'],
library_dirs=['/usr/lib64/atlas-basic/'],
define_macros=(
[("SUNDIALS3", None)] if "SUNDIALS_VERSION_MAJOR 3" in open("/usr/include/sundials/sundials_config.h").read() else None
),
),
Extension(
'libsignetsim.lib.plsa.libplsa-serial',
sources=[
'libsignetsim/lib/plsa/src/config.c',
'libsignetsim/lib/plsa/src/error.c',
'libsignetsim/lib/plsa/src/distributions.c',
'libsignetsim/lib/plsa/src/random.c',
'libsignetsim/lib/plsa/src/plsa.c',
'libsignetsim/lib/plsa/src/lsa.c',
'libsignetsim/lib/plsa/src/moves.c',
'libsignetsim/lib/plsa/src/state.c',
'libsignetsim/lib/plsa/src/score.c',
]
),
Extension(
'libsignetsim.lib.plsa.libplsa-parallel',
sources=[
'libsignetsim/lib/plsa/src/config.c',
'libsignetsim/lib/plsa/src/error.c',
'libsignetsim/lib/plsa/src/distributions.c',
'libsignetsim/lib/plsa/src/random.c',
'libsignetsim/lib/plsa/src/plsa.c',
'libsignetsim/lib/plsa/src/lsa.c',
'libsignetsim/lib/plsa/src/moves.c',
'libsignetsim/lib/plsa/src/state.c',
'libsignetsim/lib/plsa/src/score.c',
'libsignetsim/lib/plsa/src/mixing.c',
'libsignetsim/lib/plsa/src/tuning.c',
],
include_dirs=[
"/usr/lib/openmpi/include/openmpi/opal/mca/event/libevent2021/libevent",
"/usr/lib/openmpi/include/openmpi/opal/mca/event/libevent2021/libevent/include",
"/usr/lib/openmpi/include",
"/usr/lib/openmpi/include/openmpi",
"/usr/lib/x86_64-linux-gnu/openmpi/include/openmpi/opal/mca/event/libevent2021/libevent",
"/usr/lib/x86_64-linux-gnu/openmpi/include/openmpi/opal/mca/event/libevent2021/libevent/include",
"/usr/lib/x86_64-linux-gnu/openmpi/include",
"/usr/lib/x86_64-linux-gnu/openmpi/include/openmpi",
"/usr/lib64/mpi/gcc/openmpi/include/", # Opensuse
"/usr/include/openmpi-x86_64/", # Fedora
],
define_macros=[("MPI", None)],
extra_compile_args=["-pthread"]
)
],
)
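# Typical build commands (added note; assumes the SUNDIALS and OpenMPI headers
# referenced above are installed on the system):
#   python setup.py build_ext --inplace
#   python setup.py install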
| gpl-3.0 |
KarrLab/kinetic_datanator | tests/data_source/brenda/test_reaction.py | 1 | 2352 | import unittest
import shutil
import tempfile
from datanator.data_source.brenda import reaction
from datanator_query_python.config import config
import pandas as pd
class TestBrendaRxn(unittest.TestCase):
@classmethod
def setUpClass(cls):
conf = config.TestConfig()
cls.collection_str = 'brenda_reaction'
username = conf.USERNAME
password = conf.PASSWORD
MongoDB = conf.SERVER
cls.src = reaction.BrendaRxn(MongoDB=MongoDB, db='test', collection_str=cls.collection_str,
username=username, password=password, authSource='admin',
max_entries=20, verbose=True)
@classmethod
def tearDownClass(cls):
cls.src.db_obj.drop_collection(cls.collection_str)
cls.src.client.close()
# @unittest.skip('passed')
def test_download_and_read(self):
result = self.src.download_and_read()
self.assertEqual(result['ec_number'][1], '6.3.2.1')
def test_clean_up(self):
result = self.src.download_and_read()
exp = self.src.clean_up(result)
self.assertEqual(exp['reaction_id_brenda'][1], ['BR101'])
self.assertEqual(exp['reaction_id_sabio_rk'][1], 2406)
# @unittest.skip('passed')
def test_parse_reaction(self):
df = pd.DataFrame({'reaction': ['ATP + (R)-pantoate + beta-alanine <=> AMP + diphosphate + (R)-pantothenate',
'ATP + Detyrosinated alpha-tubulin + L-Tyrosine = alpha-Tubulin + ADP + Orthophosphate']})
result = self.src.parse_reaction(df)
self.assertEqual(result['products'][1][1], 'ADP')
self.assertEqual(result['substrates'][0][1], '(R)-pantoate')
# @unittest.skip('passed')
def test_load_df_sim(self):
df = pd.DataFrame({'reaction': ['ATP + (R)-pantoate + beta-alanine <=> AMP + diphosphate + (R)-pantothenate',
'ATP + Detyrosinated alpha-tubulin + L-Tyrosine = alpha-Tubulin + ADP + Orthophosphate']})
result = self.src.parse_reaction(df)
self.src.load_df(result)
# @unittest.skip('passed')
def test_load_df_real(self):
result = self.src.download_and_read()
self.src.clean_up(result)
x = self.src.parse_reaction(result)
self.src.load_df(x.head(100)) | mit |
mhvk/astropy | astropy/visualization/tests/test_histogram.py | 5 | 2299 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_allclose
from astropy.utils.compat.optional_deps import HAS_PLT, HAS_SCIPY
if HAS_PLT:
import matplotlib.pyplot as plt
import pytest
import numpy as np
from astropy.visualization import hist
from astropy.stats import histogram
@pytest.mark.skipif('not HAS_PLT')
def test_hist_basic(rseed=0):
rng = np.random.default_rng(rseed)
x = rng.standard_normal(100)
for range in [None, (-2, 2)]:
n1, bins1, patches1 = plt.hist(x, 10, range=range)
n2, bins2, patches2 = hist(x, 10, range=range)
assert_allclose(n1, n2)
assert_allclose(bins1, bins2)
@pytest.mark.skipif('not HAS_PLT')
def test_hist_specify_ax(rseed=0):
rng = np.random.default_rng(rseed)
x = rng.standard_normal(100)
fig, ax = plt.subplots(2)
n1, bins1, patches1 = hist(x, 10, ax=ax[0])
assert patches1[0].axes is ax[0]
n2, bins2, patches2 = hist(x, 10, ax=ax[1])
assert patches2[0].axes is ax[1]
@pytest.mark.skipif('not HAS_PLT')
def test_hist_autobin(rseed=0):
rng = np.random.default_rng(rseed)
x = rng.standard_normal(100)
# 'knuth' bintype depends on scipy that is optional dependency
if HAS_SCIPY:
bintypes = [10, np.arange(-3, 3, 10), 'knuth', 'scott',
'freedman', 'blocks']
else:
bintypes = [10, np.arange(-3, 3, 10), 'scott',
'freedman', 'blocks']
for bintype in bintypes:
for range in [None, (-3, 3)]:
n1, bins1 = histogram(x, bintype, range=range)
n2, bins2, patches = hist(x, bintype, range=range)
assert_allclose(n1, n2)
assert_allclose(bins1, bins2)
def test_histogram_pathological_input():
# Regression test for https://github.com/astropy/astropy/issues/7758
# The key feature of the data below is that one of the points is very,
# very different than the rest. That leads to a large number of bins.
data = [9.99999914e+05, -8.31312483e-03, 6.52755852e-02, 1.43104653e-03,
-2.26311017e-02, 2.82660007e-03, 1.80307521e-02, 9.26294279e-03,
5.06606026e-02, 2.05418011e-03]
with pytest.raises(ValueError):
hist(data, bins='freedman', max_bins=10000)
| bsd-3-clause |
nal-epfl/line-sigcomm14 | plotting-scripts/plot-data.py | 1 | 36287 | #!/usr/bin/env python2
# Install dependencies:
# sudo apt-get install python-matplotlib dvipng
import colorsys
import getopt
import json
from nicePlot import nicePlot
import math
import numpy
import os
import pprint
import re
import subprocess
import sys
## Params
dataFile = 'data-plot1.txt'
latencyFile = 'latency.txt'
throughputInFile = 'throughput-in.txt'
throughputOutFile = 'throughput-out.txt'
inputDir = ''
outputDir = ''
plotNumber = '1'
extraTitleLabel = ''
try:
opts, args = getopt.getopt(sys.argv[1:], '', ['plot=', 'in=', 'out=', 'label='])
except getopt.GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
output = None
verbose = False
for opt, arg in opts:
if opt == '--plot':
plotNumber = arg
elif opt == '--in':
inputDir = arg
elif opt == '--out':
outputDir = arg
elif opt == '--label':
extraTitleLabel = arg
else:
assert False, "Unhandled option: " + str(opt)
if plotNumber == 'all':
script = os.path.realpath(__file__)
print 'Running script for all scenarios...'
for plot in [ '1', '1b', '2', '3', '4', '5', '5b', '7', '7b', '7b+', '8', '8b', '8b+' ]:
args = ['python', script, '--plot', plot]
print(args)
subprocess.call(args)
exit(0)
elif plotNumber == 'real-vs-shaping':
script = os.path.realpath(__file__)
print 'Running script for all scenarios...'
args = ['python', script, '--plot', '1', '--in', 'plot1-real-data', '--out', 'plot1-real', '--label', '(real)']
print(args)
subprocess.call(args)
args = ['python', script, '--plot', '1', '--in', 'plot1-emulator-data', '--out', 'plot1-emulator', '--label', '(emulator)']
print(args)
subprocess.call(args)
args = ['python', script, '--plot', '7', '--in', 'plot7-real-data', '--out', 'plot7-real', '--label', '(real)']
print(args)
subprocess.call(args)
args = ['python', script, '--plot', '7', '--in', 'plot7-emulator-data', '--out', 'plot7-emulator', '--label', '(emulator)']
print(args)
subprocess.call(args)
args = ['python', script, '--plot', '2', '--in', 'plot2-real-data', '--out', 'plot2-real', '--label', '(real)']
print(args)
subprocess.call(args)
args = ['python', script, '--plot', '2', '--in', 'plot2-emulator-data', '--out', 'plot2-emulator', '--label', '(emulator)']
print(args)
subprocess.call(args)
args = ['python', script, '--plot', '2', '--in', 'plot2-shaping-real-data', '--out', 'plot2-shaping-real', '--label', '(real)']
print(args)
subprocess.call(args)
args = ['python', script, '--plot', '2', '--in', 'plot2-shaping-emulator-data', '--out', 'plot2-shaping-emulator', '--label', '(emulator)']
print(args)
subprocess.call(args)
exit(0)
elif plotNumber == 'vary-rtt-and-buffers':
script = os.path.realpath(__file__)
print 'Running script for all scenarios...'
#for tag in ['', 'sigcomm-deadline', 'sigcomm-deadline-repro']:
#for tag in ['sigcomm-deadline-repro']:
for tag in ['']:
for qos in ['policing', 'shaping']:
niceqos = qos.capitalize()
for rtt in ['50', '80', '120', '200']:
for buffers in ['large', 'small', 'medium']:
for scaling in ['notscaled', 'scaleddown']:
nicescaling = 'not scaled' if scaling == 'notscaled' else 'scaled down' if scaling == 'scaleddown' else scaling
dataDir = 'plot2-{0}-data-rtt-{1}-buffers-{2}-{3}{4}'.format(qos, rtt, buffers, scaling, ('-' + tag) if tag else '')
plotDir = 'plot2-{0}-rtt-{1}-buffers-{2}-{3}{4}'.format(qos, rtt, buffers, scaling, ('-' + tag) if tag else '')
label = '{0}, RTT {1}ms, {2} buffers, {3}'.format(niceqos, rtt, buffers, nicescaling)
if os.path.isdir(dataDir):
args = ['python', script,
'--plot', '2',
'--in', dataDir,
'--out', plotDir,
'--label', label]
print(args)
if subprocess.call(args) != 0:
exit(1)
exit(0)
elif plotNumber == 'diff-rtt':
script = os.path.realpath(__file__)
print 'Running script for all scenarios...'
for tag in ['']:
for qos in ['policing', 'shaping', 'neutral']:
niceqos = qos.capitalize()
for rtt in ['48-120', '120-48']:
for buffers in ['large', 'small', 'medium']:
for scaling in ['notscaled', 'scaleddown']:
for tcp in ['cubic']:
nicescaling = 'not scaled' if scaling == 'notscaled' else 'scaled down' if scaling == 'scaleddown' else scaling
nicertt = rtt.replace('-', '(1)/') + '(2) ms'
dataDir = 'plot2-{0}-data-rtt-{1}-buffers-{2}-{3}-{4}{5}'.format(qos, rtt, buffers, scaling, tcp, ('-' + tag) if tag else '')
plotDir = 'plot2-{0}-rtt-{1}-buffers-{2}-{3}-{4}{5}'.format(qos, rtt, buffers, scaling, tcp, ('-' + tag) if tag else '')
label = '{0}, RTT {1}, {2} buffers, {3}, TCP {4}'.format(niceqos, nicertt, buffers, nicescaling, tcp)
if os.path.isdir(dataDir):
args = ['python', script,
'--plot', '2',
'--in', dataDir,
'--out', plotDir,
'--label', label]
print(args)
if subprocess.call(args) != 0:
exit(1)
exit(0)
elif plotNumber == 'diff-rtt-tcp':
script = os.path.realpath(__file__)
print 'Running script for all scenarios...'
for tag in ['']:
for qos in ['policing', 'shaping', 'neutral']:
niceqos = qos.capitalize()
for rtt in ['48-120', '120-48']:
for buffers in ['large', 'small', 'medium']:
for scaling in ['notscaled', 'scaleddown']:
for tcp in ['cubic', 'cubic-reno', 'reno-cubic']:
nicescaling = 'not scaled' if scaling == 'notscaled' else 'scaled down' if scaling == 'scaleddown' else scaling
dataDir = 'plot2-{0}-data-rtt-{1}-buffers-{2}-{3}-{4}{5}'.format(qos, rtt, buffers, scaling, tcp, ('-' + tag) if tag else '')
plotDir = 'plot2-{0}-rtt-{1}-buffers-{2}-{3}-{4}{5}'.format(qos, rtt, buffers, scaling, tcp, ('-' + tag) if tag else '')
label = '{0}, RTT {1}ms, {2} buffers, {3}, TCP {4}'.format(niceqos, rtt, buffers, nicescaling, tcp)
if os.path.isdir(dataDir):
args = ['python', script,
'--plot', '2',
'--in', dataDir,
'--out', plotDir,
'--label', label]
print(args)
if subprocess.call(args) != 0:
exit(1)
exit(0)
elif plotNumber == 'diff-tcp':
script = os.path.realpath(__file__)
print 'Running script for all scenarios...'
for tag in ['cubic-reno', 'reno-cubic']:
for qos in ['policing', 'shaping', 'neutral']:
niceqos = qos.capitalize()
for rtt in ['50', '80', '120', '200']:
for buffers in ['large', 'small', 'medium']:
for scaling in ['notscaled', 'scaleddown']:
nicescaling = 'not scaled' if scaling == 'notscaled' else 'scaled down' if scaling == 'scaleddown' else scaling
dataDir = 'plot2-{0}-data-rtt-{1}-buffers-{2}-{3}{4}'.format(qos, rtt, buffers, scaling, ('-' + tag) if tag else '')
plotDir = 'plot2-{0}-rtt-{1}-buffers-{2}-{3}{4}'.format(qos, rtt, buffers, scaling, ('-' + tag) if tag else '')
label = '{0}, RTT {1}ms, {2} buffers, {3}, {4}'.format(niceqos, rtt, buffers, nicescaling, tag)
if os.path.isdir(dataDir):
args = ['python', script,
'--plot', '2',
'--in', dataDir,
'--out', plotDir,
'--label', label]
print(args)
if subprocess.call(args) != 0:
exit(1)
exit(0)
elif plotNumber == 'vary-qos':
script = os.path.realpath(__file__)
print 'Running script for all scenarios...'
for d in os.listdir('.'):
if not os.path.isdir(d):
continue
if 'image' in d:
continue
plotNo = '1-7'
dataDir = d
plotDir = 'image-{0}'.format(d)
label = d
print 'Looking for {0}'.format(dataDir)
if os.path.isdir(dataDir):
args = ['python', script,
'--plot', plotNo,
'--in', dataDir,
'--out', plotDir,
'--label', label]
print(args)
if subprocess.call(args) != 0:
exit(1)
exit(0)
elif plotNumber == 'vary-transfer-size':
script = os.path.realpath(__file__)
print 'Running script for all scenarios...'
for d in os.listdir('.'):
if not os.path.isdir(d):
continue
if 'image' in d:
continue
plotNo = '2'
dataDir = d
plotDir = 'image-{0}'.format(d)
label = d
print 'Looking for {0}'.format(dataDir)
if os.path.isdir(dataDir):
args = ['python', script,
'--plot', plotNo,
'--in', dataDir,
'--out', plotDir,
'--label', label]
print(args)
if subprocess.call(args) != 0:
exit(1)
exit(0)
elif plotNumber == 'vary-rtt':
script = os.path.realpath(__file__)
print 'Running script for all scenarios...'
for d in os.listdir('.'):
if not os.path.isdir(d):
continue
if 'image' in d:
continue
plotNo = '9'
dataDir = d
plotDir = 'image-{0}'.format(d)
label = d
print 'Looking for {0}'.format(dataDir)
if os.path.isdir(dataDir):
args = ['python', script,
'--plot', plotNo,
'--in', dataDir,
'--out', plotDir,
'--label', label]
print(args)
if subprocess.call(args) != 0:
exit(1)
exit(0)
elif plotNumber == 'vary-tcp':
script = os.path.realpath(__file__)
print 'Running script for all scenarios...'
for d in os.listdir('.'):
if not os.path.isdir(d):
continue
if 'image' in d:
continue
plotNo = '10'
dataDir = d
plotDir = 'image-{0}'.format(d)
label = d
print 'Looking for {0}'.format(dataDir)
if os.path.isdir(dataDir):
args = ['python', script,
'--plot', plotNo,
'--in', dataDir,
'--out', plotDir,
'--label', label]
print(args)
if subprocess.call(args) != 0:
exit(1)
exit(0)
elif plotNumber == 'vary-congestion':
script = os.path.realpath(__file__)
print 'Running script for all scenarios...'
for d in os.listdir('.'):
if not os.path.isdir(d):
continue
if 'image' in d:
continue
plotNo = '11'
dataDir = d
plotDir = 'image-{0}'.format(d)
label = d
print 'Looking for {0}'.format(dataDir)
if os.path.isdir(dataDir):
args = ['python', script,
'--plot', plotNo,
'--in', dataDir,
'--out', plotDir,
'--label', label]
print(args)
if subprocess.call(args) != 0:
exit(1)
exit(0)
if plotNumber == '5':
if not inputDir:
inputDir = 'plot%s-data' % '1'
dataFile = 'data-plot%s.txt' % '1'
elif plotNumber == '5b':
if not inputDir:
inputDir = 'plot%s-data' % '1b'
dataFile = 'data-plot%s.txt' % '1b'
elif plotNumber == '7b+':
if not inputDir:
inputDir = 'plot%s-data' % '7b'
dataFile = 'data-plot%s.txt' % '7b'
elif plotNumber == '8':
if not inputDir:
inputDir = 'plot%s-data' % '7'
dataFile = 'data-plot%s.txt' % '7'
elif plotNumber == '8b' or plotNumber == '8b+':
if not inputDir:
inputDir = 'plot%s-data' % '7b'
dataFile = 'data-plot%s.txt' % '7b'
else:
if not inputDir:
inputDir = 'plot%s-data' % plotNumber
dataFile = 'data-plot%s.txt' % plotNumber
if not outputDir:
outputDir = 'plot%s' % plotNumber
print 'Arguments:'
print 'Input dir:', inputDir
print 'Data file:', dataFile
print 'Output dir:', outputDir
print 'Plot:', plotNumber
## End of params
def isNumber(s):
try:
float(s)
return True
except ValueError:
return False
def toBase64JS(filename):
return '"' + open(filename, 'rb').read().encode('base64').replace('\n', '" + \n"') + '"'
## Natural sorting (e.g.: asdf7, asdf8, asdf9, asdf10, ...)
# ref: http://stackoverflow.com/questions/4836710
def naturalSorted(l):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
def reversed(l):
l2 = l
l2.reverse()
return l2
def sortCongestion(l):
convert = lambda text: int(1) if text == 'no1' else int(2) if text == 'no2' else int(3) if text == 'no3' else int(4) if text == 'lo' else int(5) if text == 'me' else int(6) if text == 'hi' else text
keyfunc = lambda key: [ convert(c) for c in re.split('/', key) ]
return sorted(l, key = keyfunc)
def mean(l):
return sum(l)/len(l) if len(l) > 0 else 0
class Experiment(object):
def __init__(self):
self.tag = ""
self.policing = ""
self.shaping = ""
self.tcp = ""
self.rtt = ""
self.congestion = ""
self.throughputIn = ""
self.pCongClass1PerPath = []
self.pCongClass2PerPath = []
self.pCongLinkComputed = []
self.procDelayAverage = 0
self.procDelayPeak = 0
self.queuingDelay = 0 # queuing delay for 1 frame
self.pathClass = []
self.congThreshold = 0.05 # congestion threshold in percent, modified below
def __repr__(self):
pp = pprint.PrettyPrinter(indent=2)
return "Experiment" + pp.pformat(self.__dict__)
def __str__(self):
pp = pprint.PrettyPrinter(indent=2)
return "Experiment" + pp.pformat(self.__dict__)
# Regenerate data file
print 'Regenerating data file from experiment data...'
args = ['bash', '-c', '''rm -f {0} ; for d in {1}/* ; do echo $d; cat "$d/path-congestion-probs.txt" 1>>{0} 2>/dev/null ; done'''.format(dataFile, inputDir)]
print(args)
subprocess.call(args)
args = ['bash', '-c', '''rm -f {0} ; for d in {1}/* ; do echo $d; cat "$d/emulator.out" | grep 'Event delay' 1>>{0} 2>/dev/null ; done'''.format(latencyFile, inputDir)]
print(args)
subprocess.call(args)
args = ['bash', '-c', '''rm -f {0} ; for d in {1}/* ; do echo $d; cat "$d/emulator.out" | grep 'Bits received per second' 1>>{0} 2>/dev/null ; done'''.format(throughputInFile, inputDir)]
print(args)
subprocess.call(args)
args = ['bash', '-c', '''rm -f {0} ; for d in {1}/* ; do echo $d; cat "$d/emulator.out" | grep 'Bits sent per second' 1>>{0} 2>/dev/null ; done'''.format(throughputOutFile, inputDir)]
print(args)
subprocess.call(args)
## Read data
print 'Reading data...'
latencyPeaks = []
latencyAverages = []
throughputInValues = []
throughputOutValues = []
def timestampStr2us(str):
# ' 0 0s 0m 98u 542n'
tokens = [float(re.sub('[a-z]', '', word)) for word in str.split()]
return tokens[0] * 1.0e9 + tokens[1] * 1.0e6 + tokens[2] * 1.0e3 + tokens[3] * 1.0e0 + tokens[4] * 1.0e-3
with open (latencyFile, "r") as myfile:
for line in myfile.read().split('\n'):
if not line:
continue
# 'Event delay: avg 0 0s 0m 0u 361n ', ' max 0 0s 0m 98u 542n'
avgDelayStr = line.split(':')[1].split(',')[0].replace('avg', '')
maxDelayStr = line.split(':')[1].split(',')[1].replace('max', '')
latencyAverages.append(timestampStr2us(avgDelayStr))
latencyPeaks.append(timestampStr2us(maxDelayStr))
with open (throughputInFile, "r") as myfile:
for line in myfile.read().split('\n'):
if not line:
continue
# 'Bits received per second: 67.6772 Mbps'
throughput = float(line.split(':')[1].split(' ')[1])
throughputInValues.append(throughput)
with open (throughputOutFile, "r") as myfile:
for line in myfile.read().split('\n'):
if not line:
continue
# 'Bits sent per second: 67.6772 Mbps'
throughput = float(line.split(':')[1].split(' ')[1])
throughputOutValues.append(throughput)
experiments = []
with open (dataFile, "r") as myfile:
data = myfile.read()
for line in data.split('\n'):
tokens = line.split('\t')
for i in range(len(tokens)):
tokens[i] = tokens[i].strip()
if tokens[0] == 'Experiment':
if experiments:
print experiments[-1]
print 'Found experiment:', tokens[1]
experiments.append(Experiment())
experiments[-1].tag = tokens[1]
experiments[-1].procDelayAverage = latencyAverages.pop(0)
experiments[-1].procDelayPeak = latencyPeaks.pop(0)
experiments[-1].throughputIn = throughputInValues.pop(0)
experiments[-1].throughputOut = throughputOutValues.pop(0)
try:
experiments[-1].policing = re.compile('policing-[0-9]+(\\.[0-9]+)?-[0-9]+(\\.[0-9]+)?').search(experiments[-1].tag).group(0).replace('policing-', '').replace('-', '/').replace('1.0/1.0', 'No policing')
except:
pass
try:
experiments[-1].shaping = re.compile('shaping-[0-9]+(\\.[0-9]+)?-[0-9]+(\\.[0-9]+)?').search(experiments[-1].tag).group(0).replace('shaping-', '').replace('-', '/')
except:
pass
try:
experiments[-1].transferSize = re.compile('transfer-size-[0-9]+(\\.[0-9]+)?-[0-9]+(\\.[0-9]+)?').search(experiments[-1].tag).group(0).replace('transfer-size-', '').replace('-', '/').replace('9999', 'Long')
sizes = experiments[-1].transferSize.split('/')
if len(sizes) == 2 and sizes[0] == sizes[1]:
experiments[-1].transferSize = sizes[0]
except:
pass
try:
experiments[-1].linkSpeed = re.compile('link-[0-9]+(\\.[0-9]+)?Mbps').search(experiments[-1].tag).group(0).replace('link-', '')
except:
pass
try:
experiments[-1].numFlows = str(4*int(re.compile('nflows-[0-9]+').search(experiments[-1].tag).group(0).replace('nflows-', '')))
except:
pass
try:
experiments[-1].tcp = re.compile('tcp-(cubic-reno|reno-cubic|cubic|reno)').search(experiments[-1].tag).group(0).replace('tcp-', '').replace('-', '/')
except:
pass
try:
experiments[-1].rtt = re.compile('rtt-[0-9]+-[0-9]+').search(experiments[-1].tag).group(0).replace('rtt-', '').replace('-', '/')
rtts = experiments[-1].rtt.split('/')
if len(rtts) == 2 and rtts[0] == rtts[1]:
experiments[-1].rtt = rtts[0]
except:
pass
try:
experiments[-1].congestion = re.compile('congestion-[a-zA-Z0-9]+-[a-zA-Z0-9]+').search(experiments[-1].tag).group(0).replace('congestion-', '').replace('-', '/').replace('light', 'lo').replace('medium', 'me').replace('high', 'hi').replace('none', 'no')
except:
pass
if experiments[-1].policing and experiments[-1].policing != 'No policing':
experiments[-1].congThreshold = 0.001
elif experiments[-1].shaping:
if experiments[-1].tcp == 'reno' and experiments[-1].transferSize == 'Long':
experiments[-1].congThreshold = 0.001
else:
experiments[-1].congThreshold = 0.001
else:
experiments[-1].congThreshold = 0.001
elif tokens[0] == 'Class':
experiments[-1].pathClass = [int(c) for c in tokens[1:]]
elif experiments and tokens[0] == str(experiments[-1].congThreshold):
print 'Hit 347'
for p in range(len(tokens) - 1):
print 'Hit 349'
if experiments[-1].pathClass[p] == 0:
experiments[-1].pCongClass1PerPath.append(float(tokens[1 + p]))
elif experiments[-1].pathClass[p] == 1:
experiments[-1].pCongClass2PerPath.append(float(tokens[1 + p]))
else:
print 'NO CLASS!!!', experiments[-1].pathClass[p]
print 'tokens = ', tokens
if len(tokens) < 2:
if experiments:
print experiments[-1]
break
if not experiments:
continue
# delay in us
# 100 Mb/s = 100 b/us
# d(us) = 1500 * 8(b) / speed(b/us)
experiments[-1].queuingDelay = (1500 * 8) / float(experiments[-1].linkSpeed.replace('Mbps', ''))
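    # Worked example (added note): for a 100 Mbps link this gives
    # 1500 * 8 / 100 = 120 us to serialize one full-size frame.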
for e in experiments:
if not e.policing and not e.shaping:
e.policing = e.shaping = 'neutral'
args = ['bash', '-c', '''rm -f {0} 2>/dev/null'''.format(dataFile)]
print(args)
subprocess.call(args)
args = ['bash', '-c', '''rm -f {0} {1} {2} 2>/dev/null'''.format(latencyFile, throughputInFile, throughputOutFile)]
print(args)
subprocess.call(args)
## End of data reading
## Group experiments
print 'Grouping eperiments by parameters...'
experimentGroup1 = {}
experimentGroupTitle = {}
experimentGroupLegendTitle = {}
if plotNumber == '1-7':
key1 = 'policing'
if len(set([getattr(e, key1) for e in experiments])) > 1:
plotNumber = '1'
key1 = 'shaping'
if len(set([getattr(e, key1) for e in experiments])) > 1:
plotNumber = '7'
if plotNumber == '1-7':
raise SystemExit('Error')
key1 = ''
if plotNumber == '1' or plotNumber == '1b' or plotNumber == '5' or plotNumber == '5b':
key1 = 'policing'
experimentGroupTitle[key1] = '@label@'
experimentGroupLegendTitle[key1] = 'Policing'
elif plotNumber == '2':
key1 = 'transferSize'
experimentGroupTitle[key1] = '@label@'
experimentGroupLegendTitle[key1] = 'Transfer size'
elif plotNumber == '3':
key1 = 'linkSpeed'
experimentGroupTitle[key1] = '@label@'
experimentGroupLegendTitle[key1] = 'Bottleneck bandwidth'
elif plotNumber == '4':
key1 = 'numFlows'
experimentGroupTitle[key1] = '@label@'
experimentGroupLegendTitle[key1] = 'Total number of flows'
elif plotNumber == '7' or plotNumber == '7b' or plotNumber == '8' or plotNumber == '8b':
key1 = 'shaping'
experimentGroupTitle[key1] = '@label@'
experimentGroupLegendTitle[key1] = 'Shaping'
elif plotNumber == '7b+' or plotNumber == '8b+':
key1 = 'shaping'
experimentGroupTitle[key1] = '@label@'
experimentGroupLegendTitle[key1] = 'Shaping (cong. thresh. 0.25\\%)'
elif plotNumber == '9':
key1 = 'rtt'
experimentGroupTitle[key1] = '@label@'
experimentGroupLegendTitle[key1] = 'RTT'
elif plotNumber == '10':
key1 = 'tcp'
experimentGroupTitle[key1] = '@label@'
experimentGroupLegendTitle[key1] = 'TCP'
elif plotNumber == '11':
key1 = 'congestion'
experimentGroupTitle[key1] = '@label@'
experimentGroupLegendTitle[key1] = 'Congestion'
else:
print 'Bad plot number %s' % plotNumber
exit()
# 'key1': @key1 -> [ @experiment ]
allValues = sorted(list(set([getattr(e, key1) for e in experiments])))
experimentGroup1[key1] = {}
for v in allValues:
experimentGroup1[key1][str(v)] = [e for e in experiments if getattr(e, key1) == v]
# Remove empty lists
for key1 in experimentGroup1.keys():
for key2 in experimentGroup1[key1].keys():
if not experimentGroup1[key1][key2]:
del experimentGroup1[key1][key2]
if not experimentGroup1[key1]:
del experimentGroup1[key1]
print 'experimentGroup1 = ', experimentGroup1
## End of experiment grouping
## Plot
try:
os.makedirs(outputDir)
except OSError as exception:
pass
fig = 0
# [left, bottom, width, height]
figureBorder = [0.10, 0.20, 0.80, 0.70]
# Cleanup target dir
print 'Cleaning up target directory (%s)...' % outputDir
args = ['bash', '-c', '''cd '%s' && ls -1 | grep -E '^[0-9]+\\..*.(pdf|png)$' | while read -r f ; do rm -fv "$f" ; done || echo "Could not change dir to %s"''' % (outputDir, outputDir) ]
print(args)
subprocess.call(args)
print 'Generating plot %s...' % plotNumber
for key1 in naturalSorted(experimentGroup1.keys()):
if plotNumber == '1' or plotNumber == '1b' or plotNumber == '2' or plotNumber == '3' or plotNumber == '4' \
or plotNumber == '7' or plotNumber == '7b' or plotNumber == '7b+' or plotNumber == '9' or plotNumber == '10' \
or plotNumber == '11':
# Probability of congestion per class curve plots
curvex = []
curvexLabels = ['']
curves1y = []
curves2y = []
paramKeys = naturalSorted(experimentGroup1[key1].keys())
if plotNumber == '1' or plotNumber == '1b':
paramKeys = reversed(paramKeys)
elif plotNumber == '11':
paramKeys = sortCongestion(paramKeys)
        # Use the first parameter key to size the per-class path lists; key2 is
        # not defined until the loop below.
        print 'experimentGroup1[key1][paramKeys[0]][0] = ', experimentGroup1[key1][paramKeys[0]][0]
        numPathsClass1 = len(experimentGroup1[key1][paramKeys[0]][0].pCongClass1PerPath)
for key2 in paramKeys:
curvex.append(1 + len(curvex))
curvexLabels.append(key2)
curvexLabels.append('')
print 'curvex =', curvex
print 'curvexLabels = ', curvexLabels
print 'numPathsClass1 = ', numPathsClass1
congThresholds = set()
for iPath in range(numPathsClass1):
curvey = []
for key2 in paramKeys:
for e in experimentGroup1[key1][key2]:
curvey.append(sorted(e.pCongClass1PerPath)[iPath])
congThresholds.add(e.congThreshold)
print 'curvey (1) =', curvey
curves1y.append(curvey)
if len(congThresholds) == 1:
congThresholdLabel = str(congThresholds.pop())
else:
congThresholdLabel = str(congThresholds)
extraTitleLabel += " {0}\\%".format(congThresholdLabel)
numPathsClass2 = len(experimentGroup1[key1][key2][0].pCongClass2PerPath)
for iPath in range(numPathsClass2):
curvey = []
for key2 in paramKeys:
for e in experimentGroup1[key1][key2]:
curvey.append(sorted(e.pCongClass2PerPath)[iPath])
print 'curvey (2) =', curvey
curves2y.append(curvey)
# Draw curve plot
fig += 1
plot = {}
metric = 'prob-cong-path'
plot['title'] = experimentGroupTitle[key1].replace('@metric@', 'Probability of congestion per path')
if extraTitleLabel:
plot['title'] = experimentGroupTitle[key1].replace('@label@', extraTitleLabel)
plot['xLabel'] = experimentGroupLegendTitle[key1]
plot['yLabel'] = 'Probability of congestion (\\%)'
plot['fontScale'] = 1.0
plot['grid'] = ''
plot['xmin'] = 0
plot['xmax'] = max(curvex) + 1
plot['ymin'] = 0
#plot['ymax'] = 100.0
plot['minorXTicks'] = 0
plot['majorXTicks'] = len(curvexLabels)
#
print 'curves1y = ', curves1y
print 'curves2y = ', curves2y
plot['data'] = []
for iPath in range(len(curves1y)):
curve = {}
curve['type'] = 'bar'
curve['x'] = curvex
curve['y'] = curves1y[iPath]
curve['hatch'] = '/' if iPath == 0 else '\\'
curve['label'] = 'Path class 1'
curve['color'] = [0.667, 0, 0.4]
plot['data'].append(curve)
for iPath in range(len(curves2y)):
curve = {}
curve['type'] = 'bar'
curve['x'] = curvex
curve['y'] = curves2y[iPath]
curve['hatch'] = '/' if iPath == 0 else '\\'
curve['label'] = 'Path class 2'
curve['color'] = [0., 0., 0.8]
plot['data'].append(curve)
plot['xTickLabels'] = curvexLabels
plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '.pdf'
plot['fileName'] = plot['fileName'].replace('\\', ' ')
plot['fileName'] = plot['fileName'].replace('%', ' ')
with open(plot['fileName'] + '.json', 'wb') as f:
json.dump(plot, f)
nicePlot(plot)
plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '.png'
plot['fileName'] = plot['fileName'].replace('\\', ' ')
plot['fileName'] = plot['fileName'].replace('%', ' ')
plot['w'] = 8
plot['h'] = 6
plot['dpi'] = 100
with open(plot['fileName'] + '.json', 'wb') as f:
json.dump(plot, f)
nicePlot(plot)
plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '-300.png'
plot['fileName'] = plot['fileName'].replace('\\', ' ')
plot['fileName'] = plot['fileName'].replace('%', ' ')
plot['w'] = 8
plot['h'] = 6
plot['dpi'] = 50
with open(plot['fileName'] + '.json', 'wb') as f:
json.dump(plot, f)
nicePlot(plot)
print plot['fileName']
## Emulator latency (processing delay) plot
curve1y = []
for key2 in paramKeys:
for e in experimentGroup1[key1][key2]:
curve1y.append(e.procDelayPeak)
print curve1y
curve2y = []
for key2 in paramKeys:
for e in experimentGroup1[key1][key2]:
curve2y.append(e.procDelayAverage)
print curve2y
curve3y = []
for key2 in paramKeys:
for e in experimentGroup1[key1][key2]:
curve3y.append(e.queuingDelay)
print curve3y
# Draw curve plot
fig += 1
plot = {}
metric = 'latency'
plot['title'] = experimentGroupTitle[key1].replace('@metric@', 'Emulator latency')
if extraTitleLabel:
plot['title'] = experimentGroupTitle[key1].replace('@label@', extraTitleLabel)
plot['xLabel'] = experimentGroupLegendTitle[key1]
plot['yLabel'] = 'Processing delay (us)'
plot['fontScale'] = 1.0
plot['grid'] = ''
plot['xmin'] = 0
plot['xmax'] = max(curvex) + 1
plot['ymin'] = 0
#plot['ymax'] = 100.0
plot['minorXTicks'] = 0
plot['majorXTicks'] = len(curvexLabels)
#
plot['data'] = []
#
curve = {}
curve['type'] = 'bar'
curve['x'] = curvex
curve['y'] = curve1y
curve['hatch'] = ''
curve['label'] = 'Peak'
curve['color'] = [0., 0, 0.8]
plot['data'].append(curve)
#
curve = {}
curve['type'] = 'bar'
curve['x'] = curvex
curve['y'] = curve2y
curve['hatch'] = ''
curve['label'] = 'Average'
curve['color'] = [0., 0., 0.4]
plot['data'].append(curve)
#
curve = {}
curve['type'] = 'line'
curve['x'] = [plot['xmin'], plot['xmax']]
curve['y'] = [curve3y[0], curve3y[0]]
curve['label'] = 'Queuing delay of 1 frame'
curve['pattern'] = '--'
curve['color'] = [0., 0., 0.]
plot['data'].append(curve)
#
plot['xTickLabels'] = curvexLabels
plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '-latency' + '.pdf'
plot['fileName'] = plot['fileName'].replace('\\', ' ')
plot['fileName'] = plot['fileName'].replace('%', ' ')
with open(plot['fileName'] + '.json', 'wb') as f:
json.dump(plot, f)
nicePlot(plot)
plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '-latency' + '.png'
plot['fileName'] = plot['fileName'].replace('\\', ' ')
plot['fileName'] = plot['fileName'].replace('%', ' ')
plot['w'] = 8
plot['h'] = 6
plot['dpi'] = 100
with open(plot['fileName'] + '.json', 'wb') as f:
json.dump(plot, f)
nicePlot(plot)
plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '-latency' + '-300.png'
plot['fileName'] = plot['fileName'].replace('\\', ' ')
plot['fileName'] = plot['fileName'].replace('%', ' ')
plot['w'] = 8
plot['h'] = 6
plot['dpi'] = 50
with open(plot['fileName'] + '.json', 'wb') as f:
json.dump(plot, f)
nicePlot(plot)
print plot['fileName']
## Emulator throughput plot
curve1y = []
for key2 in paramKeys:
for e in experimentGroup1[key1][key2]:
curve1y.append(e.throughputIn)
print curve1y
curve2y = []
for key2 in paramKeys:
for e in experimentGroup1[key1][key2]:
curve2y.append(e.throughputOut)
print curve2y
# Draw curve plot
fig += 1
plot = {}
metric = 'throughput'
plot['title'] = experimentGroupTitle[key1].replace('@metric@', 'Throughput')
if extraTitleLabel:
plot['title'] = experimentGroupTitle[key1].replace('@label@', extraTitleLabel)
plot['xLabel'] = experimentGroupLegendTitle[key1]
plot['yLabel'] = 'Throughput (Mbps)'
plot['fontScale'] = 1.0
plot['grid'] = ''
plot['xmin'] = 0
plot['xmax'] = max(curvex) + 1
plot['ymin'] = 0
#plot['ymax'] = 100.0
plot['minorXTicks'] = 0
plot['majorXTicks'] = len(curvexLabels)
#
plot['data'] = []
#
curve = {}
curve['type'] = 'bar'
curve['x'] = curvex
curve['y'] = curve1y
curve['hatch'] = ''
curve['label'] = 'In'
curve['color'] = [0., 0, 0.8]
plot['data'].append(curve)
#
curve = {}
curve['type'] = 'bar'
curve['x'] = curvex
curve['y'] = curve2y
curve['hatch'] = ''
curve['label'] = 'Out'
curve['color'] = [0., 0., 0.4]
plot['data'].append(curve)
# Hardcode 100 Mbps line
curve = {}
curve['type'] = 'line'
curve['x'] = [plot['xmin'], plot['xmax']]
curve['y'] = [100.0, 100.0]
curve['label'] = ''
curve['pattern'] = '--'
curve['color'] = [0., 0., 0.]
plot['data'].append(curve)
#
plot['xTickLabels'] = curvexLabels
plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '-throughput' + '.pdf'
plot['fileName'] = plot['fileName'].replace('\\', ' ')
plot['fileName'] = plot['fileName'].replace('%', ' ')
with open(plot['fileName'] + '.json', 'wb') as f:
json.dump(plot, f)
nicePlot(plot)
plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '-throughput' + '.png'
plot['fileName'] = plot['fileName'].replace('\\', ' ')
plot['fileName'] = plot['fileName'].replace('%', ' ')
plot['w'] = 8
plot['h'] = 6
plot['dpi'] = 100
with open(plot['fileName'] + '.json', 'wb') as f:
json.dump(plot, f)
nicePlot(plot)
plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '-throughput' + '-300.png'
plot['fileName'] = plot['fileName'].replace('\\', ' ')
plot['fileName'] = plot['fileName'].replace('%', ' ')
plot['w'] = 8
plot['h'] = 6
plot['dpi'] = 50
with open(plot['fileName'] + '.json', 'wb') as f:
json.dump(plot, f)
nicePlot(plot)
print plot['fileName']
elif plotNumber == '5' or plotNumber == '5b' or plotNumber == '8' or plotNumber == '8b' or plotNumber == '8b+':
# Non-neutrality
curvex = []
curvexLabels = ['']
curvey = []
paramKeys = naturalSorted(experimentGroup1[key1].keys())
if plotNumber == '5' or plotNumber == '5b':
paramKeys = reversed(paramKeys)
for key2 in paramKeys:
curvex.append(1 + len(curvex))
curvexLabels.append(key2)
curvexLabels.append('')
print 'curvex =', curvex
print 'curvexLabels = ', curvexLabels
for key2 in paramKeys:
for e in experimentGroup1[key1][key2]:
curvey.append(max(e.pCongLinkComputed) - min(e.pCongLinkComputed))
print 'curvey =', curvey
# Draw curve plot
fig += 1
plot = {}
metric = 'non-neutrality'
plot['title'] = experimentGroupTitle[key1].replace('@metric@', 'Computed non-neutrality')
if extraTitleLabel:
plot['title'] = experimentGroupTitle[key1].replace('@label@', extraTitleLabel)
plot['xLabel'] = experimentGroupLegendTitle[key1]
plot['yLabel'] = 'Computed non-neutrality (0-100)'
plot['fontScale'] = 1.0
plot['grid'] = ''
plot['xmin'] = 0
plot['xmax'] = max(curvex) + 1
plot['ymin'] = 0
#plot['ymax'] = 100.0
plot['minorXTicks'] = 0
plot['majorXTicks'] = len(curvexLabels)
plot['noLegend'] = 1
#
plot['data'] = []
curve = {}
curve['type'] = 'line'
curve['x'] = curvex
curve['y'] = curvey
curve['pattern'] = '-+'
#curve['label'] = 'Computed non-neutrality'
curve['color'] = [0.667, 1.0, 0.8]
plot['data'].append(curve)
plot['xTickLabels'] = curvexLabels
plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '.pdf'
plot['fileName'] = plot['fileName'].replace('\\', ' ')
plot['fileName'] = plot['fileName'].replace('%', ' ')
with open(plot['fileName'] + '.json', 'wb') as f:
json.dump(plot, f)
nicePlot(plot)
plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '.png'
plot['fileName'] = plot['fileName'].replace('\\', ' ')
plot['fileName'] = plot['fileName'].replace('%', ' ')
plot['w'] = 8
plot['h'] = 6
plot['dpi'] = 100
with open(plot['fileName'] + '.json', 'wb') as f:
json.dump(plot, f)
nicePlot(plot)
plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '-300.png'
plot['fileName'] = plot['fileName'].replace('\\', ' ')
plot['fileName'] = plot['fileName'].replace('%', ' ')
plot['w'] = 8
plot['h'] = 6
plot['dpi'] = 50
with open(plot['fileName'] + '.json', 'wb') as f:
json.dump(plot, f)
nicePlot(plot)
print plot['fileName']
else:
print 'Unknown plotNumber:', plotNumber
exit()
| gpl-2.0 |
mdrumond/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder_test.py | 26 | 13023 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DataFeeder`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class DataFeederTest(test.TestCase):
# pylint: disable=undefined-variable
"""Tests for `DataFeeder`."""
def _wrap_dict(self, data, prepend=''):
return {prepend + '1': data, prepend + '2': data}
def _assert_raises(self, input_data):
with self.assertRaisesRegexp(TypeError, 'annot convert'):
data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
def _assert_dtype(self, expected_np_dtype, expected_tf_dtype, input_data):
feeder = data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
if isinstance(input_data, dict):
for k, v in list(feeder.input_dtype.items()):
self.assertEqual(expected_np_dtype, v)
else:
self.assertEqual(expected_np_dtype, feeder.input_dtype)
with ops.Graph().as_default() as g, self.test_session(g):
inp, _ = feeder.input_builder()
if isinstance(inp, dict):
for k, v in list(inp.items()):
self.assertEqual(expected_tf_dtype, v.dtype)
else:
self.assertEqual(expected_tf_dtype, inp.dtype)
def test_input_int8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int8)
self._assert_dtype(np.int8, dtypes.int8, data)
self._assert_dtype(np.int8, dtypes.int8, self._wrap_dict(data))
def test_input_int16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int16)
self._assert_dtype(np.int16, dtypes.int16, data)
self._assert_dtype(np.int16, dtypes.int16, self._wrap_dict(data))
def test_input_int32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int32)
self._assert_dtype(np.int32, dtypes.int32, data)
self._assert_dtype(np.int32, dtypes.int32, self._wrap_dict(data))
def test_input_int64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int64)
self._assert_dtype(np.int64, dtypes.int64, data)
self._assert_dtype(np.int64, dtypes.int64, self._wrap_dict(data))
def test_input_uint32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint32)
self._assert_dtype(np.uint32, dtypes.uint32, data)
self._assert_dtype(np.uint32, dtypes.uint32, self._wrap_dict(data))
def test_input_uint64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint64)
self._assert_dtype(np.uint64, dtypes.uint64, data)
self._assert_dtype(np.uint64, dtypes.uint64, self._wrap_dict(data))
def test_input_uint8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint8)
self._assert_dtype(np.uint8, dtypes.uint8, data)
self._assert_dtype(np.uint8, dtypes.uint8, self._wrap_dict(data))
def test_input_uint16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint16)
self._assert_dtype(np.uint16, dtypes.uint16, data)
self._assert_dtype(np.uint16, dtypes.uint16, self._wrap_dict(data))
def test_input_float16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float16)
self._assert_dtype(np.float16, dtypes.float16, data)
self._assert_dtype(np.float16, dtypes.float16, self._wrap_dict(data))
def test_input_float32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float32)
self._assert_dtype(np.float32, dtypes.float32, data)
self._assert_dtype(np.float32, dtypes.float32, self._wrap_dict(data))
def test_input_float64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float64)
self._assert_dtype(np.float64, dtypes.float64, data)
self._assert_dtype(np.float64, dtypes.float64, self._wrap_dict(data))
def test_input_bool(self):
data = np.array([[False for _ in xrange(2)] for _ in xrange(2)])
self._assert_dtype(np.bool, dtypes.bool, data)
self._assert_dtype(np.bool, dtypes.bool, self._wrap_dict(data))
def test_input_string(self):
input_data = np.array([['str%d' % i for i in xrange(2)] for _ in xrange(2)])
self._assert_dtype(input_data.dtype, dtypes.string, input_data)
self._assert_dtype(input_data.dtype, dtypes.string,
self._wrap_dict(input_data))
def _assertAllClose(self, src, dest, src_key_of=None, src_prop=None):
def func(x):
val = getattr(x, src_prop) if src_prop else x
return val if src_key_of is None else src_key_of[val]
if isinstance(src, dict):
for k in list(src.keys()):
self.assertAllClose(func(src[k]), dest)
else:
self.assertAllClose(func(src), dest)
def test_unsupervised(self):
def func(feeder):
with self.test_session():
inp, _ = feeder.input_builder()
feed_dict_fn = feeder.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[1, 2]], feed_dict, 'name')
data = np.matrix([[1, 2], [2, 3], [3, 4]])
func(data_feeder.DataFeeder(data, None, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data), None, n_classes=0, batch_size=1))
def test_data_feeder_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [2, 1], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
def test_epoch(self):
def func(feeder):
with self.test_session():
feeder.input_builder()
epoch = feeder.make_epoch_variable()
feed_dict_fn = feeder.get_feed_dict_fn()
# First input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Second input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Third input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Back to the first input again, so new epoch.
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [1])
data = np.matrix([[1, 2], [2, 3], [3, 4]])
labels = np.array([0, 0, 1])
func(data_feeder.DataFeeder(data, labels, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data, 'in'),
self._wrap_dict(labels, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=1))
def test_data_feeder_multioutput_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [[3, 4], [1, 2]], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[1, 2], [3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
def test_data_feeder_multioutput_classification(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(
out, [[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]], feed_dict,
'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[0, 1, 2], [2, 3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=5, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(5, 'out'),
batch_size=2))
def test_streaming_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[[1, 2]], [[3, 4]]], feed_dict, 'name')
self._assertAllClose(out, [[[1], [2]], [[2], [2]]], feed_dict, 'name')
def x_iter(wrap_dict=False):
yield np.array([[1, 2]]) if not wrap_dict else self._wrap_dict(
np.array([[1, 2]]), 'in')
yield np.array([[3, 4]]) if not wrap_dict else self._wrap_dict(
np.array([[3, 4]]), 'in')
def y_iter(wrap_dict=False):
yield np.array([[1], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[1], [2]]), 'out')
yield np.array([[2], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[2], [2]]), 'out')
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=2))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
# Test non-full batches.
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=10))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=10))
def test_dask_data_feeder(self):
if HAS_PANDAS and HAS_DASK:
x = pd.DataFrame(
dict(
a=np.array([.1, .3, .4, .6, .2, .1, .6]),
b=np.array([.7, .8, .1, .2, .5, .3, .9])))
x = dd.from_pandas(x, npartitions=2)
y = pd.DataFrame(dict(labels=np.array([1, 0, 2, 1, 0, 1, 2])))
y = dd.from_pandas(y, npartitions=2)
# TODO(ipolosukhin): Remove or restore this.
# x = extract_dask_data(x)
# y = extract_dask_labels(y)
df = data_feeder.DaskDataFeeder(x, y, n_classes=2, batch_size=2)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[0.40000001, 0.1],
[0.60000002, 0.2]])
self.assertAllClose(feed_dict[out.name], [[0., 0., 1.], [0., 1., 0.]])
def test_hdf5_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self.assertAllClose(out, [2, 1], feed_dict, 'name')
try:
import h5py # pylint: disable=g-import-not-at-top
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
h5f = h5py.File('test_hdf5.h5', 'w')
h5f.create_dataset('x', data=x)
h5f.create_dataset('y', data=y)
h5f.close()
h5f = h5py.File('test_hdf5.h5', 'r')
x = h5f['x']
y = h5f['y']
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
except ImportError:
print("Skipped test for hdf5 since it's not installed.")
class SetupPredictDataFeederTest(DataFeederTest):
"""Tests for `DataFeeder.setup_predict_data_feeder`."""
def test_iterable_data(self):
# pylint: disable=undefined-variable
def func(df):
self._assertAllClose(six.next(df), [[1, 2], [3, 4]])
self._assertAllClose(six.next(df), [[5, 6]])
data = [[1, 2], [3, 4], [5, 6]]
x = iter(data)
x_dict = iter([self._wrap_dict(v) for v in iter(data)])
func(data_feeder.setup_predict_data_feeder(x, batch_size=2))
func(data_feeder.setup_predict_data_feeder(x_dict, batch_size=2))
if __name__ == '__main__':
test.main()
| apache-2.0 |
dominikwille/comp | sheet4/Aufgabe 1.py | 1 | 5871 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
#
# @author Dominik Wille
# @author Stefan Pojtinger
# @tutor Alexander Schlaich
# @sheet 4
#
# Please uncomment the plots for testing.
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
np.seterr(all='ignore')
#4.1.1
#To solve the system with the Gauss-Seidel method, helper functions for
#decomposing the input matrix are implemented first, followed by
#gaussseidel(L,D,R,b,k). Here k is the convergence criterion: for k != 0 the
#iteration stops once the change between successive iterates drops below k;
#for k == 0 a fixed number of 500 iteration steps is used.
#For the given matrix, 12 iteration steps are needed.
#Read in the data
a = np.matrix([[3.5, 3., -0.5], [-1, 4, 4], [1./3., 3., 4.5]])
b = np.matrix([[7.5],[-6.5],[1.]])
#Functions for splitting the matrix into its strictly lower, strictly upper
#and diagonal parts
def untermat(a):
du = np.zeros(shape=(len(a),len(a)))
for i in range (0, len(a)):
for j in range (0, len(a)):
if i>j:
du[i,j]=a[i,j]
return du
def obermat(a):
do = np.zeros(shape=(len(a),len(a)))
for i in range (0, len(a)):
for j in range (0, len(a)):
if i<j:
do[i,j]=a[i,j]
return do
def diagmat(a):
d = np.zeros(shape=(len(a),len(a)))
for i in range (0, len(a)):
for j in range (0, len(a)):
if i==j:
d[i,j]=a[i,j]
return d
#Decompose the matrix
R = obermat(a)
L = untermat(a)
D = diagmat(a)
#Gauß-Seidel
def gaussseidel(L,D,R,b,k):
X = np.zeros(shape=(len(a),1))
    #Helper functions: rechteseite builds the right-hand side of each
    #iteration step, einsetzen performs the forward substitution
def rechteseite(R,X,b,i):
return -R*np.matrix(X[:,i]).reshape((len(X), 1))+b
def einsetzen(F,d):
l = np.zeros(shape=(d.shape[0],1))
mod = 0
for i in range (0,d.shape[0]):
for j in range(0,i+1):
mod = mod+F[i,j]*l[j]
l[i] = (d[i]-mod)/F[i,i]
mod = 0
return l
    #Iterate until the convergence criterion is met
if k != 0:
i = 0
konv=10
while konv>k:
X = np.concatenate((X,einsetzen(D+L,rechteseite(R,X,b,i))),axis=1)
konv = np.linalg.norm(X[:,i-1]-X[:,i])
i+=1
else:
        #Iterate for a fixed number of steps
for i in range(0,500):
X = np.concatenate((X,einsetzen(D+L,rechteseite(R,X,b,i))),axis=1)
    #Output
    print('Evaluation with the Gauss-Seidel method gave the following values:')
    print('Solution vector:')
    X = X[:,i]
    print(np.round(X,4))
    print('Number of steps:')
    print(i+1)
print('Exercise 4.1.1:')
gaussseidel(L,D,R,b,0.5E-4)
#4.1.2
#jacobi(L,D,R,b,k) takes the same arguments as in the previous exercise.
#The number of iteration steps increases slightly, while the result stays
#the same to 4 decimal places.
def jacobi(L,D,R,b,k):
X = np.zeros(shape=(len(a),1))
    #Helper functions
def rechteseite(R,X,b,i):
return (-L-R)*np.matrix(X[:,i]).reshape((len(X), 1))+b
def einsetzen(F,d):
l = np.zeros(shape=(d.shape[0],1))
for i in range (0,d.shape[0]):
l [i] = d[i]/F[i,i]
return l
    #Iterate until the convergence criterion is met
if k != 0:
i = 0
konv=10
while konv>k:
X = np.concatenate((X,einsetzen(D,rechteseite(R,X,b,i))),axis=1)
konv = np.linalg.norm(X[:,i-1]-X[:,i])
i+=1
else:
        #Iterate for a fixed number of steps
for i in range(0,500):
X = np.concatenate((X,einsetzen(D,rechteseite(R,X,b,i))),axis=1)
    #Output
    print('Evaluation with the Jacobi method gave the following values:')
    print('Solution vector:')
    X = X[:,i]
    print(np.round(X,4))
    print('Number of steps:')
    print(i+1)
print('Exercise 4.1.2:')
jacobi(L,D,R,b,0.5E-4)
#4.1.3
#The method converges for every matrix for which the spectral radius of the
#iteration matrix -(D+L)**(-1)*R is smaller than 1.
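#Illustrative check of this criterion (an added sketch, not part of the
#original submission; uses only numpy, already imported as np). Uncomment to
#evaluate it for the matrix defined above:
#T = -np.dot(np.linalg.inv(D + L), R)
#print(max(abs(np.linalg.eigvals(T))))  # spectral radius, should be < 1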
#4.1.4
#This system can only be solved with the Gauss-Seidel method; the Jacobi
#iteration does not converge for this matrix.
#Read in the new data
a = np.matrix([[5., 3., -1.,2.], [-3., 7., 6., -2.], [4., 4., 3., -3.], [-5., 2., 2., 4.]])
b = np.matrix([[8.],[1.],[7.],[2.]])
#Decompose the matrix
R = obermat(a)
L = untermat(a)
D = diagmat(a)
#Solve the system
print('Exercise 4.1.4:')
jacobi(L,D,R,b,0)
gaussseidel(L,D,R,b,0)
#4.1.5
import matplotlib.pyplot as plt
#from scipy.signal import argrelextrema
#For plotting, a new function was defined and the resulting plot was exported
#as Figure 1. The plot shows that the method converges for all w-values in
#(0,2), with the exception of one w-value at about 1.1.
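#Note on the relaxation scheme implemented below (added for clarity): with
#relaxation factor w the update solves
#    ((1/w)*D + L) * x_{k+1} = ((1/w - 1)*D - R) * x_k + b,
#which reduces to the plain Gauss-Seidel iteration for w = 1.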
def gaussseidelrelax(w):
a = np.matrix([[5., 3., -1.,2.], [-3., 7., 6., -2.], [4., 4., 3., -3.], [-5., 2., 2., 4.]])
b = np.matrix([[8.],[1.],[7.],[2.]])
R = obermat(a)
L = untermat(a)
D = diagmat(a)
X = np.zeros(shape=(len(a),1))
    #Helper functions
def rechteseite(R,X,b,i):
return ((1./w)*D-D-R)*np.matrix(X[:,i]).reshape((len(X), 1))+b
def einsetzen(F,d):
l = np.zeros(shape=(d.shape[0],1))
mod = 0
for i in range (0,d.shape[0]):
for j in range(0,i+1):
mod = mod+F[i,j]*l[j]
l[i] = (d[i]-mod)/F[i,i]
mod = 0
return l
    #Iterate until the convergence criterion is met
i = 0
konv=10
while konv>0.5E-4:
X = np.concatenate((X,einsetzen((1./w)*D+L,rechteseite(R,X,b,i))),axis=1)
konv = np.linalg.norm(X[:,i-1]-X[:,i])
i+=1
    #Output: number of iteration steps needed
return i+1
#Plot:
#xp=np.arange(0.01,2,0.01)
#vgaussseidelrelax=np.vectorize(gaussseidelrelax)
#yp=vgaussseidelrelax(xp)
#plt.plot(xp,yp)
#plt.show()
#print argrelextrema(yp, np.greater)
#for i in range(0,len(argrelextrema(yp, np.greater))):
# print xp[argrelextrema(yp, np.greater)]
| bsd-2-clause |
jreback/pandas | pandas/tests/series/methods/test_align.py | 2 | 5341 | import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import Series, date_range, period_range
import pandas._testing as tm
@pytest.mark.parametrize(
"first_slice,second_slice",
[
[[2, None], [None, -5]],
[[None, 0], [None, -5]],
[[None, -5], [None, 0]],
[[None, 0], [None, 0]],
],
)
@pytest.mark.parametrize("fill", [None, -1])
def test_align(datetime_series, first_slice, second_slice, join_type, fill):
a = datetime_series[slice(*first_slice)]
b = datetime_series[slice(*second_slice)]
aa, ab = a.align(b, join=join_type, fill_value=fill)
join_index = a.index.join(b.index, how=join_type)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
assert (aa.reindex(diff_a) == fill).all()
if len(diff_b) > 0:
assert (ab.reindex(diff_b) == fill).all()
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
tm.assert_series_equal(aa, ea)
tm.assert_series_equal(ab, eb)
assert aa.name == "ts"
assert ea.name == "ts"
assert ab.name == "ts"
assert eb.name == "ts"
@pytest.mark.parametrize(
"first_slice,second_slice",
[
[[2, None], [None, -5]],
[[None, 0], [None, -5]],
[[None, -5], [None, 0]],
[[None, 0], [None, 0]],
],
)
@pytest.mark.parametrize("method", ["pad", "bfill"])
@pytest.mark.parametrize("limit", [None, 1])
def test_align_fill_method(
datetime_series, first_slice, second_slice, join_type, method, limit
):
a = datetime_series[slice(*first_slice)]
b = datetime_series[slice(*second_slice)]
aa, ab = a.align(b, join=join_type, method=method, limit=limit)
join_index = a.index.join(b.index, how=join_type)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
tm.assert_series_equal(aa, ea)
tm.assert_series_equal(ab, eb)
def test_align_nocopy(datetime_series):
b = datetime_series[:5].copy()
# do copy
a = datetime_series.copy()
ra, _ = a.align(b, join="left")
ra[:5] = 5
assert not (a[:5] == 5).any()
# do not copy
a = datetime_series.copy()
ra, _ = a.align(b, join="left", copy=False)
ra[:5] = 5
assert (a[:5] == 5).all()
# do copy
a = datetime_series.copy()
b = datetime_series[:5].copy()
_, rb = a.align(b, join="right")
rb[:3] = 5
assert not (b[:3] == 5).any()
# do not copy
a = datetime_series.copy()
b = datetime_series[:5].copy()
_, rb = a.align(b, join="right", copy=False)
rb[:2] = 5
assert (b[:2] == 5).all()
def test_align_same_index(datetime_series):
a, b = datetime_series.align(datetime_series, copy=False)
assert a.index is datetime_series.index
assert b.index is datetime_series.index
a, b = datetime_series.align(datetime_series, copy=True)
assert a.index is not datetime_series.index
assert b.index is not datetime_series.index
def test_align_multiindex():
# GH 10665
midx = pd.MultiIndex.from_product(
[range(2), range(3), range(2)], names=("a", "b", "c")
)
idx = pd.Index(range(2), name="b")
s1 = Series(np.arange(12, dtype="int64"), index=midx)
s2 = Series(np.arange(2, dtype="int64"), index=idx)
# these must be the same results (but flipped)
res1l, res1r = s1.align(s2, join="left")
res2l, res2r = s2.align(s1, join="right")
expl = s1
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
res1l, res1r = s1.align(s2, join="right")
res2l, res2r = s2.align(s1, join="left")
exp_idx = pd.MultiIndex.from_product(
[range(2), range(2), range(2)], names=("a", "b", "c")
)
expl = Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = Series([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
@pytest.mark.parametrize("method", ["backfill", "bfill", "pad", "ffill", None])
def test_align_with_dataframe_method(method):
# GH31788
ser = Series(range(3), index=range(3))
df = pd.DataFrame(0.0, index=range(3), columns=range(3))
result_ser, result_df = ser.align(df, method=method)
tm.assert_series_equal(result_ser, ser)
tm.assert_frame_equal(result_df, df)
def test_align_dt64tzindex_mismatched_tzs():
idx1 = date_range("2001", periods=5, freq="H", tz="US/Eastern")
ser = Series(np.random.randn(len(idx1)), index=idx1)
ser_central = ser.tz_convert("US/Central")
# different timezones convert to UTC
new1, new2 = ser.align(ser_central)
assert new1.index.tz == pytz.UTC
assert new2.index.tz == pytz.UTC
def test_align_periodindex(join_type):
rng = period_range("1/1/2000", "1/1/2010", freq="A")
ts = Series(np.random.randn(len(rng)), index=rng)
# TODO: assert something?
ts.align(ts[::2], join=join_type)
| bsd-3-clause |
baymeevag/science_bot | generators/Markov.py | 1 | 1400 | from random import randint, choice
import tweepy
import time
import pandas as pd
import os
from corpus.CorpusCollector import CorpusCollector
from utils.config import START_TOKEN, END_TOKEN
class Markov:
    """Simple first-order Markov chain generator built from article titles."""
    def __init__(self, topic):  # topic string used to build the corpus
self.transition = {}
self._init_corpus(topic)
def _init_corpus(self, topic):
collector = CorpusCollector(topic)
corpus = pd.read_csv(collector.file_name)['article_name'].values
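        # Map each word to the list of words observed to follow it; duplicates
        # are kept so that random.choice samples with the observed frequency.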
for line in corpus:
words = [START_TOKEN] + line.split() + [END_TOKEN]
for word1, word2 in zip(words[:-1], words[1:]):
if word1 in self.transition:
self.transition[word1].append(word2)
else:
self.transition[word1] = [word2]
def generate(self):
result = [START_TOKEN]
current_word = result[-1]
while current_word in self.transition.keys() and \
self.transition[current_word] and \
current_word != END_TOKEN:
current_word = choice(self.transition[current_word])
result.append(current_word)
return ' '.join(result[1:-1])
def get_tweet(self):
tweet = self.generate()
while len(tweet) > 140 or len(tweet.split()) < 5:
tweet = self.generate()
return tweet
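# Minimal usage sketch (added for illustration; the topic string is made up
# and assumes CorpusCollector can build a corpus for it):
# if __name__ == '__main__':
#     markov = Markov('machine learning')
#     print(markov.get_tweet())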
| gpl-3.0 |
rubikloud/scikit-learn | sklearn/kernel_ridge.py | 37 | 6556 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
    dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
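        # Closed-form dual solution: solve (K + alpha * I) dual_coef = y with
        # the Cholesky-based helper (sample weights applied if provided).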
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
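        # Kernel between the new samples and the training samples; the
        # prediction is a linear combination of the dual coefficients.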
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
| bsd-3-clause |
Alex-Ian-Hamilton/sunpy | sunpy/instr/goes.py | 1 | 57937 | """
Contains functions useful for analysing GOES/XRS data.
Each of the Geostationary Operational Environmental Satellite (GOES) series
since the mid-1970s has carried an X-Ray Sensor (XRS) which observes
full-disk-integrated solar flux in two broadband channels:
1--8 angstrom (long); and 0.5--4 angstrom (short). For more information on
the GOES/XRS instrument, see Hanser & Sellers (1996). GOES/XRS has become
the "standard candle" for solar coronal observations due its longevity and
consistency. The GOES event list, based on GOES/XRS observations, has
become the standard solar flare catalogue.
See http://www.ngdc.noaa.gov/stp/solar/solarflares.html for information
on the GOES event list definitions and data.
The functions in this module provide useful software to analyse GOES/XRS
observations. First they allow the GOES event list to be imported into a
python session (get_goes_event_list()).
They also allow the thermodynamic properties of the emitting solar plasma to
be determined. Temperature and emission measure are obtained using
calculate_temperature_em(), which calls _goes_chianti_tem(), which in turn
calls _goes_get_chianti_temp() and _goes_get_chianti_em(). These two
functions currently rely on lookup tables relating the GOES fluxes to the
isothermal temperature and volume emission measure. These tables were
calculated by functions in SolarSoftWare (SSW) using the CHIANTI atomic
physics database (Dere et al. 2009). For more detail, see the docstring of
calculate_temperature_em() and references therein.
The radiative loss rate of the soft X-ray-emitting plasma across all
wavelengths can be found with calculate_radiative_loss_rate(). This function
calls _calc_rad_loss() which, like _goes_get_chianti_temp() and
_goes_get_chianti_em(), makes use of a look up table calculated by functions
in SSW using CHIANTI. This table relates the temperature and emission
measure of the emitting solar plasma to the thermal energy radiative over
all wavelengths. For more information on how this is done, see
the docstring of _calc_rad_loss() and reference therein.
Meanwhile, the X-ray luminosity in the two GOES passbands can be
obtained by calculate_xray_luminosity(). To do so, this function calls
_goes_lx() and calc_xraylum().
References
----------
Hanser, F.A., & Sellers, F.B. 1996, Proc. SPIE, 2812, 344
Dere, K.P., et al. 2009 A&A, 498, 915 DOI: 10.1051/0004-6361/200911712
"""
from __future__ import absolute_import, division
import os.path
import datetime
import csv
import copy
import socket
from itertools import dropwhile
import numpy as np
from scipy import interpolate
from scipy.integrate import trapz, cumtrapz
import astropy.units as u
import pandas
from sunpy.net import hek
from sunpy.time import parse_time
from sunpy import config
from sunpy import lightcurve
from sunpy.util.net import check_download_file
from sunpy import sun
GOES_CONVERSION_DICT = {'X': u.Quantity(1e-4, "W/m^2"),
'M': u.Quantity(1e-5, "W/m^2"),
'C': u.Quantity(1e-6, "W/m^2"),
'B': u.Quantity(1e-7, "W/m^2"),
'A': u.Quantity(1e-8, "W/m^2")}
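# Lower flux bound of each GOES flare-class letter; e.g. an M2.5 flare
# corresponds to a peak 1-8 angstrom flux of 2.5e-5 W/m^2.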
__all__ = ['get_goes_event_list', 'calculate_temperature_em',
'calculate_radiative_loss_rate', 'calculate_xray_luminosity', 'flux_to_flareclass',
'flareclass_to_flux']
try:
# Check required data files are present in user's default download dir
# Define location where GOES data files are stored.
# Manually resolve the hostname
HOST = socket.gethostbyname_ex('hesperia.gsfc.nasa.gov')[0]
except socket.gaierror:
HOST = ''
GOES_REMOTE_PATH = "http://{0}/ssw/gen/idl/synoptic/goes/".format(HOST)
# Define location where data files should be downloaded to.
DATA_PATH = config.get("downloads", "download_dir")
# Define variables for file names
FILE_TEMP_COR = "goes_chianti_temp_cor.csv"
FILE_TEMP_PHO = "goes_chianti_temp_pho.csv"
FILE_EM_COR = "goes_chianti_em_cor.csv"
FILE_EM_PHO = "goes_chianti_em_pho.csv"
FILE_RAD_COR = "chianti7p1_rad_loss.txt"
def get_goes_event_list(timerange, goes_class_filter=None):
"""
Retrieve list of flares detected by GOES within a given time range.
Parameters
----------
timerange : `sunpy.time.TimeRange`
The time range to download the event list for.
goes_class_filter: (optional) string
A string specifying a minimum GOES class for inclusion in the list,
e.g. 'M1', 'X2'.
"""
# use HEK module to search for GOES events
client = hek.HEKClient()
event_type = 'FL'
tstart = timerange.start
tend = timerange.end
# query the HEK for a list of events detected by the GOES instrument
# between tstart and tend (using a GOES-class filter)
if goes_class_filter:
result = client.query(hek.attrs.Time(tstart, tend),
hek.attrs.EventType(event_type),
hek.attrs.FL.GOESCls > goes_class_filter,
hek.attrs.OBS.Observatory == 'GOES')
else:
result = client.query(hek.attrs.Time(tstart, tend),
hek.attrs.EventType(event_type),
hek.attrs.OBS.Observatory == 'GOES')
# want to condense the results of the query into a more manageable
# dictionary
# keep event data, start time, peak time, end time, GOES-class,
# location, active region source (as per GOES list standard)
# make this into a list of dictionaries
goes_event_list = []
for r in result:
goes_event = {
'event_date': parse_time(r['event_starttime']).date().strftime(
'%Y-%m-%d'),
'start_time': parse_time(r['event_starttime']),
'peak_time': parse_time(r['event_peaktime']),
'end_time': parse_time(r['event_endtime']),
'goes_class': str(r['fl_goescls']),
'goes_location': (r['event_coord1'], r['event_coord2']),
'noaa_active_region': r['ar_noaanum']
}
goes_event_list.append(goes_event)
return goes_event_list
def calculate_temperature_em(goeslc, abundances="coronal",
download=False, download_dir=None):
"""
Calculates temperature and emission measure from a GOESLightCurve.
This function calculates the isothermal temperature and
corresponding volume emission measure of the solar soft X-ray
emitting plasma observed by the GOES/XRS. This is done using the
observed flux ratio of the short (0.5-4 angstrom) to long (1-8 angstrom)
channels. The results are returned in a new LightCurve object which
contains metadata and flux data of the input LightCurve object in
addition to the newly found temperature and emission measure values.
Parameters
----------
goeslc : `~sunpy.lightcurve.LightCurve`
LightCurve object containing GOES flux data which MUST
be in units of W/m^2.
abundances : (optional) string equalling 'coronal' or 'photospheric'
States whether photospheric or coronal abundances should be
assumed.
Default='coronal'
download : (optional) `bool`
If True, the GOES temperature and emission measure data files are
downloaded. It is important to do this if a new version of the files
has been generated due to a new CHIANTI version being released or the
launch of new GOES satellites since these files were last downloaded.
Default=False
download_dir : (optional) `string`
The directory to download the GOES temperature and emission measure
data files to.
Default=SunPy default download directory
Returns
-------
lc_new : `~sunpy.lightcurve.LightCurve`
Contains same metadata and data as input GOESLightCurve with the
following two additional data columns:
| lc_new.data.temperature - Array of temperatures [MK]
| lc_new.data.em - Array of volume emission measures [cm**-3]
Notes
-----
The temperature and volume emission measure are calculated here
using the methods of White et al. (2005) who used the
CHIANTI atomic physics database to model the response of the ratio
of the short (0.5-4 angstrom) to long (1-8 angstrom) channels of the
XRSs onboard various GOES satellites. This method assumes an
isothermal plasma, the ionisation equilibria of
Mazzotta et al. (1998), and a constant density of 10**10 cm**-3.
(See White et al. 2005 for justification of this last assumption.)
This function is based on goes_chianti_tem.pro in SolarSoftWare
written in IDL by Stephen White.
Recent fluxes released to the public are scaled to be consistent
with GOES-7. In fact these recent fluxes are correct and so this
correction must be removed before proceeding to use transfer
functions.
Email Rodney Viereck (NOAA) for more information.
Measurements of short channel flux of less than 1e-10 W/m**2 or
long channel flux less than 3e-8 W/m**2 are not considered good.
Ratio values corresponding to such fluxes are set to 0.003.
References
----------
.. [1] White, S. M., Thomas, R. J., & Schwartz, R. A. 2005,
Sol. Phys., 227, 231, DOI: 10.1007/s11207-005-2445-z
.. [2] Mazzotta, P., Mazzitelli, G., Colafrancesco, S., &
Vittorio, N. 1998, A&AS, 133, 339, DOI: 10.1051/aas:1998330
Examples
--------
>>> from sunpy.instr.goes import calculate_temperature_em
>>> import sunpy.lightcurve as lc
>>> time1 = "2014-01-01 00:00:00"
>>> time2 = "2014-01-01 00:00:08"
>>> goeslc = lc.GOESLightCurve.create(time1, time2)
>>> goeslc.data
xrsa xrsb
2014-01-01 00:00:00.421999 9.187300e-08 0.000004
2014-01-01 00:00:02.468999 9.187300e-08 0.000004
2014-01-01 00:00:04.518999 9.187300e-08 0.000004
2014-01-01 00:00:06.564999 9.298800e-08 0.000004
>>> goeslc_new = calculate_temperature_em(goeslc)
>>> goeslc_new.data
xrsa xrsb temperature em
2014-01-01 00:00:00.421999 9.187300e-08 0.000004 6.270239 6.440648e+48
2014-01-01 00:00:02.468999 9.187300e-08 0.000004 6.270239 6.440648e+48
2014-01-01 00:00:04.518999 9.187300e-08 0.000004 6.273917 6.422208e+48
2014-01-01 00:00:06.564999 9.298800e-08 0.000004 6.304001 6.350370e+48
"""
# Check that input argument is of correct type
if not isinstance(goeslc, lightcurve.LightCurve):
raise TypeError("goeslc must be a LightCurve object.")
if not download_dir:
download_dir = DATA_PATH
# Find temperature and emission measure with _goes_chianti_tem
temp, em = _goes_chianti_tem(
u.Quantity(goeslc.data.xrsb, unit=u.W/(u.m)**2),
u.Quantity(goeslc.data.xrsa, unit=u.W/(u.m)**2),
satellite=goeslc.meta["TELESCOP"].split()[1],
date=goeslc.data.index[0],
abundances=abundances, download=download, download_dir=download_dir)
# Enter results into new version of GOES LightCurve Object
# Use copy.deepcopy for replicating meta and data so that input
# lightcurve is not altered.
lc_new = lightcurve.LightCurve(meta=copy.deepcopy(goeslc.meta),
data=copy.deepcopy(goeslc.data))
lc_new.data["temperature"] = temp.value
lc_new.data["em"] = em.value
return lc_new
@u.quantity_input(longflux=u.W/u.m/u.m, shortflux=u.W/u.m/u.m)
def _goes_chianti_tem(longflux, shortflux, satellite=8,
date=datetime.datetime.today(), abundances="coronal",
download=False, download_dir=None):
"""
Calculates temperature and emission measure from GOES/XRS data.
This function calculates the isothermal temperature and volume
emission measure of the solar soft X-ray emitting plasma observed by
the GOES/XRS. This is done using the observed flux ratio of the
short (0.5-4 angstrom) to long (1-8 angstrom) channels.
Parameters
----------
longflux, shortflux : `~astropy.units.Quantity`
Arrays containing the long and short GOES/XRS flux measurements
respectively as a function of time. Must be of same length. [W/m**2].
satellite : int (optional)
Number of GOES satellite used to make observations, important for
correct calibration of data.
Default=8
date : `datetime.datetime` or `str`
        Date when observations were made. Important for correct calibration.
Default=today
abundances : (optional) string equalling 'coronal' or 'photospheric'
States whether photospheric or coronal abundances should be
assumed.
Default='coronal'
download : (optional) bool
If True, the GOES temperature and emission measure data files are
downloaded. It is important to do this if a new version of the files
has been generated due to a new CHIANTI version being released or the
launch of new GOES satellites since these files were last downloaded.
Default=False
download_dir : (optional) string
The directory to download the GOES temperature and emission measure
data files to.
Default=SunPy default download directory
Returns
-------
temp : `~astropy.units.Quantity`
Array of temperature values of same length as longflux and
shortflux. Units=[MK]
em : `~astropy.units.Quantity`
Array of volume emission measure values of same length as longflux
and shortflux. Units=[10**49 cm**-3]
Notes
-----
The temperature and volume emission measure are calculated here
using the methods of White et al. (2005) who used the
CHIANTI atomic physics database to model the response of the ratio
of the short (0.5-4 angstrom) to long (1-8 angstrom) channels of the
XRSs onboard various GOES satellites. This method assumes an
isothermal plasma, the ionisation equilibria of
Mazzotta et al. (1998), and a constant density of 10**10 cm**-3.
(See White et al. 2005 for justification of this last assumption.)
This function is based on goes_chianti_tem.pro in SolarSoftWare
written in IDL by Stephen White.
Recent fluxes released to the public are scaled to be consistent
with GOES-7. In fact these recent fluxes are correct and so this
correction must be removed before proceeding to use transfer
functions.
Email Rodney Viereck (NOAA) for more information.
Measurements of short channel flux of less than 1e-10 W/m**2 or
long channel flux less than 3e-8 W/m**2 are not considered good.
Ratio values corresponding to such fluxes are set to 0.003.
References
----------
.. [1] White, S. M., Thomas, R. J., & Schwartz, R. A. 2005,
Sol. Phys., 227, 231, DOI: 10.1007/s11207-005-2445-z
.. [2] Mazzotta, P., Mazzitelli, G., Colafrancesco, S., &
Vittorio, N. 1998, A&AS, 133, 339, DOI: 10.1051/aas:1998330
Examples
--------
>>> from sunpy.instr.goes import _goes_chianti_tem
>>> from astropy.units import Quantity
>>> longflux = Quantity([7e-6, 7e-6], unit="W/m/m")
>>> shortflux = Quantity([7e-7, 7e-7], unit="W/m/m")
>>> temp, em = _goes_chianti_tem(longflux, shortflux, satellite=15,
... date='2014-04-16',
... abundances="coronal")
>>> temp
<Quantity [ 11.28295376, 11.28295376] MK>
>>> em
<Quantity [ 4.78577516e+48, 4.78577516e+48] 1 / cm3>
"""
if not download_dir:
download_dir = DATA_PATH
# ENSURE INPUTS ARE OF CORRECT TYPE AND VALID VALUES
longflux = longflux.to(u.W/u.m/u.m)
shortflux = shortflux.to(u.W/u.m/u.m)
int(satellite)
if satellite < 1:
raise ValueError("satellite must be the number of a "
"valid GOES satellite (>1).")
date = parse_time(date)
# Check flux arrays are of same length.
if len(longflux) != len(shortflux):
raise ValueError(
"longflux and shortflux must have same number of elements.")
# PREPARE DATA
# GOES 6 long channel flux before 1983-Jun-28 must be corrected by a
# factor of 4.43/5.32
if date < datetime.datetime(1983, 6, 28) and satellite == 6:
longflux_corrected = longflux*(4.43/5.32)
else:
longflux_corrected = longflux
# Un-scale fluxes if GOES satellite is after 7. See 2nd paragraph
# in Notes section of docstring above.
if satellite > 7:
longflux_corrected = longflux_corrected / 0.7
shortflux_corrected = shortflux / 0.85
else:
shortflux_corrected = shortflux
# Calculate short to long channel ratio.
# Data which is not good have their ratio value set to 0.003.
# See Notes section in docstring above.
index = np.logical_or(
shortflux_corrected < u.Quantity(1e-10, unit="W/m**2"),
longflux_corrected < u.Quantity(3e-8, unit="W/m**2"))
fluxratio = shortflux_corrected / longflux_corrected
    fluxratio.value[index] = 0.003  # dimensionless ratio floor (see Notes)
# FIND TEMPERATURE AND EMISSION MEASURE FROM FUNCTIONS BELOW
temp = _goes_get_chianti_temp(fluxratio, satellite=satellite,
abundances=abundances, download=download,
download_dir=download_dir)
em = _goes_get_chianti_em(longflux_corrected, temp, satellite=satellite,
abundances=abundances, download=download,
download_dir=download_dir)
return temp, em
@u.quantity_input(fluxratio=u.dimensionless_unscaled)
def _goes_get_chianti_temp(fluxratio, satellite=8, abundances="coronal",
download=False, download_dir=None):
"""
Calculates temperature from GOES flux ratio.
This function calculates the isothermal temperature of the solar
soft X-ray emitting plasma observed by the GOES/XRS from the
observed flux ratio of the short (0.5-4 angstrom) to
long (1-8 angstrom) channels. This function is not intended to be
called directly but by _goes_chianti_tem(), although it can be used
independently. However, if used independently data preparation,
such as correctly rescaling fluxes for some satellites etc. will
not be carried out. This is done in _goes_chianti_tem().
Parameters
----------
fluxratio : `~astropy.units.Quantity`
Array containing the ratio of short channel to long channel
GOES/XRS flux measurements.
satellite : int (optional)
Number of GOES satellite used to make observations. Important for
correct calibration of data.
Default=8
abundances : (optional) string equalling 'coronal' or 'photospheric'
States whether photospheric or coronal abundances should be
assumed.
Default='coronal'
download : (optional) bool
If True, the GOES temperature data files are downloaded.
It is important to do this if a new version of the files has been
generated due to a new CHIANTI version being released or the launch
of new GOES satellites since these files were last downloaded.
Default=False
download_dir : (optional) string
The directory to download the GOES temperature data file to.
Default=SunPy default download directory
Returns
-------
temp : `~astropy.units.Quantity`
Array of temperature values of same length as longflux and
shortflux. Units=[MK]
Notes
-----
This function uses csv files representing the modelled relationship
between temperature of the soft X-ray emitting plasma and the
short to long channel GOES flux ratio. goes_chianti_temp_cor.csv
is used when coronal abundances are assumed while
goes_chianti_temp_pho.csv is used when photospheric abundances are
assumed. (See make_goes_chianti_temp.py for more detail.)
These files were calculated using the methods of White et al. (2005)
who used the CHIANTI atomic physics database to model the response
of the ratio of the short (0.5-4 angstrom) to long (1-8 angstrom)
channels of the XRSs onboard various GOES satellites. This method
assumes an isothermal plasma, the ionisation equilibria of
Mazzotta et al. (1998), and a constant density of 10**10 cm**-3.
(See White et al. 2005 for justification of this last assumption.)
This function is based on goes_get_chianti_temp.pro in
SolarSoftWare written in IDL by Stephen White.
For correct preparation of GOES data before calculating temperature
see _goes_chianti_tem() (Notes section of docstring).
References
----------
.. [1] White, S. M., Thomas, R. J., & Schwartz, R. A. 2005,
Sol. Phys., 227, 231, DOI: 10.1007/s11207-005-2445-z
.. [2] Mazzotta, P., Mazzitelli, G., Colafrancesco, S., &
Vittorio, N. 1998, A&AS, 133, 339, DOI: 10.1051/aas:1998330
Examples
--------
>>> from astropy.units import Quantity
>>> from sunpy.instr.goes import _goes_get_chianti_temp
>>> fluxratio = Quantity([0.1,0.1])
>>> temp = _goes_get_chianti_temp(fluxratio, satellite=15,
... abundances="coronal")
>>> temp
<Quantity [ 12.27557778, 12.27557778] MK>
"""
if not download_dir:
download_dir = DATA_PATH
# If download kwarg is True, or required data files cannot be
# found locally, download required data files.
check_download_file(FILE_TEMP_COR, GOES_REMOTE_PATH, download_dir,
replace=download)
check_download_file(FILE_TEMP_PHO, GOES_REMOTE_PATH, download_dir,
replace=download)
# check inputs are correct
fluxratio = fluxratio.decompose()
int(satellite)
if satellite < 1:
raise ValueError("satellite must be the number of a "
"valid GOES satellite (>1).")
# if abundance input is valid create file suffix, abund, equalling
# of 'cor' or 'pho'.
if abundances == "coronal":
data_file = FILE_TEMP_COR
elif abundances == "photospheric":
data_file = FILE_TEMP_PHO
else:
raise ValueError("abundances must be a string equalling "
"'coronal' or 'photospheric'.")
# Initialize lists to hold model data of flux ratio - temperature
# relationship read in from csv file
modeltemp = [] # modelled temperature is in log_10 space in units of MK
modelratio = []
# Determine name of column in csv file containing model ratio values
# for relevant GOES satellite
label = "ratioGOES{0}".format(satellite)
# Read data representing appropriate temperature--flux ratio
# relationship depending on satellite number and assumed abundances.
with open(os.path.join(DATA_PATH, data_file), "r") as csvfile:
startline = dropwhile(lambda l: l.startswith("#"), csvfile)
csvreader = csv.DictReader(startline, delimiter=";")
for row in csvreader:
modeltemp.append(float(row["log10temp_MK"]))
modelratio.append(float(row[label]))
modeltemp = np.asarray(modeltemp)
modelratio = np.asarray(modelratio)
# Ensure input values of flux ratio are within limits of model table
if np.min(fluxratio) < np.min(modelratio) or \
np.max(fluxratio) > np.max(modelratio):
raise ValueError(
"For GOES {0}, all values in fluxratio input must be within " +
"the range {1} - {2}.".format(satellite, np.min(modelratio),
np.max(modelratio)))
# Perform spline fit to model data to get temperatures for input
# values of flux ratio
spline = interpolate.splrep(modelratio, modeltemp, s=0)
temp = 10.**interpolate.splev(fluxratio.value, spline, der=0)
temp = u.Quantity(temp, unit='MK')
return temp
@u.quantity_input(longflux=u.W/u.m/u.m, temp=u.MK)
def _goes_get_chianti_em(longflux, temp, satellite=8, abundances="coronal",
download=False, download_dir=None):
"""
Calculates emission measure from GOES 1-8A flux and temperature.
This function calculates the emission measure of the solar
    soft X-ray emitting plasma observed by the GOES/XRS from the
    isothermal temperature and the observed long channel (1-8 angstrom)
    flux, which scales with the emission measure at a given temperature.
This function is not intended to be called directly but by
_goes_chianti_tem(), although it can be used independently.
However, if used independently data preparation, such as correctly
rescaling fluxes for some satellites etc. will not be carried out.
This is done in _goes_chianti_tem().
Parameters
----------
longflux : `~astropy.units.Quantity`
Array containing the observed GOES/XRS long channel flux.
Units=[W/m**2]
temp : `~astropy.units.Quantity`
Array containing the GOES temperature. Units=[MK]
satellite : int (optional)
Number of GOES satellite used to make observations.
Important for correct calibration of data.
Default=8
abundances : (optional) {'coronal' | 'photospheric'}
States whether photospheric or coronal abundances should be
assumed.
Default='coronal'
download : (optional) `bool`
If True, the GOES emission measure data file is downloaded.
It is important to do this if a new version of the file has been
generated due to a new CHIANTI version being released or the launch of
new GOES satellites since these file was last downloaded.
Default=False
download_dir : (optional) `str`
The directory to download the GOES emission measure data file to.
Default=SunPy default download directory
Returns
-------
em : `~astropy.units.Quantity`
Array of emission measure values of same length as longflux
and temp. [cm**-3]
Notes
-----
This function uses csv files representing the modelled relationship
between the temperature of the solar soft X-ray emitting plasma
and the resulting observed flux in the GOES/XRS long channel
(1-8 angstroms). goes_chianti_em_cor.csv is used when coronal
abundances are assumed while goes_chianti_em_pho.csv is used when
photospheric abundances are assumed.
(See make_goes_chianti_temp.py for more detail.)
These files were calculated using the methods of White et al. (2005)
who used the CHIANTI atomic physics database and GOES transfer
functions to model the response of the long channel to the
temperature of the emitting plasma for XRSs onboard various GOES
satellites. The emission measure can then be found by scaling the
ratio of these two properties. This method assumes an isothermal
plasma, the ionisation equilibria of Mazzotta et al. (1998), and
a constant density of 10**10 cm**-3.
(See White et al. 2005 for justification of this last assumption.)
This function is based on goes_get_chianti_temp.pro in
SolarSoftWare written in IDL by Stephen White.
For correct preparation of GOES data before calculating temperature
see _goes_chianti_tem() (Notes section of docstring).
References
----------
.. [1] White, S. M., Thomas, R. J., & Schwartz, R. A. 2005,
Sol. Phys., 227, 231, DOI: 10.1007/s11207-005-2445-z
.. [2] Mazzotta, P., Mazzitelli, G., Colafrancesco, S., &
Vittorio, N. 1998, A&AS, 133, 339, DOI: 10.1051/aas:1998330
Examples
--------
>>> import astropy.units as u
>>> from sunpy.instr.goes import _goes_get_chianti_em
>>> longflux = u.Quantity([7e-6,7e-6], unit=u.W/u.m/u.m)
>>> temp = u.Quantity([11, 11], unit=u.MK)
>>> em = _goes_get_chianti_em(longflux, temp, satellite=15,
... abundances="coronal")
>>> em
<Quantity [ 3.45200672e+48, 3.45200672e+48] 1 / cm3>
"""
if not download_dir:
download_dir = DATA_PATH
# If download kwarg is True, or required data files cannot be
# found locally, download required data files.
check_download_file(FILE_EM_COR, GOES_REMOTE_PATH, download_dir,
replace=download)
check_download_file(FILE_EM_PHO, GOES_REMOTE_PATH, download_dir,
replace=download)
# Check inputs are of correct type
longflux = longflux.to(u.W/u.m**2)
temp = temp.to(u.MK)
log10_temp = np.log10(temp.value)
int(satellite)
if satellite < 1:
raise ValueError("satellite must be the number of a "
"valid GOES satellite (>1).")
# if abundance input is valid create file suffix, abund, equalling
# of 'cor' or 'pho'.
if abundances == "coronal":
data_file = FILE_EM_COR
elif abundances == "photospheric":
data_file = FILE_EM_PHO
else:
raise ValueError("abundances must be a string equalling "
"'coronal' or 'photospheric'.")
# check input arrays are of same length
if len(longflux) != len(temp):
raise ValueError("longflux and temp must have same number of "
"elements.")
# Initialize lists to hold model data of temperature - long channel
# flux relationship read in from csv file.
modeltemp = [] # modelled temperature is in log_10 space in units of MK
modelflux = []
# Determine name of column in csv file containing model ratio values
# for relevant GOES satellite
label = "longfluxGOES{0}".format(satellite)
# Read data representing appropriate temperature--long flux
# relationship depending on satellite number and assumed abundances.
with open(os.path.join(DATA_PATH, data_file), "r") as csvfile:
startline = dropwhile(lambda l: l.startswith("#"), csvfile)
csvreader = csv.DictReader(startline, delimiter=";")
for row in csvreader:
modeltemp.append(float(row["log10temp_MK"]))
modelflux.append(float(row[label]))
modeltemp = np.asarray(modeltemp)
modelflux = np.asarray(modelflux)
# Ensure input values of flux ratio are within limits of model table
if np.min(log10_temp) < np.min(modeltemp) or \
np.max(log10_temp) > np.max(modeltemp) or \
np.isnan(np.min(log10_temp)):
raise ValueError("All values in temp must be within the range "
"{0} - {1} MK.".format(np.min(10**modeltemp),
np.max(10**modeltemp)))
# Perform spline fit to model data
spline = interpolate.splrep(modeltemp, modelflux, s=0)
denom = interpolate.splev(log10_temp, spline, der=0)
em = longflux.value/denom * 1e55
em = u.Quantity(em, unit='cm**(-3)')
return em
def calculate_radiative_loss_rate(goeslc, force_download=False,
download_dir=None):
"""
Calculates radiative loss rate from GOES observations.
This function calculates the radiative loss rate as a function of
time of solar soft X-ray-emitting plasma across all wavelengths given a
LightCurve object containing GOES data. The radiative loss rate is
determined from the GOES isothermal temperature and volume emission
measure as a function of time, as calculated by
`~calculate_temperature_em()`. See docstring of that function for more
details. If the LightCurve object does not contain the temperatures and
emission measures, but only contain the GOES fluxes, then the temperature
and emission measures are calculated using calculate_temperature_em().
The unit of the resulting radiative loss rates is W. Once
the radiative loss rates have been found, they are returned as part of a
new LightCurve object also containing the metadata, GOES fluxes and
corresponding temperatures and emission measures of the input LightCurve
object.
Parameters
----------
goeslc : `~sunpy.lightcurve.LightCurve`
LightCurve object containing GOES data. The units of these
data MUST be W/m^2 (flux), MK (temperature) and cm^-3
(emission measure). If LightCurve object does not contain
temperature and emission measure values, they are calculated from
the flux values using calculate_temperature_em().
force_download : (optional) `bool`
If True, the GOES radiative loss data file is downloaded even if
already locally stored. It is important to do this if a new version
of the file has been generated due to a new CHIANTI version being
released or the launch of new GOES satellites.
Default=False
download_dir : (optional) `str`
The directory to download the GOES radiative loss data file to.
Default=SunPy default download directory
Returns
-------
lc_new : `~sunpy.lightcurve.LightCurve`
Contains same metadata and data as input LightCurve with the
following additional data columns:
| lc_new.data.temperature - Array of temperature values [MK]
| lc_new.data.em - Array of volume emission measure values [cm**-3]
| lc_new.data.rad_loss_rate - radiative loss rate of the coronal soft
X-ray-emitting plasma across all wavelengths [W]
Notes
-----
The GOES radiative loss rates are calculated using a csv file containing
a table of radiative loss rate per unit emission measure at various
temperatures. The appropriate values are then found via interpolation.
This table was generated using CHIANTI atomic physics database employing
the methods of Cox & Tucker (1969). Coronal abundances, a default
density of 10**10 cm**-3, and ionization equilibrium of
Mazzotta et al. (1998) were used.
References
----------
.. [1] Cox, D.P., Tucker, W.H. 1969, ApJ, 157, 1157, DOI: 10.1086/150144
.. [2] Mazzotta, P., Mazzitelli, G., Colafrancesco, S., & Vittorio, N.
1998, A&AS, 133, 339, DOI: 10.1051/aas:1998330
Examples
--------
>>> from sunpy.instr.goes import calculate_radiative_loss_rate
>>> import sunpy.lightcurve as lc
>>> time1 = "2014-01-01 00:00:00"
>>> time2 = "2014-01-01 00:00:08"
>>> goeslc = lc.GOESLightCurve.create(time1, time2)
>>> goeslc.data
xrsa xrsb
2014-01-01 00:00:00.421999 9.187300e-08 0.000004
2014-01-01 00:00:02.468999 9.187300e-08 0.000004
2014-01-01 00:00:04.518999 9.187300e-08 0.000004
2014-01-01 00:00:06.564999 9.298800e-08 0.000004
>>> goeslc_new = calculate_radiative_loss_rate(goeslc)
>>> goeslc_new.data # doctest: +NORMALIZE_WHITESPACE
xrsa xrsb temperature em \\
2014-01-01 00:00:00.421999 9.187300e-08 0.000004 6.270239 6.440648e+48
2014-01-01 00:00:02.468999 9.187300e-08 0.000004 6.270239 6.440648e+48
2014-01-01 00:00:04.518999 9.187300e-08 0.000004 6.273917 6.422208e+48
2014-01-01 00:00:06.564999 9.298800e-08 0.000004 6.304001 6.350370e+48
<BLANKLINE>
rad_loss_rate
2014-01-01 00:00:00.421999 5.449144e+19
2014-01-01 00:00:02.468999 5.449144e+19
2014-01-01 00:00:04.518999 5.434659e+19
2014-01-01 00:00:06.564999 5.382823e+19
"""
if not download_dir:
download_dir = DATA_PATH
# Check that input argument is of correct type
if not isinstance(goeslc, lightcurve.LightCurve):
raise TypeError("goeslc must be a LightCurve object.")
# extract temperature and emission measure from GOESLightCurve
# object and change type to that required by _calc_rad_loss().
# If LightCurve object does not contain temperature and
# emission measure, calculate using calculate_temperature_em()
if 'temperature' in goeslc.data and 'em' in goeslc.data:
# Use copy.deepcopy for replicating meta and data so that input
# lightcurve is not altered.
lc_new = lightcurve.LightCurve(meta=copy.deepcopy(goeslc.meta),
data=copy.deepcopy(goeslc.data))
else:
lc_new = calculate_temperature_em(goeslc)
temp = u.Quantity(np.asarray(lc_new.data.temperature, dtype=np.float64),
unit=u.MK)
em = u.Quantity(np.asarray(lc_new.data.em, dtype=np.float64),
unit=u.cm**(-3))
# Find radiative loss rate with _calc_rad_loss()
rad_loss_out = _calc_rad_loss(temp, em, force_download=force_download,
download_dir=download_dir)
# Enter results into new version of GOES LightCurve Object
lc_new.data["rad_loss_rate"] = rad_loss_out["rad_loss_rate"].to("W").value
return lc_new
@u.quantity_input(temp=u.MK, em=u.cm**(-3))
def _calc_rad_loss(temp, em, obstime=None, force_download=False,
download_dir=None):
"""
Finds radiative loss rate of coronal plasma over all wavelengths.
This function calculates the radiative loss rate of solar coronal
soft X-ray-emitting plasma across all wavelengths given an isothermal
temperature and emission measure. The units of the results are
W. This function is based on calc_rad_loss.pro in SSW IDL.
In addition, if obstime keyword is set, giving the times to which
the temperature and emission measure values correspond, the
radiated losses integrated over time are also calculated.
Parameters
----------
temp : `~astropy.units.Quantity`
Array containing the temperature of the coronal plasma at
different times. Units=[MK]
em : `~astropy.units.Quantity`
Array containing the emission measure of the coronal plasma
at the same times corresponding to the temperatures in temp.
Must be same length as temp. Units=[cm**-3]
obstime : (optional) array-like of `datetime.datetime` objects
Array of measurement times to which temperature and
emission measure values correspond. Must be same length
as temp and em. If this keyword is set, the integrated
radiated energy is calculated.
force_download : (optional) bool
If True, the GOES radiative loss data file is downloaded. It is
important to do this if a new version of the files has been
generated due to a new CHIANTI version being released or the
launch of new GOES satellites.
Default=False
download_dir : (optional) string
The directory to download the GOES radiative loss data file to.
Default=SunPy default download directory
Returns
-------
rad_loss_out : `dict` of `~astropy.units.quantity.Quantity` objects
Contains the following keys.
| "rad_loss_rate" - radiative loss rate of the soft X-ray-emitting
plasma across all wavelengths corresponding to temperatures and
emission measures in temp and em Quantity inputs.
| "rad_loss_cumul" - cumulative radiative losses as a function of
time. (Only if obstime kwarg is NOT None.)
| "rad_loss_int" - total radiative losses as a function of time.
(Only if obstime kwarg is not None.) Array containing radiative
loss rates of the coronal plasma corresponding to temperatures and
emission measures in temp and em arrays.
Notes
-----
This function calls a csv file containing a table of radiative loss
rate per unit emission measure at various temperatures. The
appropriate values are then found via interpolation. This table
was generated using CHIANTI atomic physics database employing the
methods of Cox & Tucker (1969). Coronal abundances, a default
density of 10**10 cm**-3, and ionization equilibrium of
Mazzotta et al. (1998) were used.
References
----------
.. [1] Cox, D.P., Tucker, W.H. 1969, ApJ, 157, 1157, DOI: 10.1086/150144
.. [2] Mazzotta, P., Mazzitelli, G., Colafrancesco, S., & Vittorio, N.
1998, A&AS, 133, 339, DOI: 10.1051/aas:1998330
Examples
--------
>>> from sunpy.instr.goes import _calc_rad_loss
>>> from astropy.units.quantity import Quantity
>>> temp = Quantity([11.0, 11.0], unit="MK")
>>> em = Quantity([4.0e+48, 4.0e+48], unit="cm**(-3)")
>>> rad_loss = _calc_rad_loss(temp, em)
>>> rad_loss["rad_loss_rate"]
<Quantity [ 3.01851392e+19, 3.01851392e+19] J / s>
"""
if not download_dir:
download_dir = DATA_PATH
# Check inputs are correct
temp = temp.to(u.K)
em = em.to(1/u.cm**3)
if len(temp) != len(em):
raise ValueError("temp and em must all have same number of elements.")
# If force_download kwarg is True, or required data files cannot be
# found locally, download required data files.
check_download_file(FILE_RAD_COR, GOES_REMOTE_PATH, download_dir,
replace=force_download)
# Initialize lists to hold model data of temperature - rad loss rate
# relationship read in from csv file
    modeltemp = []  # modelled temperature is in units of K
model_loss_rate = []
# Read data from csv file into lists, being sure to skip commented
# lines beginning with "#"
with open(os.path.join(DATA_PATH, FILE_RAD_COR),
"r") as csvfile:
startline = csvfile.readlines()[7:]
csvreader = csv.reader(startline, delimiter=" ")
for row in csvreader:
modeltemp.append(float(row[0]))
model_loss_rate.append(float(row[1]))
modeltemp = np.asarray(modeltemp)
model_loss_rate = np.asarray(model_loss_rate)
# Ensure input values of flux ratio are within limits of model table
if temp.value.min() < modeltemp.min() or \
temp.value.max() > modeltemp.max():
raise ValueError("All values in temp must be within the range " +
"{0} - {1} MK.".format(np.min(modeltemp/1e6),
np.max(modeltemp/1e6)))
# Perform spline fit to model data to get temperatures for input
# values of flux ratio
spline = interpolate.splrep(modeltemp, model_loss_rate, s=0)
rad_loss = em.value * interpolate.splev(temp.value, spline, der=0)
rad_loss = u.Quantity(rad_loss, unit='erg/s')
rad_loss = rad_loss.to(u.J/u.s)
# If obstime keyword giving measurement times is set, calculate
# radiative losses integrated over time.
if obstime is not None:
# First ensure obstime is of same length as temp and em and of
# correct type.
n = len(temp)
if len(obstime) != n:
raise IOError("obstime must have same number of elements as "
"temp and em.")
if type(obstime) == pandas.tseries.index.DatetimeIndex:
            obstime = obstime.to_pydatetime()
if any(type(obst) == str for obst in obstime):
            obstime = np.array([parse_time(obst) for obst in obstime],
                               dtype=object)
if not all(type(obst) == datetime.datetime for obst in obstime):
raise TypeError("obstime must be an array-like whose elements are"
" convertible to datetime objects.")
# Check elements in obstime in chronological order
chrono_check = obstime-np.roll(obstime, 1)
chrono_check = chrono_check[1:]
if not all(chrono_check > datetime.timedelta(0)):
raise ValueError(
"Elements of obstime must be in chronological order.")
# Next, get measurement times in seconds from time of first
# measurement.
obstime_seconds = np.array([(ot-obstime[0]).total_seconds()
for ot in obstime], dtype="float64")
# Finally, integrate using trapezoid rule
rad_loss_int = trapz(rad_loss.value, obstime_seconds)
rad_loss_int = u.Quantity(rad_loss_int, unit=rad_loss.unit*u.s)
# Calculate cumulative radiated energy in each GOES channel as
# a function of time.
rad_loss_cumul = cumtrapz(rad_loss, obstime_seconds)
rad_loss_cumul = u.Quantity(rad_loss_cumul, unit=rad_loss.unit*u.s)
# Enter results into output dictionary.
rad_loss_out = {"rad_loss_rate":rad_loss,
"rad_loss_cumul" : rad_loss_cumul,
"rad_loss_int":rad_loss_int}
else:
rad_loss_out = {"rad_loss_rate":rad_loss}
return rad_loss_out
def calculate_xray_luminosity(goeslc):
"""
Calculates GOES solar X-ray luminosity.
This function calculates the solar X-ray luminosity in the GOES
wavelength ranges (1-8 angstroms and 0.5-4 angstroms) based on the
observed GOES fluxes. The units of the results are W. The calculation
is made by simply assuming that the radiation is emitted isotropically,
i.e. is distributed over a spherical surface area with a radius equal to
the Sun-Earth distance. Once the luminosity in each GOES passband is
found, they are returned in a new LightCurve object also containing the
metadata and data of the input LightCurve object.
Parameters
----------
goeslc : `~sunpy.lightcurve.LightCurve`
LightCurve object containing GOES flux data which MUST
be in units of W/m^2.
Returns
-------
lc_new : `~sunpy.lightcurve.LightCurve`
Contains same metadata and data as input LightCurve with the
following additional data columns;
| goeslc_new.data.luminosity_xrsa - Xray luminosity in 0.5-4A channel
unit=[W]
| goeslc_new.data.luminosity_xrsb - Xray luminosity in 1-8A channel
unit=[W]
Examples
--------
>>> from sunpy.instr.goes import calculate_xray_luminosity
>>> import sunpy.lightcurve as lc
>>> time1 = "2014-01-01 00:00:00"
>>> time2 = "2014-01-01 00:00:08"
>>> goeslc = lc.GOESLightCurve.create(time1, time2)
>>> goeslc.data
xrsa xrsb
2014-01-01 00:00:00.421999 9.187300e-08 0.000004
2014-01-01 00:00:02.468999 9.187300e-08 0.000004
2014-01-01 00:00:04.518999 9.187300e-08 0.000004
2014-01-01 00:00:06.564999 9.298800e-08 0.000004
>>> goeslc_new = calculate_xray_luminosity(goeslc)
>>> goeslc_new.data # doctest: +NORMALIZE_WHITESPACE
xrsa xrsb luminosity_xrsa \\
2014-01-01 00:00:00.421999 9.187300e-08 0.000004 2.498319e+16
2014-01-01 00:00:02.468999 9.187300e-08 0.000004 2.498319e+16
2014-01-01 00:00:04.518999 9.187300e-08 0.000004 2.498319e+16
2014-01-01 00:00:06.564999 9.298800e-08 0.000004 2.528640e+16
<BLANKLINE>
luminosity_xrsb
2014-01-01 00:00:00.421999 9.543993e+17
2014-01-01 00:00:02.468999 9.543993e+17
2014-01-01 00:00:04.518999 9.529851e+17
2014-01-01 00:00:06.564999 9.529851e+17
"""
# Check that input argument is of correct type
if not isinstance(goeslc, lightcurve.GOESLightCurve):
raise TypeError("goeslc must be a GOESLightCurve object.")
    # Find X-ray luminosities in both GOES channels with _goes_lx
lx_out = _goes_lx(u.Quantity(goeslc.data.xrsb, unit="W/m**2"),
u.Quantity(goeslc.data.xrsa, unit="W/m**2"),
date=str(goeslc.data.index[0]))
# Enter results into new version of GOES LightCurve Object
# Use copy.deepcopy for replicating meta and data so that input
# lightcurve is not altered.
lc_new = lightcurve.LightCurve(meta=copy.deepcopy(goeslc.meta),
data=copy.deepcopy(goeslc.data))
lc_new.data["luminosity_xrsa"] = lx_out["shortlum"].to("W").value
lc_new.data["luminosity_xrsb"] = lx_out["longlum"].to("W").value
return lc_new
def _goes_lx(longflux, shortflux, obstime=None, date=None):
"""
Calculates solar X-ray luminosity in GOES wavelength ranges.
This function calculates the X-ray luminosity from the Sun in the
GOES wavelength ranges (1-8 angstroms and 0.5-4 angstroms) based
    on the observed GOES fluxes. The units of the results are W.
The calculation is made by simply assuming that the radiation is
emitted isotropically, i.e. is distributed over a spherical
surface area with a radius equal to the Sun-Earth distance.
Parameters
----------
longflux : `~astropy.units.Quantity`
Array containing the observed GOES/XRS long channel flux.
Units=[W/m**2]
shortflux : `~astropy.units.Quantity`
Array containing the observed GOES/XRS short channel flux.
Units=[W/m**2]
obstime : (optional) array-like of `datetime.datetime` objects
Measurement times corresponding to each flux measurement.
Assumes each pair of 0.5-4 and 1-8 angstrom flux measurements
were taken simultaneously.
date : (optional) `datetime.datetime` object or valid date string.
Date at which measurements were taken. This is used to
calculate the Sun-Earth distance.
Default=None implies Sun-Earth distance is set to 1AU.
Returns
-------
lx_out : `dict`
dictionary containing the following fields.
longlum : `~astropy.units.Quantity`
Array of luminosity in the 1-8 angstroms range.
shortlum : `~astropy.units.Quantity`
Array of luminosity in the 0.5-4 angstroms range.
        longlum_int : (only present if obstime kwarg is set)
            Time-integrated energy radiated in the 1-8 angstroms range.
        shortlum_int : (only present if obstime kwarg is set)
            Time-integrated energy radiated in the 0.5-4 angstroms range.
        longlum_cumul : (only present if obstime kwarg is set)
            Cumulative energy radiated in the 1-8 angstroms range as a
            function of time.
        shortlum_cumul : (only present if obstime kwarg is set)
            Cumulative energy radiated in the 0.5-4 angstroms range as a
            function of time.
Notes
-----
This function calls _calc_xraylum() to calculate luminosities.
For more information on how this is done, see docstring of that
function.
Examples
--------
>>> from sunpy.instr.goes import _goes_lx
>>> from datetime import datetime
>>> from astropy.units.quantity import Quantity
>>> longflux = Quantity([7e-6,7e-6,7e-6,7e-6,7e-6,7e-6], unit='W/m**2')
>>> shortflux = Quantity([7e-7,7e-7,7e-7,7e-7,7e-7,7e-7], unit='W/m**2')
>>> obstime = np.array([datetime(2014,1,1,0,0,0),
... datetime(2014,1,1,0,0,2),
... datetime(2014,1,1,0,0,4),
... datetime(2014,1,1,0,0,6),
... datetime(2014,1,1,0,0,8),
... datetime(2014,1,1,0,0,10),], dtype=object)
>>> lx_out = _goes_lx(longflux, shortflux, obstime)
>>> lx_out["longlum"]
<Quantity [ 1.96860565e+18, 1.96860565e+18, 1.96860565e+18,
1.96860565e+18, 1.96860565e+18, 1.96860565e+18] W>
>>> lx_out["shortlum"]
<Quantity [ 1.96860565e+17, 1.96860565e+17, 1.96860565e+17,
1.96860565e+17, 1.96860565e+17, 1.96860565e+17] W>
>>> lx_out["longlum_int"]
<Quantity 1.968605654118636e+19 s W>
>>> lx_out["shortlum_int"]
<Quantity 1.9686056541186358e+18 s W>
"""
# Calculate X-ray luminosities
longlum = _calc_xraylum(longflux, date=date)
shortlum = _calc_xraylum(shortflux, date=date)
# If obstime keyword giving measurement times is set, calculate
# total energy radiated in the GOES bandpasses during the flare.
if obstime is not None:
# First ensure longflux, shortflux, and obstime are all of
# equal length and obstime is of correct type.
if not len(longflux) == len(shortflux) == len(obstime):
raise ValueError("longflux, shortflux, and obstime must all have "
"same number of elements.")
if type(obstime) == pandas.tseries.index.DatetimeIndex:
            obstime = obstime.to_pydatetime()
if any(type(obst) == str for obst in obstime):
parse_time(obstime)
if not all(type(obst) == datetime.datetime for obst in obstime):
raise TypeError("obstime must be an array-like whose elements are"
" convertible to datetime objects.")
# Check elements in obstime in chronological order
chrono_check = obstime-np.roll(obstime, 1)
chrono_check = chrono_check[1:]
if not all(chrono_check > datetime.timedelta(0)):
raise ValueError(
"Elements of obstime must be in chronological order.")
# Next, get measurement times in seconds from time of first
# measurement.
obstime_seconds = np.array([(ot-obstime[0]).total_seconds()
for ot in obstime], dtype="float64")
# Finally, integrate using trapezoid rule
longlum_int = trapz(longlum.value, obstime_seconds)
longlum_int = u.Quantity(longlum_int, unit=longlum.unit*u.s)
shortlum_int = trapz(shortlum.value, obstime_seconds)
shortlum_int = u.Quantity(shortlum_int, unit=shortlum.unit*u.s)
# Calculate cumulative radiated energy in each GOES channel as
# a function of time.
longlum_cumul = cumtrapz(longlum.value, obstime_seconds)
longlum_cumul = u.Quantity(longlum_cumul, unit=longlum.unit*u.s)
shortlum_cumul = cumtrapz(shortlum.value, obstime_seconds)
shortlum_cumul = u.Quantity(shortlum_cumul,
unit=shortlum.unit*u.s)
lx_out = {"longlum":longlum, "shortlum":shortlum,
"longlum_cumul":longlum_cumul,
"shortlum_cumul":shortlum_cumul,
"longlum_int":longlum_int, "shortlum_int":shortlum_int}
else:
lx_out = {"longlum":longlum, "shortlum":shortlum}
return lx_out
@u.quantity_input(flux=u.W/u.m/u.m)
def _calc_xraylum(flux, date=None):
"""
Calculates solar luminosity based on observed flux observed at 1AU.
This function calculates the luminosity from the Sun based
    on observed flux in W/m**2. The units of the results are W.
The calculation is made by simply assuming that the radiation is
emitted isotropically, i.e. is distributed over a spherical
surface area with a radius equal to the Sun-Earth distance.
Parameters
----------
flux : `~astropy.units.Quantity`
Containing the observed solar flux. Units=[W/m**2]
date : (optional) `datetime.datetime` object or valid date string
Used to calculate a more accurate Sun-Earth distance based on
Earth's orbit at that date. If date is None, Sun-Earth
distance is set to 1AU.
Returns
-------
    xraylum : `~astropy.units.Quantity` array with units=W.
Array of X-ray luminosity.
Examples
--------
>>> from sunpy.instr.goes import _calc_xraylum
>>> from astropy.units.quantity import Quantity
>>> flux = Quantity([7e-6,7e-6], unit="W/m**2")
>>> xraylum = _calc_xraylum(flux, date="2014-04-21")
>>> xraylum
<Quantity [ 1.98649103e+18, 1.98649103e+18] W>
"""
if date is not None:
date = parse_time(date)
xraylum = 4 * np.pi * sun.sun.sunearth_distance(t=date).to("m")**2 * flux
else:
xraylum = 4 * np.pi * sun.constants.au.to("m")**2 * flux
return xraylum
def flareclass_to_flux(flareclass):
"""
Converts a GOES flare class into the corresponding X-ray flux.
Parameters
----------
flareclass : str
The case-insensitive flare class (e.g., 'X3.2', 'm1.5', 'A9.6').
Returns
-------
flux : `~astropy.units.Quantity`
X-ray flux between 1 and 8 Angstroms as measured near Earth in W/m^2.
Raises
------
TypeError
Input must be a string.
Examples
--------
>>> flareclass_to_flux('A1.0')
1e-08 W / m2
>>> flareclass_to_flux('c4.7')
4.7e-06 W / m2
>>> flareclass_to_flux('X2.4')
0.00024 W / m2
"""
    if not isinstance(flareclass, str):
raise TypeError("Input must be a string")
#TODO should probably make sure the string is in the expected format.
flareclass = flareclass.upper()
#invert the conversion dictionary
#conversion_dict = {v: k for k, v in GOES_CONVERSION_DICT.items()}
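    # GOES_CONVERSION_DICT (defined elsewhere in this module) is assumed to map
    # the class letter to its decade flux, e.g. 'A' -> 1e-8 W/m**2 and
    # 'X' -> 1e-4 W/m**2, so 'X3.2' becomes 3.2 * 1e-4 W/m**2.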
return float(flareclass[1:]) * GOES_CONVERSION_DICT[flareclass[0]]
@u.quantity_input(goesflux=u.watt/u.m**2)
def flux_to_flareclass(goesflux):
"""
Converts X-ray flux into the corresponding GOES flare class.
Parameters
----------
flux : `~astropy.units.Quantity`
X-ray flux between 1 and 8 Angstroms (usually measured by GOES) as
measured at the Earth in W/m^2
Returns
-------
flareclass : str
The flare class e.g.: 'X3.2', 'M1.5', 'A9.6'.
Raises
------
ValueError
Flux cannot be negative.
References
----------
`Solar Flare Classification <https://en.wikipedia.org/wiki/Solar_flare#Classification>`_
Examples
--------
>>> flux_to_flareclass(1e-08 * u.watt/u.m**2)
'A1'
>>> flux_to_flareclass(4.7e-06 * u.watt/u.m**2)
'C4.7'
>>> flux_to_flareclass(0.00024 * u.watt/u.m**2)
'X2.4'
>>> flux_to_flareclass(7.8e-09 * u.watt/u.m**2)
'A0.78'
>>> flux_to_flareclass(0.00682 * u.watt/u.m**2)
'X68.2'
"""
if goesflux.value < 0:
raise ValueError("Flux cannot be negative")
decade = np.floor(np.log10(goesflux.to('W/m**2').value))
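    # The decade (power of ten) of the flux selects the letter class; fluxes
    # below the A range (1e-8 W/m**2) or above the X range (1e-4 W/m**2) are
    # clamped to A or X below.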
#invert the conversion dictionary
conversion_dict = {v: k for k, v in GOES_CONVERSION_DICT.items()}
if decade < -8:
str_class = "A"
decade = -8
elif decade > -4:
str_class = "X"
decade = -4
else:
str_class = conversion_dict.get(u.Quantity(10 ** decade, "W/m**2" ))
goes_subclass = 10 ** -decade * goesflux.to('W/m**2').value
return "{0}{1:.3g}".format(str_class, goes_subclass)
| bsd-2-clause |
summychou/LaserQt | code/LaserQt_Gui/LaserQt_Gui_Canvas.py | 2 | 8240 | # -*- coding: utf-8 -*-
import matplotlib
matplotlib.use("Qt5Agg")
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.font_manager import FontProperties
from mpl_toolkits.mplot3d import Axes3D
from PyQt5.QtWidgets import QSizePolicy
import os
FONT = FontProperties(fname=(os.getcwd() + "/LaserQt_Font/wqy-microhei.ttc"), size=10)
'''
@author : Zhou Jian
@email : [email protected]
@version : V1.0
@date : 2017.02.22
'''
class BaseCanvas(FigureCanvas):
'''
    Base canvas class.
'''
def __init__(self, figure):
super(BaseCanvas, self).__init__(figure)
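        # Let the canvas expand with the enclosing Qt layout in both directions.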
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
class StaticCanvasForPathInfo(BaseCanvas):
'''
    Static canvas for path information; inherits from the base canvas.
'''
def __init__(self, width=6, height=4, dpi=100):
fig = Figure(figsize=(width, height), dpi=dpi)
fig.set_facecolor("white")
fig.set_edgecolor("black")
self.axes = fig.add_subplot(111)
self.axes.set_xticks([])
self.axes.set_yticks([])
self.axes.set_title("加工路径静态图", fontproperties=FONT, fontsize=14)
self.axes.set_xlabel("X - 板长方向(m)", fontproperties=FONT, fontsize=9)
self.axes.set_ylabel("Y - 板宽方向(m)", fontproperties=FONT, fontsize=9)
self.axes.grid(True, which="both")
# We want the axes cleared every time plot() is called
self.axes.hold(False)
super(StaticCanvasForPathInfo, self).__init__(figure=fig)
def compute_initial_figure(self):
pass
class DynamicCanvasForPathInfo(BaseCanvas):
'''
    Dynamic canvas for path information; inherits from the base canvas.
'''
def __init__(self, width=6, height=4, dpi=100):
fig = Figure(figsize=(width, height), dpi=dpi)
fig.set_facecolor("white")
fig.set_edgecolor("black")
self.axes = fig.add_subplot(111)
self.axes.set_xticks([])
self.axes.set_yticks([])
self.axes.set_title("加工路径动态图", fontproperties=FONT, fontsize=14)
self.axes.set_xlabel("X - 板长方向(m)", fontproperties=FONT, fontsize=9)
self.axes.set_ylabel("Y - 板宽方向(m)", fontproperties=FONT, fontsize=9)
self.axes.grid(True, which="both")
# We want the axes cleared every time plot() is called
self.axes.hold(False)
super(DynamicCanvasForPathInfo, self).__init__(figure=fig)
def compute_initial_figure(self):
pass
def update_figure(self):
pass
class Static3DCanvasForPointCloud(BaseCanvas):
'''
    Static 3D canvas for point-cloud data fitting; inherits from the base canvas.
'''
def __init__(self):
fig = plt.figure()
fig.set_facecolor("white")
fig.set_edgecolor("black")
self.axes = Axes3D(fig)
self.axes.set_xticks([])
self.axes.set_yticks([])
self.axes.set_zticks([])
self.axes.set_xlabel("加工板X方向", fontproperties=FONT, fontsize=9)
self.axes.set_ylabel("加工板Y方向", fontproperties=FONT, fontsize=9)
self.axes.set_zlabel("加工板Z方向", fontproperties=FONT, fontsize=9)
self.axes.grid(True, which="both")
# We want the axes cleared every time plot() is called
self.axes.hold(False)
super(Static3DCanvasForPointCloud, self).__init__(figure=fig)
def compute_initial_figure(self):
pass
class StaticCanvasForErrorCurve01(BaseCanvas):
'''
    Static canvas for error curves; inherits from the base canvas.
'''
def __init__(self, width=2, height=2, dpi=50):
fig = Figure(figsize=(width, height), dpi=dpi)
fig.set_facecolor("white")
fig.set_edgecolor("black")
self.axes = fig.add_subplot(111)
self.axes.set_xticks([])
self.axes.set_yticks([])
self.axes.set_title("加工板水平方向1/3处误差曲线图", fontproperties=FONT, fontsize=14)
self.axes.grid(True, which="both")
# We want the axes cleared every time plot() is called
self.axes.hold(False)
super(StaticCanvasForErrorCurve01, self).__init__(figure=fig)
def compute_initial_figure(self):
pass
class StaticCanvasForErrorCurve02(BaseCanvas):
'''
    Static canvas for error curves; inherits from the base canvas.
'''
def __init__(self, width=2, height=2, dpi=50):
fig = Figure(figsize=(width, height), dpi=dpi)
fig.set_facecolor("white")
fig.set_edgecolor("black")
self.axes = fig.add_subplot(111)
self.axes.set_xticks([])
self.axes.set_yticks([])
self.axes.set_title("加工板水平方向1/2处误差曲线图", fontproperties=FONT, fontsize=14)
self.axes.grid(True, which="both")
# We want the axes cleared every time plot() is called
self.axes.hold(False)
super(StaticCanvasForErrorCurve02, self).__init__(figure=fig)
def compute_initial_figure(self):
pass
class StaticCanvasForErrorCurve03(BaseCanvas):
'''
    Static canvas for error curves; inherits from the base canvas.
'''
def __init__(self, width=2, height=2, dpi=50):
fig = Figure(figsize=(width, height), dpi=dpi)
fig.set_facecolor("white")
fig.set_edgecolor("black")
self.axes = fig.add_subplot(111)
self.axes.set_xticks([])
self.axes.set_yticks([])
self.axes.set_title("加工板水平方向2/3处误差曲线图", fontproperties=FONT, fontsize=14)
self.axes.grid(True, which="both")
# We want the axes cleared every time plot() is called
self.axes.hold(False)
super(StaticCanvasForErrorCurve03, self).__init__(figure=fig)
def compute_initial_figure(self):
pass
class StaticCanvasForErrorCurve04(BaseCanvas):
'''
    Static canvas for error curves; inherits from the base canvas.
'''
def __init__(self, width=2, height=2, dpi=50):
fig = Figure(figsize=(width, height), dpi=dpi)
fig.set_facecolor("white")
fig.set_edgecolor("black")
self.axes = fig.add_subplot(111)
self.axes.set_xticks([])
self.axes.set_yticks([])
self.axes.set_title("加工板垂直方向1/3处误差曲线图", fontproperties=FONT, fontsize=14)
self.axes.grid(True, which="both")
# We want the axes cleared every time plot() is called
self.axes.hold(False)
super(StaticCanvasForErrorCurve04, self).__init__(figure=fig)
def compute_initial_figure(self):
pass
class StaticCanvasForErrorCurve05(BaseCanvas):
'''
    Static canvas for error curves; inherits from the base canvas.
'''
def __init__(self, width=2, height=2, dpi=50):
fig = Figure(figsize=(width, height), dpi=dpi)
fig.set_facecolor("white")
fig.set_edgecolor("black")
self.axes = fig.add_subplot(111)
self.axes.set_xticks([])
self.axes.set_yticks([])
self.axes.set_title("加工板垂直方向2/3处误差曲线图", fontproperties=FONT, fontsize=14)
self.axes.grid(True, which="both")
# We want the axes cleared every time plot() is called
self.axes.hold(False)
super(StaticCanvasForErrorCurve05, self).__init__(figure=fig)
def compute_initial_figure(self):
pass
class StaticCanvasForErrorCurve06(BaseCanvas):
'''
    Static canvas for error curves; inherits from the base canvas.
'''
def __init__(self, width=2, height=2, dpi=50):
fig = Figure(figsize=(width, height), dpi=dpi)
fig.set_facecolor("white")
fig.set_edgecolor("black")
self.axes = fig.add_subplot(111)
self.axes.set_xticks([])
self.axes.set_yticks([])
self.axes.set_title("加工板任意两点间误差曲线图", fontproperties=FONT, fontsize=14)
self.axes.grid(True, which="both")
# We want the axes cleared every time plot() is called
self.axes.hold(False)
super(StaticCanvasForErrorCurve06, self).__init__(figure=fig)
def compute_initial_figure(self):
pass
| mit |
mehdidc/scikit-learn | sklearn/feature_selection/variance_threshold.py | 26 | 2532 | # Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
| bsd-3-clause |
zhoujj2013/lncfuntk | bin/Training/plot_roc_crossval.py | 1 | 4785 | import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
import sys
import os
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# prepare dataset
###############################################################################
kfold = int(sys.argv[3])
prefix = sys.argv[4]
dat_fh = open(sys.argv[1], 'rb')
tar_fh = open(sys.argv[2], 'rb')
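# Expected invocation (inferred from the argv handling above):
#   python plot_roc_crossval.py <features.tsv> <labels.tsv> <k_folds> <LR|GBDT|RF|SVM|NB|KNN|DT>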
dat = []
while True:
l = dat_fh.readline()
if len(l) == 0:
break
lc = l.strip("\n").split("\t")
dat.append([float(i) for i in lc])
X = np.array(dat)
tar = []
while True:
l = tar_fh.readline()
if len(l) == 0:
break
lc = l.strip("\n").split("\t")
tar.append(float(lc[0]))
y = np.array(tar)
###############################################################################
# Classification and ROC analysis
#
# Run classifier with cross-validation and plot ROC curves
print "Training data count: " + str(len(y))
cv = StratifiedKFold(y, n_folds=kfold)
#for train, test in cv:
# print("%s %s" % (train, test))
####################
# your classifier
####################
if prefix == "LR":
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(penalty='l2', C=1)
elif prefix == "GBDT":
from sklearn.ensemble import GradientBoostingClassifier
classifier = GradientBoostingClassifier(n_estimators=200)
elif prefix == "RF":
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators=10)
elif prefix == "SVM":
from sklearn.svm import SVC
classifier = SVC(kernel='linear', probability=True)
elif prefix == "NB":
from sklearn.naive_bayes import MultinomialNB
classifier = MultinomialNB(alpha=0.01)
elif prefix == "KNN":
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier()
elif prefix == "DT":
from sklearn import tree
classifier = tree.DecisionTreeClassifier()
else:
    print >>sys.stderr, "Wrong prefix: %s" % (prefix)
    sys.exit(1)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
model_save = {}
best_model_auc = 0
best_model_index = 0
from sklearn import metrics
from sklearn.base import clone
for i, (train, test) in enumerate(cv):
    # Fit one fresh copy of the classifier per fold so that model_save keeps a
    # separately fitted model for each fold rather than several references to
    # the same object, and so each fold is only fitted once.
    fold_classifier = clone(classifier)
    fold_classifier.fit(X[train], y[train])
    probas_ = fold_classifier.predict_proba(X[test])
    # Compute ROC curve and area the curve
    fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1], pos_label=1)
    predict = fold_classifier.predict(X[test])
precision = metrics.precision_score(y[test], predict)
recall = metrics.recall_score(y[test], predict)
print str(precision) + "\t" + str(recall)
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
    model_save[i] = fold_classifier
if roc_auc > best_model_auc:
best_model_auc = roc_auc
best_model_index = i
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
from sklearn.externals import joblib
print best_model_index
print "################\nCoef_\n";
print model_save[best_model_index].coef_[0]
print "###############\n";
coef_normalized_arr = []
coef_sum = sum(model_save[best_model_index].coef_[0])
for coef in model_save[best_model_index].coef_[0]:
coef_normalized = coef/float(coef_sum)
coef_normalized_arr.append(coef_normalized)
out = open("./" + prefix + ".weight.value.lst", 'wb')
print >>out,"miRNA_weight\t%.2f\nTF_weight\t%.2f\nPCG_weight\t%.2f" % (coef_normalized_arr[0], coef_normalized_arr[1], coef_normalized_arr[2])
out.close()
try:
os.makedirs("./" + prefix)
except OSError:
if not os.path.isdir("./" + prefix):
raise
joblib.dump(model_save[best_model_index], "./" + prefix + "/" + prefix + ".pkl")
if prefix == "LR" or prefix == "SVM" or prefix == "KNN":
print model_save[best_model_index].coef_
print model_save[best_model_index].intercept_
else:
print model_save[best_model_index].feature_importances_
#plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6))
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
out = open("./" + prefix + ".tpr.fpr.txt", 'wb')
for i in range(0,len(mean_fpr), 1):
print >>out, "%.4f\t%.4f" % (mean_tpr[i], mean_fpr[i])
out.close()
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic by ' + prefix)
plt.legend(loc="lower right")
plt.savefig("./" + prefix + ".png")
| mit |
Bismarrck/tensorflow | tensorflow/contrib/training/python/training/feeding_queue_runner_test.py | 76 | 5052 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues.feeding_functions import _enqueue_data as enqueue_data
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
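  # Stack the selected rows of `array` into a single 2-D array.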
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
ppuggioni/hrate | hrate/data_handling/selfloops.py | 1 | 1026 | import pandas as pd
import numpy as np
import logging
def read_selfloops_file(path, skiprows=30, skipfooter=30):
"""
    Parse a Selfloops export file and return a pandas DataFrame.
    :param path: path to the Selfloops export file
    :param skiprows: how many rows of data to skip at the beginning
    :param skipfooter: how many rows of data to skip at the end
    :return: DataFrame with columns Time_stamp, Time_lapsed, HR and RR
"""
with open(path, 'r') as f:
logging.info('Opening file {}'.format(path))
first_line = f.readline().strip('\n')
start_time = pd.to_datetime(first_line, format='%d %B %Y %H:%M:%S')
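        # The first line of the file is expected to hold the recording start
        # time, e.g. "01 January 2017 12:34:56".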
df = pd.read_csv(path,
names=['Time', 'HR', 'RR'], skiprows=skiprows+2, skipfooter=skipfooter,
dtype={'Time': np.float, 'HR': np.float, 'RR': np.float},
engine='python'
)
df['Time_stamp'] = start_time + pd.to_timedelta(df['Time'], unit='ms')
df['Time_lapsed'] = df['Time_stamp'] - df['Time_stamp'].min()
columns = ['Time_stamp', 'Time_lapsed', 'HR', 'RR']
return df[columns] | apache-2.0 |
rexshihaoren/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
legacysurvey/pipeline | py/legacyanalysis/runstatus.py | 2 | 3760 | from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import qdo
import sys
import argparse
from legacypipe.survey import LegacySurveyData
from astrometry.libkd.spherematch import *
parser = argparse.ArgumentParser()
parser.add_argument('--wrap', action='store_true',
help='Wrap RA at 180 degrees?')
parser.add_argument('args',nargs=argparse.REMAINDER)
opt = parser.parse_args()
args = opt.args
if len(args) < 1:
print('Need one+ arg: qdo queue name(s)')
sys.exit(-1)
state_radec = {}
for qname in args:
q = qdo.connect(qname, create_ok=False)
print('Connected to QDO queue', qname, q)
for state in qdo.Task.VALID_STATES:
print('State', state)
# Append jobs from all queues in this state into RA,Dec arrays
if state in state_radec:
ra,dec = state_radec[state]
else:
ra,dec = [],[]
state_radec[state] = (ra,dec)
tasks = q.tasks(state=state)
print(len(tasks), 'tasks with state', state)
for task in tasks:
brick = task.task
#brickobj = survey.get_brick_by_name(brick)
#r = brickobj.ra
#d = brickobj.dec
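            # Brick names encode the brick centre, e.g. '1234m567' -> RA 123.4 deg,
            # Dec -56.7 deg (an 'm' separator marks negative declination;
            # anything else is treated as positive).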
try:
rastr = brick[:4]
r = int(rastr, 10) / 10.
decstr = brick[5:]
d = int(decstr, 10) / 10.
d *= (-1 if brick[4] == 'm' else 1)
except:
print('Failed to parse RA,Dec string', rastr, decstr)
continue
#print('Brick', brick, '->', r, d)
if opt.wrap:
if r > 180:
r -= 360.
ra.append(r)
dec.append(d)
cmap = { qdo.Task.WAITING: 'k',
qdo.Task.PENDING: '0.5',
qdo.Task.RUNNING: 'b',
qdo.Task.SUCCEEDED: 'g',
qdo.Task.FAILED: 'r',
}
plt.clf()
lp,lt = [],[]
for state in [qdo.Task.WAITING, qdo.Task.SUCCEEDED, qdo.Task.PENDING,
qdo.Task.RUNNING, qdo.Task.FAILED]:
if not state in state_radec:
continue
ra,dec = state_radec[state]
p = plt.plot(ra, dec, '.', color=cmap.get(state, 'y'))
lp.append(p[0])
lt.append(state)
plt.xlim([0, 360])
plt.figlegend(lp, lt, 'upper left')
plt.savefig('status.png')
sys.exit(0)
allra = []
alldec = []
allstate = []
alltasks = []
# allra.append(ra)
# alldec.append(dec)
# allstate.append([state] * len(ra))
# alltasks.append(tasks)
ra = np.hstack(allra)
dec = np.hstack(alldec)
state = np.hstack(allstate)
tasks = np.hstack(alltasks)
# Match to actual table of bricks to get brickq.
survey = LegacySurveyData()
bricks = survey.get_bricks_readonly()
I,J,d = match_radec(ra, dec, bricks.ra, bricks.dec, 0.2, nearest=True)
print(len(ra), 'jobs')
print(len(I), 'matches')
ra = ra[I]
dec = dec[I]
state = state[I]
tasks = tasks[I]
brickq = bricks.brickq[J]
for q in [0,1,2,3]:
print()
print('Brickq', q)
plt.clf()
lp,lt = [],[]
for s in qdo.Task.VALID_STATES:
I = np.flatnonzero((brickq == q) * (state == s))
if len(I) == 0:
continue
print('State', s)
#if len(I) < 10:
print(' tasks', [t.task for t in tasks[I[:10]]], '...' if len(I) > 10 else '')
p = plt.plot(ra[I], dec[I], '.', color=cmap.get(s, 'y'))
lp.append(p[0])
lt.append(s)
plt.title('Brickq = %i' % q)
# HACK
plt.xlim([-45, 65])
plt.figlegend(lp, lt, 'upper right')
plt.savefig('status-%i.png' % q)
| gpl-2.0 |
brainsqueeze/Philly_bars_and_restaurants | AppendInfo.py | 1 | 3731 | import numpy as np
import pandas as pd
import datetime, time
import itertools
def FillOldTab():
"""
Old tables do not have all of the detailed information. Here I am finding
that info from more recent lists and using that to fill in gaps in the old
tables. I will only keep listings for which I can find the detailed info.
"""
df14 = pd.read_csv('Lists/foobooz-50-best-bars-philadelphia_2014.tsv', sep='\t', index_col='Rank')
df13 = pd.read_csv('Lists/foobooz-50-best-bars-philadelphia_2013.tsv', sep='\t', index_col='Rank').loc[:,['Bar']]
df12 = pd.read_csv('Lists/foobooz-50-best-bars-philadelphia_2012.tsv', sep='\t', index_col='Rank').loc[:,['Bar']]
df11 = pd.read_csv('Lists/foobooz-50-best-bars-philadelphia_2011.tsv', sep='\t', index_col='Rank').loc[:,['Bar']]
df10 = pd.read_csv('Lists/foobooz-50-best-bars-philadelphia_2010.tsv', sep='\t', index_col='Rank').loc[:,['Bar']]
df09 = pd.read_csv('Lists/foobooz-50-best-bars-philadelphia_2009.tsv', sep='\t', index_col='Rank').loc[:,['Bar']]
Bar14 = pd.Series(df14['Bar'])
Bar13 = pd.Series(df13['Bar'])
Bar12 = pd.Series(df12['Bar'])
Bar11 = pd.Series(df11['Bar'])
Bar10 = pd.Series(df10['Bar'])
Bar09 = pd.Series(df09['Bar'])
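    # Keep only bars that appear both in the 2014 list (which carries the full
    # venue details) and in each earlier year's list.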
int13 = pd.Series(list(set(Bar14).intersection(set(Bar13))))
int12 = pd.Series(list(set(Bar14).intersection(set(Bar12))))
int11 = pd.Series(list(set(Bar14).intersection(set(Bar11))))
int10 = pd.Series(list(set(Bar14).intersection(set(Bar10))))
int09 = pd.Series(list(set(Bar14).intersection(set(Bar09))))
List13 = df13[Bar13.isin(int13)]
List13 = List13.sort(['Bar'], ascending=True)
List13 = pd.merge(List13, df14[Bar14.isin(int13)].sort(['Bar'], ascending=True).loc[:, ['Bar', 'Neighborhood', 'Address', 'Cuisine Category', 'Price']], on='Bar', left_index=True, right_index=False)
List12 = df12[Bar12.isin(int12)]
List12 = List12.sort(['Bar'], ascending=True)
List12 = pd.merge(List12, df14[Bar14.isin(int12)].sort(['Bar'], ascending=True).loc[:, ['Bar', 'Neighborhood', 'Address', 'Cuisine Category', 'Price']], on='Bar', left_index=True, right_index=False)
List11 = df11[Bar11.isin(int11)]
List11 = List11.sort(['Bar'], ascending=True)
List11 = pd.merge(List11, df14[Bar14.isin(int11)].sort(['Bar'], ascending=True).loc[:, ['Bar', 'Neighborhood', 'Address', 'Cuisine Category', 'Price']], on='Bar', left_index=True, right_index=False)
List10 = df10[Bar10.isin(int10)]
List10 = List10.sort(['Bar'], ascending=True)
List10 = pd.merge(List10, df14[Bar14.isin(int10)].sort(['Bar'], ascending=True).loc[:, ['Bar', 'Neighborhood', 'Address', 'Cuisine Category', 'Price']], on='Bar', left_index=True, right_index=False)
List09 = df09[Bar09.isin(int09)]
List09 = List09.sort(['Bar'], ascending=True)
List09 = pd.merge(List09, df14[Bar14.isin(int09)].sort(['Bar'], ascending=True).loc[:, ['Bar', 'Neighborhood', 'Address', 'Cuisine Category', 'Price']], on='Bar', left_index=True, right_index=False)
df14['Year'] = 2014
List13['Year'] = 2013
List12['Year'] = 2012
List11['Year'] = 2011
List10['Year'] = 2010
List09['Year'] = 2009
Total = df14.sort(['Bar'], ascending=True).append([List13, List12, List11, List10, List09])
Total.to_csv('Lists/Bars_total.tsv', sep='\t')
List13.to_csv('Lists/2013_Bars_appended.tsv', sep='\t')
List12.to_csv('Lists/2012_Bars_appended.tsv', sep='\t')
List11.to_csv('Lists/2011_Bars_appended.tsv', sep='\t')
List10.to_csv('Lists/2010_Bars_appended.tsv', sep='\t')
List09.to_csv('Lists/2009_Bars_appended.tsv', sep='\t')
def main():
FillOldTab()
if __name__ == '__main__':
main()
| mit |
gnina/scripts | affinity_search/getres.py | 1 | 1061 | #!/usr/bin/env python
'''Return the top and R statistics for every row of the database that has them'''
import sys, re, MySQLdb, argparse, os, json, subprocess
import pandas as pd
import makemodel
import numpy as np
def getcursor():
'''create a connection and return a cursor;
doing this guards against dropped connections'''
    conn = MySQLdb.connect(host=args.host, user="opter", passwd=args.password, db=args.db)
conn.autocommit(True)
cursor = conn.cursor()
return cursor
parser = argparse.ArgumentParser(description='Return top and R statistics for successful rows in database')
parser.add_argument('--host',type=str,help='Database host',required=True)
parser.add_argument('-p','--password',type=str,help='Database password',required=True)
parser.add_argument('--db',type=str,help='Database name',default='opt1')
args = parser.parse_args()
cursor = getcursor()
cursor.execute('SELECT serial,top,R,auc,rmse FROM params WHERE rmse IS NOT NULL')
rows = cursor.fetchall()
for row in rows:
print('%d %f %f %f %f' % row)
| bsd-3-clause |
buckbaskin/stopsign | src/v1/ml_compare.py | 1 | 9016 | #!/usr/bin/env python
import rospkg
import cv2
import datetime
import numpy as np
import pandas as pd
from imblearn.under_sampling import RandomUnderSampler
from sklearn.ensemble import GradientBoostingClassifier
# from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
# from matplotlib import pyplot as plt
# class ,descr00,descr01,descr02,descr03,descr04,descr05,descr06,descr07,
# descr08,descr09,descr10,descr11,descr12,descr13,descr14,descr15,descr16,
# descr17,descr18,descr19,descr20,descr21,descr22,descr23,descr24,descr25,
# descr26,descr27,descr28,descr29,descr30,descr31,angle ,classid,octave ,
# x ,y ,respons,size ,imageid
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('stopsign')
IMAGE_RATE = 11 # hz
BULK_DATA_FILE = '%s/data/003_manual_labels/all.csv' % (pkg_path,)
start_image_id = 0
end_image_id = 2189
IMAGE_BASE_STRING = '%s/data/002_original_images/%s' % (pkg_path, 'frame%04d.jpg')
descriptors = []
for i in range(32):
descriptors.append('descr%02d' % (i,))
klass = ['class'.ljust(7)]
def get_image(image_id):
filename = IMAGE_BASE_STRING % (image_id,)
return cv2.imread(filename, cv2.IMREAD_COLOR)
def load_data(seed=None):
df = pd.read_csv(BULK_DATA_FILE, header=0)
# mutate data back from stored form
df['class '] = df['class '].apply(lambda cls: cls / 1000.0)
df['angle '] = df['angle '].apply(lambda ang: ang / 1000.0)
df['respons'] = df['respons'].apply(lambda res: res / 100000000.0)
# split into class, features
X = df[descriptors]
y = df[klass]
print('X.describe()')
print(X.describe())
print('y.describe()')
print(y.describe())
# use mask to split into test, train
if seed is not None:
np.random.seed(seed)
msk = np.random.rand(len(df)) < 0.8
train_X = X[msk].as_matrix()
test_X = X[~msk].as_matrix()
train_y = y[msk].as_matrix().ravel()
test_y = y[~msk].as_matrix().ravel()
return train_X, train_y, test_X, test_y
def subsample_data(X, y, ratio=0.5, seed=None):
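    # Randomly undersample to a fixed total of ~1100 keypoints, split between
    # the two classes according to `ratio`.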
size = 1100
rus = RandomUnderSampler(
ratio={
0: int(size * ratio),
1: int(size * (1 - ratio)),
},
random_state=seed)
return rus.fit_sample(X, y)
def increment_index_list(index, max_list):
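    # Treat `index` as a mixed-radix counter: bump the last digit and carry
    # leftwards whenever a digit reaches its maximum given in `max_list`.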
index[-1] += 1
if index[-1] >= max_list[-1]:
for i in range(len(index) - 1, 0, -1):
if index[i] >= max_list[i]:
index[i] = 0
index[i-1] += 1
return index
def make_all_combinations(dict_of_arglists):
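    # Yield every combination of the supplied parameter lists (a Cartesian
    # product), e.g. {'a': [1, 2], 'b': [3]} yields {'a': 1, 'b': 3} and
    # {'a': 2, 'b': 3}.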
input_args = list(dict_of_arglists.keys())
max_list = []
for input_key in input_args:
max_list.append(len(dict_of_arglists[input_key]))
index_list = [0] * len(input_args)
count = 1
for val in max_list:
count *= val
for _ in range(count):
input_vals = []
for index, input_key in enumerate(input_args):
input_vals.append(dict_of_arglists[input_key][index_list[index]])
combined = zip(input_args, input_vals)
d = dict(combined)
# print(d)
yield d
index_list = increment_index_list(index_list, max_list)
if __name__ == '__main__':
### Begin the whole process ###
'''
Things to work on:
Vary up the dataset:
- Classify the total image instead of just one keypoint
- Learn based on the classification of all of the keypoints in the
image and their location
- With classification of image in hand, classify image with random
perturbations
- rotate the image gaussian ammount
- add gaussian noise to the whole image
- add gaussian brightness to the whole image
- add guassian darkness
- red
- green
- blue
- shift left, right, up, down (should be no biggy, can skip because
keypoints will just shift)
- image flip left/right, up/down?
- scaling image (zoom center)
- affine transform
- perspective transform
Once I've played with varying the dataset, I should either find a set of
images that confirm the stopsign is pretty robust or an expanded training
set to train with. From there, optimize KNN,
'''
# load data from csv, split into training and test sets
print('begin loading data')
train_X, train_y, test_X, test_y = load_data(12345)
Klassifiers = [
# GradientBoostingClassifier,
# GaussianProcessClassifier, # This gave a MemoryError on round 0/6
# SGDClassifier,
# KNeighborsClassifier, # removed due to performance with 500 keypoints (30 sec per predict)
# MLPClassifier,
# SVC,
DecisionTreeClassifier,
]
gbc_spec = {
'loss': ['exponential', 'deviance',],
'n_estimators': [50, 100, 150, 200,],
'max_depth': [2, 3, 4, 5,],
}
sgd_spec = {
'loss': ['hinge', 'log', 'modified_huber',],
'penalty': ['l2', 'l1', 'elasticnet',],
'max_iter': [1, 5, 10, 25, 50, 100, 250, 500, 1000, 2000,],
}
knn_spec = {
'n_neighbors': [2, 5, 10],
'weights': ['uniform', 'distance',],
}
mlp_spec = {
'hidden_layer_sizes': [(100,), (50,), (200,), (100, 100,), (100, 50,), (100, 50, 25,),],
'activation': ['logistic', 'tanh', 'relu',],
}
svc_spec = {
'C': [0.5, 1.0, 2.0,],
'kernel': ['linear', 'poly', 'rbf', 'sigmoid',],
# 'degree': [2, 3, 4, 5, 6,], # explore if poly kernel is promising
# 'shrinking': [True, False,],
}
dtc_spec = {
'criterion': ['gini', 'entropy',],
'max_depth': [None, 2, 3, 4, 5,],
'min_samples_split': [2, 4, 8,],
}
Klassifier_configs = []
# Klassifier_configs.append(gbc_spec)
# Klassifier_configs.append(sgd_spec)
# Klassifier_configs.append(knn_spec)
# Klassifier_configs.append(mlp_spec)
# Klassifier_configs.append(svc_spec)
Klassifier_configs.append(dtc_spec)
num_tests = 10
for index, Klassifier in enumerate(Klassifiers):
acc = []
pre = []
rec = []
tim = []
for config_setup in make_all_combinations(Klassifier_configs[index]):
print('current config: %s' % (config_setup,))
acc_accum = 0
pre_accum = 0
rec_accum = 0
tim_accum = 0
for seed in range(0, num_tests):
print('round %4d/%4d' % (seed+1, num_tests))
train_X, train_y = subsample_data(train_X, train_y, 0.5, seed+9002)
# print('begin fitting')
classifier = Klassifier(**config_setup)
classifier.fit(train_X, train_y)
# print('end fitting')
# TODO(buckbaskin): rewrite to split into sets of 500 kp
# print('begin pred')
stime = datetime.datetime.now()
y_pred = classifier.predict(test_X)
etime = datetime.datetime.now()
# print('end pred')
# print('begin scoring')
acc_accum += accuracy_score(y_true=test_y, y_pred=y_pred)
pre_accum += precision_score(y_true=test_y, y_pred=y_pred)
rec_accum += recall_score(y_true=test_y, y_pred=y_pred)
tim_accum += (etime - stime).total_seconds()
# print('end scoring')
acc.append(acc_accum / num_tests)
pre.append(pre_accum / num_tests)
rec.append(rec_accum / num_tests)
tim.append(tim_accum / num_tests)
print('a: %.4f (percent correctly classified)' % (acc_accum / num_tests,))
print('p: %.4f (percent of correct positives)' % (pre_accum / num_tests,))
print('r: %.4f (percent of positive results found)' % (rec_accum / num_tests,))
print('t: %.4f sec' % (tim_accum / num_tests,))
print(Klassifier)
print('a: %.4f (avg percent correctly classified)' % (sum(acc)/len(acc),))
acc_index = acc.index(max(acc))
print(' %.4f (max)' % (max(acc),))
# print(' %.4f (max) %s' % (max(acc), Klassifier_configs[index][acc_index],))
print('p: %.4f (avg percent of correct positives)' % (sum(pre)/len(pre),))
print('r: %.4f (avg percent of positive results found)' % (sum(rec)/len(rec),))
print('t: %.4f avg sec' % (sum(tim) / len(tim)))
tim_index = tim.index(min(tim))
print(' %.4f sec (minimum)' % (min(tim),))
# print(' %.4f (min) %s' % (min(tim), Klassifier_configs[index][tim_index],))
| mit |
nmayorov/scikit-learn | examples/neural_networks/plot_mlp_alpha.py | 19 | 4088 | """
================================================
Varying regularization in Multi-layer Perceptron
================================================
A comparison of different values for regularization parameter 'alpha' on
synthetic datasets. The plot shows that different alphas yield different
decision functions.
Alpha is a parameter for the regularization term, aka penalty term, that
combats overfitting by constraining the size of the weights. Increasing alpha
may fix high variance (a sign of overfitting) by encouraging smaller weights,
resulting in a decision boundary plot with smaller curvature.
Similarly, decreasing alpha may fix high bias (a sign of underfitting) by
encouraging larger weights, potentially resulting in a more complicated
decision boundary.
"""
print(__doc__)
# Author: Issam H. Laradji
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
h = .02 # step size in the mesh
alphas = np.logspace(-5, 3, 5)
names = []
for i in alphas:
names.append('alpha ' + str(i))
classifiers = []
for i in alphas:
classifiers.append(MLPClassifier(alpha=i, random_state=1))
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=0, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable]
figure = plt.figure(figsize=(17, 9))
i = 1
# iterate over datasets
for X, y in datasets:
# preprocess dataset, split into training and test part
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
zorroblue/scikit-learn | examples/tree/plot_tree_regression.py | 82 | 1562 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
A :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns fine
details of the training data, fitting the noise, i.e. it overfits.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, s=20, edgecolor="black",
c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue",
label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
great-expectations/great_expectations | tests/core/test_serialization.py | 1 | 1763 | import logging
from decimal import Decimal
from great_expectations.core.util import (
convert_to_json_serializable,
requires_lossy_conversion,
)
def test_lossy_serialization_warning(caplog):
caplog.set_level(logging.WARNING, logger="great_expectations.core")
d = Decimal("12345.678901234567890123456789")
convert_to_json_serializable(d)
assert len(caplog.messages) == 1
assert caplog.messages[0].startswith(
"Using lossy conversion for decimal 12345.678901234567890123456789"
)
caplog.clear()
d = Decimal("0.1")
convert_to_json_serializable(d)
print(caplog.messages)
assert len(caplog.messages) == 0
def test_lossy_conversion():
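    # Decimals carrying more significant digits than a float can faithfully
    # represent (roughly 16) are expected to require lossy conversion.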
d = Decimal("12345.678901234567890123456789")
assert requires_lossy_conversion(d)
d = Decimal("12345.67890123456")
assert requires_lossy_conversion(d)
d = Decimal("12345.6789012345")
assert not requires_lossy_conversion(d)
d = Decimal("0.12345678901234567890123456789")
assert requires_lossy_conversion(d)
d = Decimal("0.1234567890123456")
assert requires_lossy_conversion(d)
d = Decimal("0.123456789012345")
assert not requires_lossy_conversion(d)
d = Decimal("0.1")
assert not requires_lossy_conversion(d)
# TODO add unittests for convert_to_json_serializable() and ensure_json_serializable()
def test_serialization_of_spark_df(spark_session):
import pandas as pd
df = pd.DataFrame({"a": [1, 2, 3]})
sdf = spark_session.createDataFrame(df)
assert convert_to_json_serializable(sdf) == {"a": [1, 2, 3]}
df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
sdf = spark_session.createDataFrame(df)
assert convert_to_json_serializable(sdf) == {"a": [1, 2, 3], "b": [4, 5, 6]}
| apache-2.0 |
treycausey/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 2 | 16421 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path
from sklearn.linear_model import LassoLarsCV, lars_path
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
"""Check that the lasso can handle zero data without crashing"""
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
"""
Test Lasso on a toy example for various values of alpha.
When validating this against glmnet notice that glmnet divides it
against nobs.
"""
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
"""
Test ElasticNet for various parameters of alpha and l1_ratio.
Actually, the parameters alpha = 0 should not be allowed. However,
we test it as a border case.
ElasticNet is tested with and without precomputed Gram matrix
"""
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
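# Hedged illustration (not part of the original test-suite): build_dataset()
# above is expected to yield an ill-posed problem -- fewer samples (50) than
# features (200), with only the first 10 weights non-zero -- which is what
# makes the regularization-path tests below meaningful. The helper name is
# ours; the leading underscore keeps nose from collecting it as a test.
def _demo_build_dataset_shapes():
    X, y, X_test, y_test = build_dataset()
    assert X.shape == (50, 200)
    assert y.shape == (50,)
    return X, y, X_test, y_test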
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that the selected alphas don't lie more than
# one grid point apart in clf.alphas_
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Compute the lasso_path
f = ignore_warnings
coef_path = [e.coef_ for e in f(lasso_path)(X, y, alphas=alphas,
return_models=True,
fit_intercept=False)]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
fit_intercept=False,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
np.testing.assert_array_almost_equal(coef_path_cont_lasso(alphas),
np.asarray(coef_path).T, decimal=1)
np.testing.assert_array_almost_equal(coef_path_cont_lasso(alphas),
coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
#Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
PrashntS/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation against SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
readevalprint/zipline | zipline/examples/pairtrade.py | 11 | 4925 | #!/usr/bin/env python
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm
from datetime import datetime
import pytz
from zipline.algorithm import TradingAlgorithm
from zipline.transforms import batch_transform
from zipline.utils.factory import load_from_yahoo
@batch_transform
def ols_transform(data, sid1, sid2):
"""Computes regression coefficient (slope and intercept)
via Ordinary Least Squares between two SIDs.
"""
p0 = data.price[sid1]
p1 = sm.add_constant(data.price[sid2], prepend=True)
slope, intercept = sm.OLS(p0, p1).fit().params
return slope, intercept
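# Hedged aside (not part of the original zipline example): with
# sm.add_constant(..., prepend=True) the fitted parameter vector is ordered
# [intercept, slope], which is why handle_data() below unpacks the returned
# tuple in reversed order. A minimal sketch of that ordering, assuming the
# statsmodels OLS API; the helper name is ours and is never called here:
def _ols_params_order_demo():
    x = np.arange(10.0)
    y = 2.0 * x + 1.0
    params = sm.OLS(y, sm.add_constant(x, prepend=True)).fit().params
    return params  # approximately [1.0, 2.0], i.e. [intercept, slope]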
class Pairtrade(TradingAlgorithm):
"""Pairtrading relies on cointegration of two stocks.
The expectation is that once the two stocks have drifted apart
(i.e. there is a spread), they will eventually revert again. Thus,
if we short the upward drifting stock and long the downward
drifting stock (in short, we buy the spread) once the spread has
widened, we can sell the spread at a profit once the stocks have
converged again. A nice property of this algorithm is that we enter the
market in a neutral position.
This specific algorithm tries to exploit the cointegration of
Pepsi and Coca Cola by estimating the correlation between the
two. Divergence of the spread is evaluated by z-scoring.
"""
def initialize(self, window_length=100):
self.spreads = []
self.invested = 0
self.window_length = window_length
self.ols_transform = ols_transform(refresh_period=self.window_length,
window_length=self.window_length)
def handle_data(self, data):
######################################################
# 1. Compute regression coefficients between PEP and KO
params = self.ols_transform.handle_data(data, 'PEP', 'KO')
if params is None:
return
intercept, slope = params
######################################################
# 2. Compute spread and zscore
zscore = self.compute_zscore(data, slope, intercept)
self.record(zscores=zscore)
######################################################
# 3. Place orders
self.place_orders(data, zscore)
def compute_zscore(self, data, slope, intercept):
"""1. Compute the spread given slope and intercept.
2. zscore the spread.
"""
spread = (data['PEP'].price - (slope * data['KO'].price + intercept))
self.spreads.append(spread)
spread_wind = self.spreads[-self.window_length:]
zscore = (spread - np.mean(spread_wind)) / np.std(spread_wind)
return zscore
def place_orders(self, data, zscore):
"""Buy spread if zscore is > 2, sell if zscore < .5.
"""
if zscore >= 2.0 and not self.invested:
self.order('PEP', int(100 / data['PEP'].price))
self.order('KO', -int(100 / data['KO'].price))
self.invested = True
elif zscore <= -2.0 and not self.invested:
self.order('PEP', -int(100 / data['PEP'].price))
self.order('KO', int(100 / data['KO'].price))
self.invested = True
elif abs(zscore) < .5 and self.invested:
self.sell_spread()
self.invested = False
def sell_spread(self):
"""
Decrease exposure, regardless of position (long/short).
Buy for a short position, sell for a long one.
"""
ko_amount = self.portfolio.positions['KO'].amount
self.order('KO', -1 * ko_amount)
pep_amount = self.portfolio.positions['PEP'].amount
self.order('PEP', -1 * pep_amount)
if __name__ == '__main__':
start = datetime(2000, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
data = load_from_yahoo(stocks=['PEP', 'KO'], indexes={},
start=start, end=end)
pairtrade = Pairtrade()
results = pairtrade.run(data)
data['spreads'] = np.nan
ax1 = plt.subplot(211)
data[['PEP', 'KO']].plot(ax=ax1)
plt.ylabel('price')
plt.setp(ax1.get_xticklabels(), visible=False)
ax2 = plt.subplot(212, sharex=ax1)
results.zscores.plot(ax=ax2, color='r')
plt.ylabel('zscored spread')
plt.gcf().set_size_inches(18, 8)
| apache-2.0 |
mugizico/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 349 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example uses an Elastic-Net regression model, and the performance is
measured using the explained variance, a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0 # only the first 50 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
sanguinariojoe/aquagpusph | examples/3D/spheric_testcase2_dambreak_mpi/cMake/plot_h.py | 1 | 4882 | #******************************************************************************
# *
# * ** * * * * *
# * * * * * * * * * *
# ***** * * * * ***** ** *** * * ** *** *** *
# * * * * * * * * * * * * * * * * * * * *
# * * * * * * * * * * * * * * * * * * * *
# * * ** * ** * * *** *** *** ** *** * * *
# * * * *
# ** * * *
# *
#******************************************************************************
# *
# This file is part of AQUAgpusph, a free CFD program based on SPH. *
# Copyright (C) 2012 Jose Luis Cercos Pita <[email protected]> *
# *
# AQUAgpusph is free software: you can redistribute it and/or modify *
# it under the terms of the GNU General Public License as published by *
# the Free Software Foundation, either version 3 of the License, or *
# (at your option) any later version. *
# *
# AQUAgpusph is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# GNU General Public License for more details. *
# *
# You should have received a copy of the GNU General Public License *
# along with AQUAgpusph. If not, see <http://www.gnu.org/licenses/>. *
# *
#******************************************************************************
import os
from os import path
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def readFile(filepath):
""" Read and extract data from a file
:param filepath: File to read
"""
abspath = filepath
if not path.isabs(filepath):
abspath = path.join(path.dirname(path.abspath(__file__)), filepath)
# Read the file by lines
f = open(abspath, "r")
lines = f.readlines()
f.close()
data = []
for l in lines[1:-1]: # Skip the header and the last line, which may still be incomplete
l = l.strip()
while l.find(' ') != -1:
l = l.replace(' ', ' ')
fields = l.split(' ')
try:
data.append(map(float, fields))
except:
continue
# Transpose the data
return [list(d) for d in zip(*data)]
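# Hedged note (not part of the original script): the zip(*data) call above
# transposes rows into columns, e.g. list(zip(*[[1, 2], [3, 4], [5, 6]]))
# yields [(1, 3, 5), (2, 4, 6)], so readFile() returns one sequence per
# column of the sensor output file.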
lines = []
def update(frame_index):
plt.tight_layout()
try:
data = readFile('sensors_h_0.out')
t = data[0]
hh = (data[-4], data[-3], data[-2], data[-1])
except IndexError:
return
except FileNotFoundError:
return
for i, h in enumerate(hh):
lines[i].set_data(t, h)
fig = plt.figure()
ax11 = fig.add_subplot(221)
ax21 = fig.add_subplot(222, sharey=ax11)
ax12 = fig.add_subplot(223, sharex=ax11)
ax22 = fig.add_subplot(224, sharex=ax21, sharey=ax12)
axes = (ax11, ax21, ax12, ax22)
FNAME = path.join('@EXAMPLE_DEST_DIR@', 'test_case_2_exp_data.dat')
# For some reason the input file is badly sorted
T, _, _, _, _, _, _, _, _, H3, H2, H1, H4 = readFile(FNAME)
exp_t = T
exp_h = (H1, H2, H3, H4)
titles = ('H1', 'H2', 'H3', 'H4')
for i, ax in enumerate(axes):
ax.plot(exp_t,
exp_h[i],
label=r'$H_{Exp}$',
color="red",
linewidth=1.0)
t = [0.0]
h = [0.0]
line, = ax.plot(t,
h,
label=r'$H_{SPH}$',
color="black",
linewidth=1.0)
lines.append(line)
# Set some options
ax.grid()
ax.legend(loc='best')
ax.set_title(titles[i])
ax.set_xlim(0, 6)
ax.set_ylim(0.0, 0.6)
ax.set_autoscale_on(False)
if i > 1:
ax.set_xlabel(r"$t \, [\mathrm{s}]$")
else:
plt.setp(ax.get_xticklabels(), visible=False)
if i in (0, 2):
ax.set_ylabel(r"$H \, [\mathrm{m}]$")
else:
plt.setp(ax.get_yticklabels(), visible=False)
update(0)
ani = animation.FuncAnimation(fig, update, interval=5000)
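# Hedged note (not part of the original script): FuncAnimation's interval is
# given in milliseconds, so update() re-reads and re-plots the sensor file
# roughly every 5 seconds while the window stays open.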
plt.show()
| gpl-3.0 |
platinhom/ManualHom | Coding/Python/scipy-html-0.16.1/generated/scipy-interpolate-SmoothSphereBivariateSpline-1.py | 1 | 1288 | # Suppose we have global data on a coarse grid (the input data does not
# have to be on a grid):
theta = np.linspace(0., np.pi, 7)
phi = np.linspace(0., 2*np.pi, 9)
data = np.empty((theta.shape[0], phi.shape[0]))
data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
data[1:-1,1], data[1:-1,-1] = 1., 1.
data[1,1:-1], data[-2,1:-1] = 1., 1.
data[2:-2,2], data[2:-2,-2] = 2., 2.
data[2,2:-2], data[-3,2:-2] = 2., 2.
data[3,3:-2] = 3.
data = np.roll(data, 4, 1)
# We need to set up the interpolator object
lats, lons = np.meshgrid(theta, phi)
from scipy.interpolate import SmoothSphereBivariateSpline
lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(), s=3.5)
# As a first test, we'll see what the algorithm returns when run on the
# input coordinates
data_orig = lut(theta, phi)
# Finally we interpolate the data to a finer grid
fine_lats = np.linspace(0., np.pi, 70)
fine_lons = np.linspace(0., 2 * np.pi, 90)
data_smth = lut(fine_lats, fine_lons)
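# Hedged note (not in the original snippet): evaluating the spline on the two
# coordinate vectors returns the values on the full grid they span, here an
# array of shape (len(fine_lats), len(fine_lons)) == (70, 90).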
import matplotlib.pyplot as plt
fig = plt.figure()
ax1 = fig.add_subplot(131)
ax1.imshow(data, interpolation='nearest')
ax2 = fig.add_subplot(132)
ax2.imshow(data_orig, interpolation='nearest')
ax3 = fig.add_subplot(133)
ax3.imshow(data_smth, interpolation='nearest')
plt.show()
| gpl-2.0 |
pnedunuri/scikit-learn | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each mesh point.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
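# Hedged note (not in the original example): np.c_[a, b] column-stacks the two
# ravelled mesh coordinates into an (n_mesh_points, 2) array, which matches
# the 2-D PCA-reduced feature space the KMeans model above was fitted on.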
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
maestrotf/pymepps | examples/example_plot_thredds.py | 2 | 5111 | """
Load a thredds dataset
======================
In the following example we will load a thredds dataset from the
Norwegian met.no thredds server.
"""
import numpy as np
import matplotlib.pyplot as plt
import pymepps
######################################################################
# The first step is to load the dataset. This will be performed with
# pymepps.open\_model\_dataset. The NetCDF4 backend also supports
# opendap paths, so we can specify nc as the data type.
#
metno_path = 'http://thredds.met.no/thredds/dodsC/meps25files/' \
'meps_det_pp_2_5km_latest.nc'
metno_ds = pymepps.open_model_dataset(metno_path, 'nc')
######################################################################
# The resulting dataset is a SpatialDataset. The dataset has several
# methods to load an xr.DataArray from the path. It is also possible to print
# the content of the dataset. The content contains the dataset type, the
# number of file handlers within the dataset and all available data
# variables.
#
print(metno_ds)
######################################################################
# The next step is to select/extract a variable from the Dataset. We will
# select the air temperature at 2 metre height and print the content of
# the resulting data.
#
metno_t2m = metno_ds.select('air_temperature_2m')
print(metno_t2m)
metno_t2m.isel(validtime=0).plot()
plt.show()
######################################################################
# We can see that the resulting data is a normal xarray.DataArray, so
# all of the usual DataArray methods can be used. The coordinates of the
# DataArray are normalized and the DataArray is extended with a pymepps
# accessor, which we can reach via metno\_t2m.pp. The main methods of the
# accessor provide grid handling. So our next step is to explore the grid
# of the DataArray.
#
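# Before exploring the grid, a hedged aside (not in the original example):
# because metno_t2m is a plain xarray.DataArray, the standard xarray API
# stays available next to the pymepps pp accessor, e.g. a mean over the
# forecast steps (assuming 'validtime' is the time dimension, as used above):
t2m_time_mean = metno_t2m.mean(dim='validtime')
print(t2m_time_mean.shape)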
print(metno_t2m.pp.grid)
######################################################################
# We can see that the grid has a defined projection. In our
# next step we will slice out an area around Hamburg. We will see that a
# new DataArray with a new grid is created.
#
hh_bounds = [9, 54, 11, 53]
t2m_hh = metno_t2m.pp.sellonlatbox(hh_bounds)
print(t2m_hh.pp.grid)
print(t2m_hh)
######################################################################
# We sliced a longitude and latitude box around the given grid, i.e. we
# sliced the data in a longitude and latitude projection. Our original
# grid was in another projection with unstructured lat/lon coordinates, so
# it is not possible to create a structured grid based on this slice and
# the grid becomes an unstructured grid. In the next step we will show the
# remapping capabilities of the pymepps grid structure.
#
######################################################################
# When we sliced the data we saw that the structured grid could not be
# maintained. So in the next step we will create a structured LonLatGrid
# from scratch. After the grid building we will remap the raw DataArray
# based on the new grid.
#
# The first step is to calculate the model resolution in degree.
#
res = 2500 # model resolution in metre
earth_radius = 6371000 # Earth radius in metre
res_deg = np.round(res*360/(earth_radius*2*np.pi), 4)
# rounded model resolution equivalent in degree if it were on the equator
print(res_deg)
######################################################################
# Our next step is to build the grid. The grid implementation is inspired
# by the climate data operators. So to build the grid we will use the same
# format.
#
grid_dict = dict(
gridtype='lonlat',
xsize=int((hh_bounds[2]-hh_bounds[0])/res_deg),
ysize=int((hh_bounds[1]-hh_bounds[3])/res_deg),
xfirst=hh_bounds[0],
xinc=res_deg,
yfirst=hh_bounds[3],
yinc=res_deg,
)
######################################################################
# Now we use our grid dict together with the GridBuilder to build our
# grid.
#
builder = pymepps.GridBuilder(grid_dict)
hh_grid = builder.build_grid()
print(hh_grid)
######################################################################
# Now we have created the grid. The next step is a remapping of the raw
# DataArray to the new grid. We will use the nearest neighbour approach to
# remap the data.
#
t2m_hh_remapped = metno_t2m.pp.remapnn(hh_grid)
print(t2m_hh_remapped)
######################################################################
# To plot the data in a map, we have to slice the data. We will select the
# first validtime as plotting parameter.
#
t2m_hh_remapped.isel(validtime=0).plot()
plt.show()
######################################################################
# In the map around Hamburg we can see the North Sea and the Baltic Sea at
# the top edges. But with the nearest neighbour approach we retain some of
# the sharp edges on the map. Our last step is a second remap plot, this time
# with a bilinear approach.
#
# sphinx_gallery_thumbnail_number = 3
metno_t2m.pp.remapbil(hh_grid).isel(validtime=0).plot()
plt.show() | gpl-3.0 |