# sdiazb/airflow - airflow/www/views.py
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from past.builtins import basestring, unicode
import ast
import os
import pkg_resources
import socket
from functools import wraps
from datetime import datetime, timedelta
import dateutil.parser
import copy
import json
import bleach
from collections import defaultdict
import inspect
from textwrap import dedent
import traceback
import sqlalchemy as sqla
from sqlalchemy import or_, desc, and_, union_all
from flask import (
redirect, url_for, request, Markup, Response, current_app, render_template, make_response)
from flask_admin import BaseView, expose, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_admin.actions import action
from flask_admin.babel import lazy_gettext
from flask_admin.tools import iterdecode
from flask_login import flash
from flask._compat import PY2
from jinja2.sandbox import ImmutableSandboxedEnvironment
import markdown
import nvd3
from wtforms import (
Form, SelectField, TextAreaField, PasswordField, StringField, validators)
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import airflow
from airflow import configuration as conf
from airflow import models
from airflow import settings
from airflow.api.common.experimental.mark_tasks import set_dag_run_state
from airflow.exceptions import AirflowException
from airflow.settings import Session
from airflow.models import XCom, DagRun
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, SCHEDULER_DEPS
from airflow.models import BaseOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.utils.logging import LoggingMixin, get_log_filename
from airflow.utils.json import json_ser
from airflow.utils.state import State
from airflow.utils.db import provide_session
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils import logging as log_utils
from airflow.utils.dates import infer_time_unit, scale_time_units
from airflow.www import utils as wwwutils
from airflow.www.forms import DateTimeForm, DateTimeWithNumRunsForm
from airflow.www.validators import GreaterEqualThan
from airflow.configuration import AirflowConfigException
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
dagbag = models.DagBag(settings.DAGS_FOLDER)
login_required = airflow.login.login_required
current_user = airflow.login.current_user
logout_user = airflow.login.logout_user
FILTER_BY_OWNER = False
if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
# Only filter DAGs by owner when the config flag is set and authentication is enabled
FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']
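# Column formatters for the Flask-Admin list views below; each receives
# (view, context, model, name) and returns escaped Markup.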
def dag_link(v, c, m, p):
dag_id = bleach.clean(m.dag_id)
url = url_for(
'airflow.graph',
dag_id=dag_id)
return Markup(
'<a href="{}">{}</a>'.format(url, dag_id))
def log_url_formatter(v, c, m, p):
return Markup(
'<a href="{m.log_url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def task_instance_link(v, c, m, p):
dag_id = bleach.clean(m.dag_id)
task_id = bleach.clean(m.task_id)
url = url_for(
'airflow.task',
dag_id=dag_id,
task_id=task_id,
execution_date=m.execution_date.isoformat())
url_root = url_for(
'airflow.graph',
dag_id=dag_id,
root=task_id,
execution_date=m.execution_date.isoformat())
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
aria-hidden="true"></span>
</a>
</span>
""".format(**locals()))
def state_token(state):
color = State.color(state)
return Markup(
'<span class="label" style="background-color:{color};">'
'{state}</span>'.format(**locals()))
def state_f(v, c, m, p):
return state_token(m.state)
def duration_f(v, c, m, p):
if m.end_date and m.duration:
return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
attr = getattr(m, p)
dttm = attr.isoformat() if attr else ''
if datetime.now().isoformat()[:4] == dttm[:4]:
dttm = dttm[5:]
return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
def label_link(v, c, m, p):
try:
default_params = ast.literal_eval(m.default_params)
except:
default_params = {}
url = url_for(
'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
**default_params)
return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
def pool_link(v, c, m, p):
url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))
def pygment_html_render(s, lexer=lexers.TextLexer):
return highlight(
s,
lexer(),
HtmlFormatter(linenos=True),
)
def render(obj, lexer):
out = ""
if isinstance(obj, basestring):
out += pygment_html_render(obj, lexer)
elif isinstance(obj, (tuple, list)):
for i, s in enumerate(obj):
out += "<div>List item #{}</div>".format(i)
out += "<div>" + pygment_html_render(s, lexer) + "</div>"
elif isinstance(obj, dict):
for k, v in obj.items():
out += '<div>Dict item "{}"</div>'.format(k)
out += "<div>" + pygment_html_render(v, lexer) + "</div>"
return out
def wrapped_markdown(s):
return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
attr_renderer = {
'bash_command': lambda x: render(x, lexers.BashLexer),
'hql': lambda x: render(x, lexers.SqlLexer),
'sql': lambda x: render(x, lexers.SqlLexer),
'doc': lambda x: render(x, lexers.TextLexer),
'doc_json': lambda x: render(x, lexers.JsonLexer),
'doc_rst': lambda x: render(x, lexers.RstLexer),
'doc_yaml': lambda x: render(x, lexers.YamlLexer),
'doc_md': wrapped_markdown,
'python_callable': lambda x: render(
inspect.getsource(x), lexers.PythonLexer),
}
def data_profiling_required(f):
'''
Decorator for views requiring data profiling access
'''
@wraps(f)
def decorated_function(*args, **kwargs):
if (
current_app.config['LOGIN_DISABLED'] or
(not current_user.is_anonymous() and current_user.data_profiling())
):
return f(*args, **kwargs)
else:
flash("This page requires data profiling privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
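# Formatters for the Pool list view: link used/queued slot counts to
# pre-filtered task instance lists.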
def fused_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=running')
return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=queued&sort=10&desc=1')
return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
def recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):
if isinstance(tasks, list):
for task in tasks:
recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)
return
if isinstance(tasks, SubDagOperator):
subtasks = tasks.subdag.tasks
dag_ids.append(tasks.subdag.dag_id)
for subtask in subtasks:
if subtask.task_id not in task_ids:
task_ids.append(subtask.task_id)
task_id_to_dag[subtask.task_id] = tasks.subdag
recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag)
if isinstance(tasks, BaseOperator):
task_id_to_dag[tasks.task_id] = tasks.dag
def get_chart_height(dag):
"""
TODO(aoen): See [AIRFLOW-1263] We use the number of tasks in the DAG as a heuristic to
approximate the size of generated chart (otherwise the charts are tiny and unreadable
when DAGs have a large number of tasks). Ideally nvd3 should allow for dynamic-height
charts, that is charts that take up space based on the size of the components within.
"""
return 600 + len(dag.tasks) * 10
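# Main Airflow UI view: DAG list, chart data, code, logs, task/XCom details
# and the tree, graph, duration, tries, landing-time and gantt pages.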
class Airflow(BaseView):
def is_visible(self):
return False
@expose('/')
@login_required
def index(self):
return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
# @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
def chart_data(self):
from airflow import macros
import pandas as pd
session = settings.Session()
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).first()
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).first()
session.expunge_all()
session.commit()
session.close()
payload = {}
payload['state'] = 'ERROR'
payload['error'] = ''
# Processing templated fields
try:
args = ast.literal_eval(chart.default_params)
if type(args) is not dict:
raise AirflowException('Not a dict')
except:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
args.update(request_dict)
args['macros'] = macros
sandbox = ImmutableSandboxedEnvironment()
sql = sandbox.from_string(chart.sql).render(**args)
label = sandbox.from_string(chart.label).render(**args)
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
payload['label'] = label
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(
wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
df = df.fillna(0)
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart.show_datatable or chart_type == "datatable":
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
payload['data'] = data
# Convert the time column to epoch milliseconds for the charting library
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
# From string to datetime
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
except Exception as e:
payload['error'] = "Time conversion failed"
if chart_type == 'datatable':
payload['state'] = 'SUCCESS'
return wwwutils.json_response(payload)
else:
if chart.sql_layout == 'series':
# User provides columns (series, x, y)
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
df[df.columns[2]] = df[df.columns[2]].astype(np.float)
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
# User provides columns (x, y, metric1, metric2, ...)
xaxis_label = df.columns[0]
yaxis_label = 'y'
df.index = df[df.columns[0]]
df = df.sort(df.columns[0])
del df[df.columns[0]]
for col in df.columns:
df[col] = df[col].astype(np.float)
df = df.fillna(0)
NVd3ChartClass = chart_mapping.get(chart.chart_type)
NVd3ChartClass = getattr(nvd3, NVd3ChartClass)
nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date)
for col in df.columns:
nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist())
try:
nvd3_chart.buildcontent()
payload['chart_type'] = nvd3_chart.__class__.__name__
payload['htmlcontent'] = nvd3_chart.htmlcontent
except Exception as e:
payload['error'] = str(e)
payload['state'] = 'SUCCESS'
payload['request_dict'] = request_dict
return wwwutils.json_response(payload)
@expose('/chart')
@data_profiling_required
def chart(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).first()
session.expunge_all()
session.commit()
session.close()
NVd3ChartClass = chart_mapping.get(chart.chart_type)
if not NVd3ChartClass:
flash(
"Not supported anymore as the license was incompatible, "
"sorry",
"danger")
redirect('/admin/chart/')
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/nvd3.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
def dag_stats(self):
ds = models.DagStat
session = Session()
ds.update()
qry = (
session.query(ds.dag_id, ds.state, ds.count)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.dag_states:
try:
count = data[dag.dag_id][state]
except Exception:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/task_stats')
def task_stats(self):
TI = models.TaskInstance
DagRun = models.DagRun
Dag = models.DagModel
session = Session()
LastDagRun = (
session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state != State.RUNNING)
.filter(Dag.is_active == True)
.group_by(DagRun.dag_id)
.subquery('last_dag_run')
)
RunningDagRun = (
session.query(DagRun.dag_id, DagRun.execution_date)
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state == State.RUNNING)
.filter(Dag.is_active == True)
.subquery('running_dag_run')
)
# Select all task_instances from active dag_runs.
# If no dag_run is active, return task instances from most recent dag_run.
LastTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(LastDagRun, and_(
LastDagRun.c.dag_id == TI.dag_id,
LastDagRun.c.execution_date == TI.execution_date))
)
RunningTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(RunningDagRun, and_(
RunningDagRun.c.dag_id == TI.dag_id,
RunningDagRun.c.execution_date == TI.execution_date))
)
UnionTI = union_all(LastTI, RunningTI).alias('union_ti')
qry = (
session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count())
.group_by(UnionTI.c.dag_id, UnionTI.c.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
session.close()
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.task_states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = dag_id
try:
with open(dag.fileloc, 'r') as f:
code = f.read()
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
except IOError as e:
html_code = str(e)
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
@expose('/dag_details')
@login_required
def dag_details(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = "DAG details"
session = settings.Session()
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
return self.render(
'airflow/dag_details.html',
dag=dag, title=title, states=states, State=State)
@current_app.errorhandler(404)
def circles(self):
return render_template(
'airflow/circles.html', hostname=socket.getfqdn()), 404
@current_app.errorhandler(500)
def show_traceback(self):
from airflow.utils import asciiart as ascii_
return render_template(
'airflow/traceback.html',
hostname=socket.getfqdn(),
nukular=ascii_.nukular,
info=traceback.format_exc()), 500
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
@expose('/headers')
def headers(self):
d = {
'headers': {k: v for k, v in request.headers},
}
if hasattr(current_user, 'is_superuser'):
d['is_superuser'] = current_user.is_superuser()
d['data_profiling'] = current_user.data_profiling()
d['is_anonymous'] = current_user.is_anonymous()
d['is_authenticated'] = current_user.is_authenticated()
if hasattr(current_user, 'username'):
d['username'] = current_user.username
return wwwutils.json_response(d)
@expose('/pickle_info')
@login_required
def pickle_info(self):
d = {}
dag_id = request.args.get('dag_id')
dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()
for dag in dags:
if not dag.is_subdag:
d[dag.dag_id] = dag.pickle_info()
return wwwutils.json_response(d)
@expose('/login', methods=['GET', 'POST'])
def login(self):
return airflow.login.login(self, request)
@expose('/logout')
def logout(self):
logout_user()
flash('You have been logged out.')
return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
@wwwutils.action_logging
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title,)
def _get_log(self, ti, log_filename):
"""
Get log for a specific try number.
:param ti: current task instance
:param log_filename: relative filename to fetch the log
"""
# TODO: This is not the best practice. Log handler and
# reader should be configurable and separated from the
# frontend. The new airflow logging design is in progress.
# Please refer to #2422(https://github.com/apache/incubator-airflow/pull/2422).
log = ''
# Load remote log
remote_log_base = conf.get('core', 'REMOTE_BASE_LOG_FOLDER')
remote_log_loaded = False
if remote_log_base:
remote_log_path = os.path.join(remote_log_base, log_filename)
remote_log = ""
# S3
if remote_log_path.startswith('s3:/'):
s3_log = log_utils.S3Log()
if s3_log.log_exists(remote_log_path):
remote_log += s3_log.read(remote_log_path, return_error=True)
remote_log_loaded = True
# GCS
elif remote_log_path.startswith('gs:/'):
gcs_log = log_utils.GCSLog()
if gcs_log.log_exists(remote_log_path):
remote_log += gcs_log.read(remote_log_path, return_error=True)
remote_log_loaded = True
# unsupported
else:
remote_log += '*** Unsupported remote log location.'
if remote_log:
log += ('*** Reading remote log from {}.\n{}\n'.format(
remote_log_path, remote_log))
# We only want to display local log if the remote log is not loaded.
if not remote_log_loaded:
# Load local log
local_log_base = os.path.expanduser(conf.get('core', 'BASE_LOG_FOLDER'))
local_log_path = os.path.join(local_log_base, log_filename)
if os.path.exists(local_log_path):
try:
f = open(local_log_path)
log += "*** Reading local log.\n" + "".join(f.readlines())
f.close()
except:
log = "*** Failed to load local log file: {0}.\n".format(local_log_path)
else:
WORKER_LOG_SERVER_PORT = conf.get('celery', 'WORKER_LOG_SERVER_PORT')
url = os.path.join(
"http://{ti.hostname}:{WORKER_LOG_SERVER_PORT}/log", log_filename
).format(**locals())
log += "*** Log file isn't local.\n"
log += "*** Fetching here: {url}\n".format(**locals())
try:
import requests
timeout = None # No timeout
try:
timeout = conf.getint('webserver', 'log_fetch_timeout_sec')
except (AirflowConfigException, ValueError):
pass
response = requests.get(url, timeout=timeout)
response.raise_for_status()
log += '\n' + response.text
except:
log += "*** Failed to fetch log file from work r.\n".format(
**locals())
if PY2 and not isinstance(log, unicode):
log = log.decode('utf-8')
return log
@expose('/log')
@login_required
@wwwutils.action_logging
def log(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
TI = models.TaskInstance
session = Session()
ti = session.query(TI).filter(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == dttm).first()
logs = []
if ti is None:
logs = ["*** Task instance did not exist in the DB\n"]
else:
logs = [''] * ti.try_number
for try_number in range(ti.try_number):
log_filename = get_log_filename(
dag_id, task_id, execution_date, try_number)
logs[try_number] += self._get_log(ti, log_filename)
return self.render(
'airflow/ti_log.html',
logs=logs, dag=dag, title="Log by attempts", task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@login_required
@wwwutils.action_logging
def task(self):
TI = models.TaskInstance
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
task = copy.copy(dag.get_task(task_id))
task.resolve_template_files()
ti = TI(task=task, execution_date=dttm)
ti.refresh_from_db()
ti_attrs = []
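# type(self.task) is the type of a bound method, so the comparison below
# filters out methods and keeps plain data attributes.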
for attr_name in dir(ti):
if not attr_name.startswith('_'):
attr = getattr(ti, attr_name)
if type(attr) != type(self.task):
ti_attrs.append((attr_name, str(attr)))
task_attrs = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in attr_renderer:
task_attrs.append((attr_name, str(attr)))
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in attr_renderer:
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
no_failed_deps_result = [(
"Unknown",
dedent("""\
All dependencies are met but the task instance is not running. In most cases this just means that the task will probably be scheduled soon unless:<br/>
- The scheduler is down or under heavy load<br/>
{}
<br/>
If this task instance does not start soon please contact your Airflow administrator for assistance."""
.format(
"- This task instance already ran and had it's state changed manually (e.g. cleared in the UI)<br/>"
if ti.state == State.NONE else "")))]
# Use the scheduler's context to figure out which dependencies are not met
dep_context = DepContext(SCHEDULER_DEPS)
failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in
ti.get_failed_dep_statuses(
dep_context=dep_context)]
title = "Task Instance Details"
return self.render(
'airflow/task.html',
task_attrs=task_attrs,
ti_attrs=ti_attrs,
failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/xcom')
@login_required
@wwwutils.action_logging
def xcom(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
session = Session()
xcomlist = session.query(XCom).filter(
XCom.dag_id == dag_id, XCom.task_id == task_id,
XCom.execution_date == dttm).all()
attributes = []
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
dag=dag, title=title)
@expose('/run')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def run(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
ignore_all_deps = request.args.get('ignore_all_deps') == "true"
ignore_task_deps = request.args.get('ignore_task_deps') == "true"
ignore_ti_state = request.args.get('ignore_ti_state') == "true"
try:
from airflow.executors import GetDefaultExecutor
from airflow.executors import CeleryExecutor
executor = GetDefaultExecutor()
if not isinstance(executor, CeleryExecutor):
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
except ImportError:
# in case CeleryExecutor cannot be imported it is not active either
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
ti.refresh_from_db()
# Make sure the task instance can be queued
dep_context = DepContext(
deps=QUEUE_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
if failed_deps:
failed_deps_str = ", ".join(
["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps])
flash("Could not queue task instance for execution, dependencies not met: "
"{}".format(failed_deps_str),
"error")
return redirect(origin)
executor.start()
executor.queue_task_instance(
ti,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
@expose('/trigger')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def trigger(self):
dag_id = request.args.get('dag_id')
origin = request.args.get('origin') or "/admin/"
dag = dagbag.get_dag(dag_id)
if not dag:
flash("Cannot find dag {}".format(dag_id))
return redirect(origin)
execution_date = datetime.now()
run_id = "manual__{0}".format(execution_date.isoformat())
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
flash("This run_id {} already exists".format(run_id))
return redirect(origin)
run_conf = {}
dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True
)
flash(
"Triggered {}, "
"it should start any moment now.".format(dag_id))
return redirect(origin)
def _clear_dag_tis(self, dag, start_date, end_date, origin,
recursive=False, confirmed=False):
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
tis = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
dry_run=True)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=("Here's the list of task instances you are about "
"to clear:"),
details=details)
return response
@expose('/clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
recursive = request.args.get('recursive') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=recursive, confirmed=confirmed)
@expose('/dagrun_clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == "true"
dag = dagbag.get_dag(dag_id)
execution_date = dateutil.parser.parse(execution_date)
start_date = execution_date
end_date = execution_date
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=True, confirmed=confirmed)
@expose('/blocked')
@login_required
def blocked(self):
session = settings.Session()
DR = models.DagRun
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.group_by(DR.dag_id)
.all()
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
if dag_id in dagbag.dags:
max_active_runs = dagbag.dags[dag_id].max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
@expose('/dagrun_success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_success(self):
dag_id = request.args.get('dag_id')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == 'true'
origin = request.args.get('origin')
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = dateutil.parser.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state(dag, execution_date, state=State.SUCCESS,
commit=confirmed)
if confirmed:
flash('Marked success on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render('airflow/confirm.html',
message=("Here's the list of task instances you are "
"about to mark as successful:"),
details=details)
return response
@expose('/success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def success(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
task.dag = dag
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
if not dag:
flash("Cannot find DAG: {}".format(dag_id))
return redirect(origin)
if not task:
flash("Cannot find task {} in DAG {}".format(task_id, dag.dag_id))
return redirect(origin)
from airflow.api.common.experimental.mark_tasks import set_state
if confirmed:
altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=State.SUCCESS,
commit=True)
flash("Marked success on {} task instances".format(len(altered)))
return redirect(origin)
to_be_altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=State.SUCCESS,
commit=False)
details = "\n".join([str(t) for t in to_be_altered])
response = self.render("airflow/confirm.html",
message=("Here's the list of task instances you are "
"about to mark as successful:"),
details=details)
return response
@expose('/tree')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def tree(self):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
session = settings.Session()
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
DR = models.DagRun
dag_runs = (
session.query(DR)
.filter(
DR.dag_id==dag.dag_id,
DR.execution_date<=base_date,
DR.execution_date>=min_date)
.all()
)
dag_runs = {
dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
dates = sorted(list(dag_runs.keys()))
max_date = max(dates) if dates else None
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
task_instances = {}
for ti in tis:
tid = alchemy_to_dict(ti)
dr = dag_runs.get(ti.execution_date)
tid['external_trigger'] = dr['external_trigger'] if dr else False
task_instances[(ti.task_id, ti.execution_date)] = tid
expanded = []
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = [0]
node_limit = 5000 / max(1, len(dag.roots))
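# Build the nested dict structure the d3 tree view consumes, walking
# upstream_list from the DAG's roots.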
def recurse_nodes(task, visited):
visited.add(task)
node_count[0] += 1
children = [
recurse_nodes(t, visited) for t in task.upstream_list
if node_count[0] < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
def set_duration(tid):
if (isinstance(tid, dict) and tid.get("state") == State.RUNNING and
tid["start_date"] is not None):
d = datetime.now() - dateutil.parser.parse(tid["start_date"])
tid["duration"] = d.total_seconds()
return tid
return {
'name': task.task_id,
'instances': [
set_duration(task_instances.get((task.task_id, d))) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [
dag_runs.get(d) or {'execution_date': d.isoformat()}
for d in dates],
}
data = json.dumps(data, indent=4, default=json_ser)
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
form=form,
dag=dag, data=data, blur=blur)
@expose('/graph')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def graph(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
arrange = request.args.get('arrange', dag.orientation)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
}
})
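# Recursively record an edge for every upstream dependency, starting from
# the DAG's roots; duplicate edges are skipped.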
def get_upstream(task):
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
DR = models.DagRun
drs = (
session.query(DR)
.filter_by(dag_id=dag_id)
.order_by(desc(DR.execution_date)).all()
)
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if dttm == dr.execution_date:
dr_state = dr.state
class GraphForm(Form):
execution_date = SelectField("DAG run", choices=dr_choices)
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
form = GraphForm(
data={'execution_date': dttm.isoformat(), 'arrange': arrange})
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks}
if not tasks:
flash("No tasks found", "error")
session.commit()
session.close()
doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') and dag.doc_md else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=state_token(dr_state),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2),)
@expose('/duration')
@login_required
@wwwutils.action_logging
def duration(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
cum_chart = nvd3.lineChart(
name="cumLineChart", x_is_date=True, height=chart_height, width="1200")
y = defaultdict(list)
x = defaultdict(list)
cum_y = defaultdict(list)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
TF = models.TaskFail
ti_fails = (
session
.query(TF)
.filter(
TF.dag_id == dag.dag_id,
TF.execution_date >= min_date,
TF.execution_date <= base_date,
TF.task_id.in_([t.task_id for t in dag.tasks]))
.all()
)
fails_totals = defaultdict(int)
for tf in ti_fails:
dict_key = (tf.dag_id, tf.task_id, tf.execution_date)
fails_totals[dict_key] += tf.duration
for ti in tis:
if ti.duration:
dttm = wwwutils.epoch(ti.execution_date)
x[ti.task_id].append(dttm)
y[ti.task_id].append(float(ti.duration))
fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)
fails_total = fails_totals[fails_dict_key]
cum_y[ti.task_id].append(float(ti.duration + fails_total))
# determine the most relevant time unit for the set of task instance
# durations for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])
# update the y Axis on both charts to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(y_unit))
cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(cum_y_unit))
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
cum_chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(cum_y[task.task_id],
cum_y_unit))
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
cum_chart.buildcontent()
s_index = cum_chart.htmlcontent.rfind('});')
cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +
"$( document ).trigger('chartload')" +
cum_chart.htmlcontent[s_index:])
return self.render(
'airflow/duration_chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent,
cum_chart=cum_chart.htmlcontent
)
@expose('/tries')
@login_required
@wwwutils.action_logging
def tries(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, y_axis_format='d', height=chart_height,
width="1200")
for task in dag.tasks:
y = []
x = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
dttm = wwwutils.epoch(ti.execution_date)
x.append(dttm)
y.append(ti.try_number)
if x:
chart.add_serie(name=task.task_id, x=x, y=y)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
tries = sorted(list({ti.try_number for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if tries else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent
)
@expose('/landing_times')
@login_required
@wwwutils.action_logging
def landing_times(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
y = {}
x = {}
for task in dag.tasks:
y[task.task_id] = []
x[task.task_id] = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
ts = ti.execution_date
if dag.schedule_interval and dag.following_schedule(ts):
ts = dag.following_schedule(ts)
if ti.end_date:
dttm = wwwutils.epoch(ti.execution_date)
secs = (ti.end_date - ts).total_seconds()
x[ti.task_id].append(dttm)
y[ti.task_id].append(secs)
# determine the most relevant time unit for the set of landing times
# for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
# update the y Axis to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Landing Time ({})'.format(y_unit))
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
chart=chart.htmlcontent,
height=str(chart_height + 100) + "px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
)
@expose('/paused', methods=['POST'])
@login_required
@wwwutils.action_logging
def paused(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
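# The 'is_paused' query arg carries the DAG's current state, so the stored
# flag is set to the opposite value.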
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
@wwwutils.action_logging
def refresh(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = datetime.now()
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect(request.referrer)
@expose('/refresh_all')
@login_required
@wwwutils.action_logging
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@login_required
@wwwutils.action_logging
def gantt(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
form = DateTimeForm(data={'execution_date': dttm})
tis = [
ti for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
tasks = []
for ti in tis:
end_date = ti.end_date if ti.end_date else datetime.now()
tasks.append({
'startDate': wwwutils.epoch(ti.start_date),
'endDate': wwwutils.epoch(end_date),
'isoStart': ti.start_date.isoformat()[:-4],
'isoEnd': end_date.isoformat()[:-4],
'taskName': ti.task_id,
'duration': "{}".format(end_date - ti.start_date)[:-4],
'status': ti.state,
'executionDate': ti.execution_date.isoformat(),
})
states = {ti.state: ti.state for ti in tis}
data = {
'taskNames': [ti.task_id for ti in tis],
'tasks': tasks,
'taskStatus': states,
'height': len(tis) * 25,
}
session.commit()
session.close()
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
data=json.dumps(data, indent=2),
base_date='',
demo_mode=demo_mode,
root=root,
)
@expose('/object/task_instances')
@login_required
@wwwutils.action_logging
def task_instances(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
return ("Error: Invalid execution_date")
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
return json.dumps(task_instances)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
session = settings.Session()
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except:
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
@expose('/varimport', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def varimport(self):
try:
out = str(request.files['file'].read())
d = json.loads(out)
except Exception:
flash("Missing file or syntax error.")
else:
for k, v in d.items():
models.Variable.set(k, v, serialize_json=isinstance(v, dict))
flash("{} variable(s) successfully updated.".format(len(d)))
return redirect('/admin/variable')
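# Home page ('/'): renders the DAG list, applying owner-based and
# paused-state filtering.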
class HomeView(AdminIndexView):
@expose("/")
@login_required
def index(self):
session = Session()
DM = models.DagModel
qry = None
# restrict the dags shown if filter_by_owner and current user is not superuser
do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower()
hide_paused_dags_by_default = conf.getboolean('webserver',
'hide_paused_dags_by_default')
show_paused_arg = request.args.get('showPaused', 'None')
if show_paused_arg.strip().lower() == 'false':
hide_paused = True
elif show_paused_arg.strip().lower() == 'true':
hide_paused = False
else:
hide_paused = hide_paused_dags_by_default
# read orm_dags from the db
qry = session.query(DM)
qry_fltr = []
if do_filter and owner_mode == 'ldapgroup':
qry_fltr = qry.filter(
~DM.is_subdag, DM.is_active,
DM.owners.in_(current_user.ldap_groups)
).all()
elif do_filter and owner_mode == 'user':
qry_fltr = qry.filter(
~DM.is_subdag, DM.is_active,
DM.owners == current_user.user.username
).all()
else:
qry_fltr = qry.filter(
~DM.is_subdag, DM.is_active
).all()
# optionally filter out "paused" dags
if hide_paused:
orm_dags = {dag.dag_id: dag for dag in qry_fltr if not dag.is_paused}
else:
orm_dags = {dag.dag_id: dag for dag in qry_fltr}
import_errors = session.query(models.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"error")
session.expunge_all()
session.commit()
session.close()
# get a list of all non-subdag dags visible to everyone
# optionally filter out "paused" dags
if hide_paused:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag and not dag.is_paused]
else:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag]
# optionally filter to get only dags that the user should see
if do_filter and owner_mode == 'ldapgroup':
# only show dags owned by someone in @current_user.ldap_groups
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner in current_user.ldap_groups
}
elif do_filter and owner_mode == 'user':
# only show dags owned by @current_user.user.username
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner == current_user.user.username
}
else:
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
}
all_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys()))
return self.render(
'airflow/dags.html',
webserver_dags=webserver_dags,
orm_dags=orm_dags,
hide_paused=hide_paused,
all_dag_ids=all_dag_ids)
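# Ad hoc query UI: runs user-supplied SQL through the selected connection's
# hook, with output truncated at QUERY_LIMIT rows.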
class QueryView(wwwutils.DataProfilingMixin, BaseView):
@expose('/', methods=['POST', 'GET'])
@wwwutils.gzipped
def query(self):
session = settings.Session()
dbs = session.query(models.Connection).order_by(
models.Connection.conn_id).all()
session.expunge_all()
db_choices = list(
((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
conn_id_str = request.form.get('conn_id')
csv = request.form.get('csv') == "true"
sql = request.form.get('sql')
class QueryForm(Form):
conn_id = SelectField("Layout", choices=db_choices)
sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
data = {
'conn_id': conn_id_str,
'sql': sql,
}
results = None
has_data = False
error = False
if conn_id_str:
db = [db for db in dbs if db.conn_id == conn_id_str][0]
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
# df = hook.get_pandas_df(sql)
has_data = len(df) > 0
df = df.fillna('')
results = df.to_html(
classes=[
'table', 'table-bordered', 'table-striped', 'no-wrap'],
index=False,
na_rep='',
) if has_data else ''
except Exception as e:
flash(str(e), 'error')
error = True
if has_data and len(df) == QUERY_LIMIT:
flash(
"Query output truncated at " + str(QUERY_LIMIT) +
" rows", 'info')
if not has_data and error:
flash('No data', 'error')
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
form = QueryForm(request.form, data=data)
session.commit()
session.close()
return self.render(
'airflow/query.html', form=form,
title="Ad Hoc Query",
results=results or '',
has_data=has_data)
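# Shared Flask-Admin ModelView defaults (Airflow templates, 500 rows per page)
# for the model views below.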
class AirflowModelView(ModelView):
list_template = 'airflow/model_list.html'
edit_template = 'airflow/model_edit.html'
create_template = 'airflow/model_create.html'
column_display_actions = True
page_size = 500
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
"""
Modifying the base ModelView class for non edit, browse only operations
"""
named_filter_urls = True
can_create = False
can_edit = False
can_delete = False
column_display_pk = True
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
column_formatters = dict(
pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
named_filter_urls = True
form_args = {
'pool': {
'validators': [
validators.DataRequired(),
]
}
}
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
verbose_name_plural = "SLA misses"
verbose_name = "SLA miss"
column_list = (
'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
column_formatters = dict(
task_id=task_instance_link,
execution_date=datetime_f,
timestamp=datetime_f,
dag_id=dag_link)
named_filter_urls = True
column_searchable_list = ('dag_id', 'task_id',)
column_filters = (
'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
form_widget_args = {
'email_sent': {'disabled': True},
'timestamp': {'disabled': True},
}
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "chart"
verbose_name_plural = "charts"
form_columns = (
'label',
'owner',
'conn_id',
'chart_type',
'show_datatable',
'x_is_date',
'y_log_scale',
'show_sql',
'height',
'sql_layout',
'sql',
'default_params',)
column_list = (
'label', 'conn_id', 'chart_type', 'owner', 'last_modified',)
column_formatters = dict(label=label_link, last_modified=datetime_f)
column_default_sort = ('last_modified', True)
create_template = 'airflow/chart/create.html'
edit_template = 'airflow/chart/edit.html'
column_filters = ('label', 'owner.username', 'conn_id')
column_searchable_list = ('owner.username', 'label', 'sql')
column_descriptions = {
'label': "Can include {{ templated_fields }} and {{ macros }}",
'chart_type': "The type of chart to be displayed",
'sql': "Can include {{ templated_fields }} and {{ macros }}.",
'height': "Height of the chart, in pixels.",
'conn_id': "Source database to run the query against",
'x_is_date': (
"Whether the X axis should be casted as a date field. Expect most "
"intelligible date formats to get casted properly."
),
'owner': (
"The chart's owner, mostly used for reference and filtering in "
"the list view."
),
'show_datatable':
"Whether to display an interactive data table under the chart.",
'default_params': (
'A dictionary of {"key": "values",} that define what the '
'templated fields (parameters) values should be by default. '
'To be valid, it needs to "eval" as a Python dict. '
'The key values will show up in the url\'s querystring '
'and can be altered there.'
),
'show_sql': "Whether to display the SQL statement as a collapsible "
"section in the chart page.",
'y_log_scale': "Whether to use a log scale for the Y axis.",
'sql_layout': (
"Defines the layout of the SQL that the application should "
"expect. Depending on the tables you are sourcing from, it may "
"make more sense to pivot / unpivot the metrics."
),
}
column_labels = {
'sql': "SQL",
'height': "Chart Height",
'sql_layout': "SQL Layout",
'show_sql': "Display the SQL Statement",
'default_params': "Default Parameters",
}
form_choices = {
'chart_type': [
('line', 'Line Chart'),
('spline', 'Spline Chart'),
('bar', 'Bar Chart'),
('column', 'Column Chart'),
('area', 'Overlapping Area Chart'),
('stacked_area', 'Stacked Area Chart'),
('percent_area', 'Percent Area Chart'),
('datatable', 'No chart, data table only'),
],
'sql_layout': [
('series', 'SELECT series, x, y FROM ...'),
('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
],
'conn_id': [
(c.conn_id, c.conn_id)
for c in (
Session().query(models.Connection.conn_id)
.group_by(models.Connection.conn_id)
)
]
}
def on_model_change(self, form, model, is_created=True):
if model.iteration_no is None:
model.iteration_no = 0
else:
model.iteration_no += 1
if not model.user_id and current_user and hasattr(current_user, 'id'):
model.user_id = current_user.id
model.last_modified = datetime.now()
chart_mapping = (
('line', 'lineChart'),
('spline', 'lineChart'),
('bar', 'multiBarChart'),
('column', 'multiBarChart'),
('area', 'stackedAreaChart'),
('stacked_area', 'stackedAreaChart'),
('percent_area', 'stackedAreaChart'),
('datatable', 'datatable'),
)
chart_mapping = dict(chart_mapping)
class KnownEventView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "known event"
verbose_name_plural = "known events"
form_columns = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
'description',
)
form_args = {
'label': {
'validators': [
validators.DataRequired(),
],
},
'event_type': {
'validators': [
validators.DataRequired(),
],
},
'start_date': {
'validators': [
validators.DataRequired(),
],
},
'end_date': {
'validators': [
validators.DataRequired(),
GreaterEqualThan(fieldname='start_date'),
],
},
'reported_by': {
'validators': [
validators.DataRequired(),
],
}
}
column_list = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
)
column_default_sort = ("start_date", True)
column_sortable_list = (
'label',
('event_type', 'event_type.know_event_type'),
'start_date',
'end_date',
('reported_by', 'reported_by.username'),
)
class KnownEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
pass
# NOTE: For debugging / troubleshooting
# mv = KnowEventTypeView(
# models.KnownEventType,
# Session, name="Known Event Types", category="Manage")
# admin.add_view(mv)
# class DagPickleView(SuperUserMixin, ModelView):
# pass
# mv = DagPickleView(
# models.DagPickle,
# Session, name="Pickles", category="Manage")
# admin.add_view(mv)
class VariableView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "Variable"
verbose_name_plural = "Variables"
list_template = 'airflow/variable_list.html'
def hidden_field_formatter(view, context, model, name):
if wwwutils.should_hide_value_for_key(model.key):
return Markup('*' * 8)
return getattr(model, name)
form_columns = (
'key',
'val',
)
column_list = ('key', 'val', 'is_encrypted',)
column_filters = ('key', 'val')
column_searchable_list = ('key', 'val')
column_default_sort = ('key', False)
form_widget_args = {
'is_encrypted': {'disabled': True},
'val': {
'rows': 20,
}
}
form_args = {
'key': {
'validators': {
validators.DataRequired(),
},
},
}
column_sortable_list = (
'key',
'val',
'is_encrypted',
)
column_formatters = {
'val': hidden_field_formatter,
}
# Default flask-admin export functionality doesn't handle serialized json
@action('varexport', 'Export', None)
def action_varexport(self, ids):
V = models.Variable
session = settings.Session()
qry = session.query(V).filter(V.id.in_(ids)).all()
session.close()
var_dict = {}
d = json.JSONDecoder()
for var in qry:
val = None
try:
val = d.decode(var.val)
except:
val = var.val
var_dict[var.key] = val
response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
response.headers["Content-Disposition"] = "attachment; filename=variables.json"
return response
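# Illustrative export (hypothetical variables, not part of the original code):
# a variable whose stored val is '3' is decoded to the number 3, '{"a": 1}'
# becomes a nested object, and a plain string like 'hello' (not valid JSON)
# is kept as-is, so the downloaded variables.json looks roughly like
#   {"config": {"a": 1}, "counter": 3, "greeting": "hello"}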
def on_form_prefill(self, form, id):
if wwwutils.should_hide_value_for_key(form.key.data):
form.val.data = '*' * 8
class XComView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "XCom"
verbose_name_plural = "XComs"
page_size = 20
form_columns = (
'key',
'value',
'execution_date',
'task_id',
'dag_id',
)
form_extra_fields = {
'value': StringField('Value'),
}
column_filters = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
column_searchable_list = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
class JobModelView(ModelViewOnly):
verbose_name_plural = "jobs"
verbose_name = "job"
column_display_actions = False
column_default_sort = ('start_date', True)
column_filters = (
'job_type', 'dag_id', 'state',
'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
column_formatters = dict(
start_date=datetime_f,
end_date=datetime_f,
hostname=nobr_f,
state=state_f,
latest_heartbeat=datetime_f)
class DagRunModelView(ModelViewOnly):
verbose_name_plural = "DAG Runs"
can_edit = True
can_create = True
column_editable_list = ('state',)
verbose_name = "dag run"
column_default_sort = ('execution_date', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
form_args = dict(
dag_id=dict(validators=[validators.DataRequired()])
)
column_list = (
'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')
column_filters = column_list
column_searchable_list = ('dag_id', 'state', 'run_id')
column_formatters = dict(
execution_date=datetime_f,
state=state_f,
start_date=datetime_f,
dag_id=dag_link)
@action('new_delete', "Delete", "Are you sure you want to delete selected records?")
def action_new_delete(self, ids):
session = settings.Session()
deleted = set(session.query(models.DagRun)
.filter(models.DagRun.id.in_(ids))
.all())
session.query(models.DagRun)\
.filter(models.DagRun.id.in_(ids))\
.delete(synchronize_session='fetch')
session.commit()
dirty_ids = []
for row in deleted:
dirty_ids.append(row.dag_id)
models.DagStat.update(dirty_ids, dirty_only=False, session=session)
session.close()
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_dagrun_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_dagrun_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_dagrun_state(ids, State.SUCCESS)
@provide_session
def set_dagrun_state(self, ids, target_state, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
dirty_ids.append(dr.dag_id)
count += 1
dr.state = target_state
if target_state == State.RUNNING:
dr.start_date = datetime.now()
else:
dr.end_date = datetime.now()
session.commit()
models.DagStat.update(dirty_ids, session=session)
flash(
"{count} dag runs were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
class LogModelView(ModelViewOnly):
verbose_name_plural = "logs"
verbose_name = "log"
column_display_actions = False
column_default_sort = ('dttm', True)
column_filters = ('dag_id', 'task_id', 'execution_date')
column_formatters = dict(
dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
class TaskInstanceModelView(ModelViewOnly):
verbose_name_plural = "task instances"
verbose_name = "task instance"
column_filters = (
'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator', 'start_date', 'end_date')
named_filter_urls = True
column_formatters = dict(
log_url=log_url_formatter,
task_id=task_instance_link,
hostname=nobr_f,
state=state_f,
execution_date=datetime_f,
start_date=datetime_f,
end_date=datetime_f,
queued_dttm=datetime_f,
dag_id=dag_link, duration=duration_f)
column_searchable_list = ('dag_id', 'task_id', 'state')
column_default_sort = ('job_id', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
column_list = (
'state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
'pool', 'log_url')
can_delete = True
page_size = 500
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_task_instance_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_task_instance_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_task_instance_state(ids, State.SUCCESS)
@action('set_retry', "Set state to 'up_for_retry'", None)
def action_set_retry(self, ids):
self.set_task_instance_state(ids, State.UP_FOR_RETRY)
@action('delete',
lazy_gettext('Delete'),
lazy_gettext('Are you sure you want to delete selected records?'))
def action_delete(self, ids):
"""
As a workaround for AIRFLOW-277, this method overrides Flask-Admin's ModelView.action_delete().
TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
https://github.com/flask-admin/flask-admin/issues/1226
"""
if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
self.delete_task_instances(ids)
else:
super(TaskInstanceModelView, self).action_delete(ids)
@provide_session
def set_task_instance_state(self, ids, target_state, session=None):
try:
TI = models.TaskInstance
count = len(ids)
for id in ids:
task_id, dag_id, execution_date = id.split(',')
execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')
ti = session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag_id,
TI.execution_date == execution_date).one()
ti.state = target_state
session.commit()
flash(
"{count} task instances were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
@provide_session
def delete_task_instances(self, ids, session=None):
try:
TI = models.TaskInstance
count = 0
for id in ids:
task_id, dag_id, execution_date = id.split(',')
execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')
count += session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag_id,
TI.execution_date == execution_date).delete()
session.commit()
flash("{count} task instances were deleted".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to delete', 'error')
def get_one(self, id):
"""
As a workaround for AIRFLOW-252, this method overrides Flask-Admin's ModelView.get_one().
TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
https://github.com/flask-admin/flask-admin/issues/1226
"""
task_id, dag_id, execution_date = iterdecode(id)
execution_date = dateutil.parser.parse(execution_date)
return self.session.query(self.model).get((task_id, dag_id, execution_date))
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
create_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
list_template = 'airflow/conn_list.html'
form_columns = (
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
'extra__jdbc__drv_path',
'extra__jdbc__drv_clsname',
'extra__google_cloud_platform__project',
'extra__google_cloud_platform__key_path',
'extra__google_cloud_platform__scope',
)
verbose_name = "Connection"
verbose_name_plural = "Connections"
column_default_sort = ('conn_id', False)
column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',)
form_overrides = dict(_password=PasswordField)
form_widget_args = {
'is_extra_encrypted': {'disabled': True},
'is_encrypted': {'disabled': True},
}
# Used to customize the form: these form elements get rendered
# and their results are stored in the extra field as JSON. All of them
# need to be prefixed with extra__ followed by the conn_type, as in
# extra__{conn_type}__name. You can also hide form elements and rename
# others from the connection_form.js file
form_extra_fields = {
'extra__jdbc__drv_path': StringField('Driver Path'),
'extra__jdbc__drv_clsname': StringField('Driver Class'),
'extra__google_cloud_platform__project': StringField('Project Id'),
'extra__google_cloud_platform__key_path': StringField('Keyfile Path'),
'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'),
}
form_choices = {
'conn_type': models.Connection._types
}
def on_model_change(self, form, model, is_created):
formdata = form.data
if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:
extra = {
key: formdata[key]
for key in self.form_extra_fields.keys() if key in formdata}
model.extra = json.dumps(extra)
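# Illustrative outcome (hypothetical form data): saving a 'jdbc' connection
# with Driver Path '/opt/drivers/foo.jar' serialises every extra__* form
# field (including any left blank) into the `extra` column as JSON, e.g.
#   {"extra__jdbc__drv_path": "/opt/drivers/foo.jar",
#    "extra__jdbc__drv_clsname": "org.foo.Driver", ...}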
@classmethod
def alert_fernet_key(cls):
fk = None
try:
fk = conf.get('core', 'fernet_key')
except:
pass
return fk is None
@classmethod
def is_secure(cls):
"""
Used to decide whether to display a message in the Connection list view
warning that the passwords and `extra` field can't be encrypted
(True is returned only when a fernet key and the cryptography package are available).
"""
is_secure = False
try:
import cryptography
conf.get('core', 'fernet_key')
is_secure = True
except:
pass
return is_secure
def on_form_prefill(self, form, id):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception:
d = {}
for field in list(self.form_extra_fields.keys()):
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "User"
verbose_name_plural = "Users"
column_default_sort = 'username'
class VersionView(wwwutils.SuperUserMixin, LoggingMixin, BaseView):
@expose('/')
def version(self):
# Look at the version from setup.py
try:
airflow_version = pkg_resources.require("apache-airflow")[0].version
except Exception as e:
airflow_version = None
self.logger.error(e)
# Get the Git repo and git hash
git_version = None
try:
with open(os.path.join(*[settings.AIRFLOW_HOME, 'airflow', 'git_version'])) as f:
git_version = f.readline()
except Exception as e:
self.logger.error(e)
# Render information
title = "Version Info"
return self.render('airflow/version.html',
title=title,
airflow_version=airflow_version,
git_version=git_version)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def conf(self):
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = conf.AIRFLOW_CONFIG
if conf.getboolean("webserver", "expose_config"):
with open(conf.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
table = [(section, key, value, source)
for section, parameters in conf.as_dict(True, True).items()
for key, (value, source) in parameters.items()]
else:
config = (
"# You Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
table = None
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/config.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle,
table=table)
class DagModelView(wwwutils.SuperUserMixin, ModelView):
column_list = ('dag_id', 'owners')
column_editable_list = ('is_paused',)
form_excluded_columns = ('is_subdag', 'is_active')
column_searchable_list = ('dag_id',)
column_filters = (
'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
'last_scheduler_run', 'last_expired')
form_widget_args = {
'last_scheduler_run': {'disabled': True},
'fileloc': {'disabled': True},
'is_paused': {'disabled': True},
'last_pickled': {'disabled': True},
'pickle_id': {'disabled': True},
'last_loaded': {'disabled': True},
'last_expired': {'disabled': True},
'pickle_size': {'disabled': True},
'scheduler_lock': {'disabled': True},
'owners': {'disabled': True},
}
column_formatters = dict(
dag_id=dag_link,
)
can_delete = False
can_create = False
page_size = 50
list_template = 'airflow/list_dags.html'
named_filter_urls = True
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
| apache-2.0 |
zehpunktbarron/iOSMAnalyzer | scripts/c5_tag_completeness_shop.py | 1 | 10683 | # -*- coding: utf-8 -*-
#!/usr/bin/python2.7
#description :This file creates a plot: Calculates the development of the tag-completeness [%] of all "shop" POIs
#author :Christopher Barron @ http://giscience.uni-hd.de/
#date :19.01.2013
#version :0.1
#usage :python pyscript.py
#==============================================================================
import psycopg2
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import pylab
# import db connection parameters
import db_conn_para as db
###
### Connect to the database with psycopg2, building the connection string from the db_conn_para parameters
###
try:
conn_string="dbname= %s user= %s host= %s password= %s" %(db.g_my_dbname, db.g_my_username, db.g_my_hostname, db.g_my_dbpassword)
print "Connecting to database\n->%s" % (conn_string)
# Establish the connection to the DB using psycopg2
conn = psycopg2.connect(conn_string)
print "Connection to database was established succesfully"
except:
print "Connection to database failed"
###
### Execute SQL query
###
# SQL queries can be issued through this new "cursor" method
cur = conn.cursor()
# Execute SQL query. For more than one row use three '"'
try:
cur.execute("""
--
-- Shops
--
SELECT
generate_series,
-- START Key "name"
(CASE WHEN
cnt_total <> 0
THEN
ROUND((cnt_name * 100.00 / cnt_total), 2)
ELSE 0
END)::float AS perc_name,
-- END Key "name"
-- START Key "operator"
(CASE WHEN
cnt_total <> 0
THEN
ROUND((cnt_operator * 100.00 / cnt_total), 2)
ELSE 0
END)::float AS perc_operator,
-- END Key "operator"
-- START Key "opening_hours"
(CASE WHEN
cnt_total <> 0
THEN
ROUND((cnt_opening * 100.00 / cnt_total), 2)
ELSE 0
END)::float AS perc_opening,
-- END Key "opening_hours"
-- START Key "website"
(CASE WHEN
cnt_total <> 0
THEN
ROUND((cnt_website * 100.00 / cnt_total), 2)
ELSE 0
END)::float AS perc_website,
-- END Key "website"
-- START Key "housenumber"
(CASE WHEN
cnt_total <> 0
THEN
ROUND((cnt_housenumber * 100.00 / cnt_total), 2)
ELSE 0
END)::float AS perc_housenumber,
-- END Key "housenumber"
-- START Key "phone"
(CASE WHEN
cnt_total <> 0
THEN
ROUND((cnt_phone * 100.00 / cnt_total), 2)
ELSE 0
END)::float AS perc_phone,
-- END Key "phone"
-- START Key "wheelchair"
(CASE WHEN
cnt_total <> 0
THEN
ROUND((cnt_wheelchair * 100.00 / cnt_total), 2)
ELSE 0
END)::float AS perc_wheelchair
-- END Key "wheelchair"
FROM
(SELECT generate_series,
(SELECT
count(distinct id)
FROM
(SELECT
id,
skeys(tags)
FROM
hist_plp h
WHERE
-- Shops
tags ? 'shop'
AND visible = 'true'
AND
(version = (SELECT max(version) FROM hist_plp WHERE typ = h.typ AND h.id = hist_plp.id) AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))
AND minor = (SELECT max(minor) from hist_plp where typ = h.typ AND h.id = hist_plp.id AND h.version = hist_plp.version AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))))
) AS foo
WHERE
skeys = 'name'
) AS cnt_name,
-- START operator
(SELECT
count(distinct id)
FROM
(SELECT
id,
skeys(tags)
FROM
hist_plp h
WHERE
-- Shops
tags ? 'shop'
AND visible = 'true'
AND
(version = (SELECT max(version) FROM hist_plp WHERE typ = h.typ AND h.id = hist_plp.id) AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))
AND minor = (SELECT max(minor) from hist_plp where typ = h.typ AND h.id = hist_plp.id AND h.version = hist_plp.version AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))))
) AS foo
WHERE
skeys = 'operator'
) AS cnt_operator,
-- END operator
-- START opening_hours
(SELECT
count(distinct id)
FROM
(SELECT
id,
skeys(tags)
FROM
hist_plp h
WHERE
-- Shops
tags ? 'shop'
AND visible = 'true'
AND
(version = (SELECT max(version) FROM hist_plp WHERE typ = h.typ AND h.id = hist_plp.id) AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))
AND minor = (SELECT max(minor) from hist_plp where typ = h.typ AND h.id = hist_plp.id AND h.version = hist_plp.version AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))))
) AS foo
WHERE
skeys = 'opening_hours'
) AS cnt_opening,
-- END opening_hours
-- START website
(SELECT
count(distinct id)
FROM
(SELECT
id,
skeys(tags)
FROM
hist_plp h
WHERE
-- Shops
tags ? 'shop'
AND visible = 'true'
AND
(version = (SELECT max(version) FROM hist_plp WHERE typ = h.typ AND h.id = hist_plp.id) AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))
AND minor = (SELECT max(minor) from hist_plp where typ = h.typ AND h.id = hist_plp.id AND h.version = hist_plp.version AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))))
) AS foo
WHERE
skeys = 'website'
) AS cnt_website,
-- END website
-- START housenumber
(SELECT
count(distinct id)
FROM
(SELECT
id,
skeys(tags)
FROM
hist_plp h
WHERE
-- Shops
tags ? 'shop'
AND visible = 'true'
AND
(version = (SELECT max(version) FROM hist_plp WHERE typ = h.typ AND h.id = hist_plp.id) AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))
AND minor = (SELECT max(minor) from hist_plp where typ = h.typ AND h.id = hist_plp.id AND h.version = hist_plp.version AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))))
) AS foo
WHERE
skeys = 'addr:housenumber'
) AS cnt_housenumber,
-- END housenumber
-- START phone
(SELECT
count(distinct id)
FROM
(SELECT
id,
skeys(tags)
FROM
hist_plp h
WHERE
-- Shops
tags ? 'shop'
AND visible = 'true'
AND
(version = (SELECT max(version) FROM hist_plp WHERE typ = h.typ AND h.id = hist_plp.id) AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))
AND minor = (SELECT max(minor) from hist_plp where typ = h.typ AND h.id = hist_plp.id AND h.version = hist_plp.version AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))))
) AS foo
WHERE
skeys = 'phone'
) AS cnt_phone,
-- END phone
-- START wheelchair
(SELECT
count(distinct id)
FROM
(SELECT
id,
skeys(tags)
FROM
hist_plp h
WHERE
-- Shops
tags ? 'shop'
AND visible = 'true'
AND
(version = (SELECT max(version) FROM hist_plp WHERE typ = h.typ AND h.id = hist_plp.id) AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))
AND minor = (SELECT max(minor) from hist_plp where typ = h.typ AND h.id = hist_plp.id AND h.version = hist_plp.version AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))))
) AS foo
WHERE
skeys = 'wheelchair'
) AS cnt_wheelchair,
-- END wheelchair
-- START total
(SELECT
count(distinct id)
FROM
(SELECT
id,
skeys(tags)
FROM
hist_plp h
WHERE
-- Shops
tags ? 'shop'
AND visible = 'true'
AND
(version = (SELECT max(version) FROM hist_plp WHERE typ = h.typ AND h.id = hist_plp.id) AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))
AND minor = (SELECT max(minor) from hist_plp where typ = h.typ AND h.id = hist_plp.id AND h.version = hist_plp.version AND
( valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))))
) AS foo
) AS cnt_total
-- END total
FROM generate_series(
(SELECT date_trunc ('month',(
SELECT MIN(valid_from) FROM hist_plp)) as foo), -- Select minimum date (month)
(SELECT MAX(valid_from) FROM hist_plp)::date, -- Select maximum date
interval '1 month')
) AS foo2
;
""")
# Getting a list of tuples from the database-cursor (cur)
data_tuples = []
for row in cur:
data_tuples.append(row)
except:
print "Query could not be executed"
###
### Plot (Multiline-Chart)
###
# Datatypes of the returned data
datatypes = [('date', 'S20'),('col2', 'double'), ('col3', 'double'), ('col4', 'double'), ('col5', 'double'), ('col6', 'double'), ('col7', 'double'), ('col8', 'double')]
# Data-tuple and datatype
data = np.array(data_tuples, dtype=datatypes)
# Date comes from 'col1'
col2 = data['col2']
col3 = data['col3']
col4 = data['col4']
col5 = data['col5']
col6 = data['col6']
col7 = data['col7']
col8 = data['col8']
# Converts date to a manageable date-format for matplotlib
dates = mdates.num2date(mdates.datestr2num(data['date']))
fig, ax = plt.subplots()
# set figure size
fig.set_size_inches(12,8)
# Create linechart
plt.plot(dates, col2, color = '#2dd700', linewidth=2, label='name = *')
plt.plot(dates, col3, color = '#ff6700', linewidth=2, linestyle='dashed', label='operator = *')
plt.plot(dates, col4, color = '#00a287', linewidth=2, label='opening_hours = *')
plt.plot(dates, col5, color = '#ff6700', linewidth=2, label='website = *')
plt.plot(dates, col6, color = '#f5001d', linewidth=2, label='addr:housenumber = *')
plt.plot(dates, col7, color = '#2dd700', linewidth=2, linestyle='dashed', label='phone = *')
plt.plot(dates, col8, color = '#00a287', linewidth=2, linestyle='dashed', label='wheelchair = *')
# Forces the plot to start from 0 and end at 100
pylab.ylim([0,100])
# Place a gray dashed grid behind the ticks (only for y-axis)
ax.yaxis.grid(color='gray', linestyle='dashed')
# Set this grid behind the ticks
ax.set_axisbelow(True)
# Rotate x-labels on the x-axis
fig.autofmt_xdate()
# Label x and y axis
plt.xlabel('Date')
plt.ylabel('Tag-Completeness [%]')
# Locate legend on the plot (http://matplotlib.org/users/legend_guide.html#legend-location)
# Shrink the current axis by 10%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.9, box.height * 0.9])
# Put a legend to the right of the current axis and reduce the font size
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop={'size':9})
# Plot-title
plt.title('Development of the Tag-Completeness of all Shop POIs')
# Save plot to *.jpeg-file
plt.savefig('pics/c5_tag_completeness_shop.jpeg')
plt.clf()
| gpl-3.0 |
mistercrunch/panoramix | superset/utils/csv.py | 2 | 3022 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import urllib.request
from typing import Any, Dict, Optional
from urllib.error import URLError
import pandas as pd
negative_number_re = re.compile(r"^-[0-9.]+$")
# This regex will match if the string starts with:
#
# 1. one of -, @, +, |, =, %
# 2. two double quotes immediately followed by one of -, @, +, |, =, %
# 3. one or more spaces immediately followed by one of -, @, +, |, =, %
#
problematic_chars_re = re.compile(r'^(?:"{2}|\s{1,})(?=[\-@+|=%])|^[\-@+|=%]')
def escape_value(value: str) -> str:
"""
Escapes a set of special characters.
http://georgemauer.net/2017/10/07/csv-injection.html
"""
needs_escaping = problematic_chars_re.match(value) is not None
is_negative_number = negative_number_re.match(value) is not None
if needs_escaping and not is_negative_number:
# Escape pipe to be extra safe as this
# can lead to remote code execution
value = value.replace("|", "\\|")
# Precede the line with a single quote. This prevents
# evaluation of commands and some spreadsheet software
# will hide this visually from the user. Many articles
# claim a preceding space will work here too, however,
# when uploading a csv file in Google sheets, a leading
# space was ignored and code was still evaluated.
value = "'" + value
return value
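# A few hedged sanity checks of escape_value (illustrative only, not part
# of the original module):
#   escape_value("=SUM(A1:A10)")  -> "'=SUM(A1:A10)"   (prefixed with ')
#   escape_value("-12.5")         -> "-12.5"           (negative numbers untouched)
#   escape_value("=cmd|x")        -> "'=cmd\|x"        (pipe escaped, then prefixed)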
def df_to_escaped_csv(df: pd.DataFrame, **kwargs: Any) -> Any:
escape_values = lambda v: escape_value(v) if isinstance(v, str) else v
# Escape csv headers
df = df.rename(columns=escape_values)
# Escape csv rows
df = df.applymap(escape_values)
return df.to_csv(**kwargs)
def get_chart_csv_data(
chart_url: str, auth_cookies: Optional[Dict[str, str]] = None
) -> Optional[bytes]:
content = None
if auth_cookies:
opener = urllib.request.build_opener()
cookie_str = ";".join([f"{key}={val}" for key, val in auth_cookies.items()])
opener.addheaders.append(("Cookie", cookie_str))
response = opener.open(chart_url)
content = response.read()
if response.getcode() != 200:
raise URLError(response.getcode())
if content:
return content
return None
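# Hedged usage sketch (hypothetical URL and cookie names):
#   get_chart_csv_data("http://localhost:8088/chart/1/csv", {"session": "abc"})
# returns the raw CSV bytes; with no auth_cookies it returns None, and a
# non-200 response raises URLError.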
| apache-2.0 |
srv/stereo_slam | scripts/slam_evaluation.py | 1 | 13711 | #!/usr/bin/env python
import roslib; roslib.load_manifest('stereo_slam')
import pylab
import numpy as np
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
import tf.transformations as tf
import scipy.optimize as optimize
import collections
import math
import string
class Error(Exception):
""" Base class for exceptions in this module. """
pass
def trajectory_distances(data):
dist = []
dist.append(0)
for i in range(len(data) - 1):
dist.append(dist[i] + calc_dist(data[i, :], data[i + 1, : ]))
return dist
def calc_dist_xyz(data_point1, data_point2):
xdiff = data_point1[1] - data_point2[1]
ydiff = data_point1[2] - data_point2[2]
zdiff = data_point1[3] - data_point2[3]
return xdiff, ydiff, zdiff
def calc_dist(data_point1, data_point2):
xdiff, ydiff, zdiff = calc_dist_xyz(data_point1, data_point2)
return math.sqrt(xdiff*xdiff + ydiff*ydiff + zdiff*zdiff)
def to_transform(data_point):
t = [data_point[1], data_point[2], data_point[3]]
q = [data_point[4], data_point[5], data_point[6], data_point[7]]
rot_mat = tf.quaternion_matrix(q)
trans_mat = tf.translation_matrix(t)
return tf.concatenate_matrices(trans_mat, rot_mat)
def rebase(base_vector, rebased_vector):
min_idx_vec = []
for i in range(len(base_vector)):
# Search the best coincidence in time with ground truth
min_dist = 99999
min_idx = -1
for j in range(len(rebased_vector)):
dist = abs(rebased_vector[j,0] - base_vector[i,0])
if dist < min_dist:
min_dist = dist
min_idx = j
min_idx_vec.append(min_idx)
min_idx_vec = np.array(min_idx_vec)
return rebased_vector[min_idx_vec,:]
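# In short, rebase() is a nearest-neighbour association in time: for every
# timestamp in base_vector the temporally closest row of rebased_vector is
# selected, so the result is aligned 1:1 with base_vector (same number of rows).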
def apply_tf_to_matrix(tf_delta, data):
corrected_data = []
for i in range(len(data)):
point = to_transform(data[i,:])
point_d = tf.concatenate_matrices(point, tf_delta)
t_d = tf.translation_from_matrix(point_d)
q_d = tf.quaternion_from_matrix(point_d)
corrected_data.append([data[i,0], t_d[0], t_d[1], t_d[2], q_d[0], q_d[1], q_d[2], q_d[3]])
return np.array(corrected_data)
def apply_tf_to_vector(tf_delta, data):
corrected_data = []
for i in range(len(data)):
point = to_transform(data[i,:])
t_d = tf.translation_from_matrix(point)
q_d = tf.quaternion_from_matrix(point)
t_d = np.array([t_d[0], t_d[1], t_d[2], 1.0])
point_mod = tf.concatenate_matrices(tf_delta, t_d)
corrected_data.append([data[i,0], point_mod[0], point_mod[1], point_mod[2], q_d[0], q_d[1], q_d[2], q_d[3]])
return np.array(corrected_data)
def quaternion_from_rpy(roll, pitch, yaw):
q = []
q.append( np.cos(roll/2)*np.cos(pitch/2)*np.cos(yaw/2) + np.sin(roll/2)*np.sin(pitch/2)*np.sin(yaw/2))
q.append( np.sin(roll/2)*np.cos(pitch/2)*np.cos(yaw/2) - np.cos(roll/2)*np.sin(pitch/2)*np.sin(yaw/2))
q.append( -np.cos(roll/2)*np.sin(pitch/2)*np.cos(yaw/2) - np.sin(roll/2)*np.cos(pitch/2)*np.sin(yaw/2))
q.append( np.cos(roll/2)*np.cos(pitch/2)*np.sin(yaw/2) - np.sin(roll/2)*np.sin(pitch/2)*np.cos(yaw/2))
q = np.array(q)
return q
def sigmoid(p, vertices, gt_rebased):
# Get the current parameter set and build the transform
roll, pitch, yaw = p
q = quaternion_from_rpy(roll, pitch, yaw)
cur_delta = to_transform([0.0, 0.0, 0.0, 0.0, q[0], q[1], q[2], q[3]])
# Compute the quadratic error for the current delta transformation
err = 0.0
for i in range(len(vertices)):
# Compute the corrected ground truth
tf_gt = to_transform(gt_rebased[i])
tf_gt_t = tf.translation_from_matrix(tf_gt)
tf_gt_t = np.array([tf_gt_t[0], tf_gt_t[1], tf_gt_t[2], 1.0])
tf_gt_corrected = tf.concatenate_matrices(cur_delta, tf_gt_t)
tf_gt_corr_vect = [0.0, tf_gt_corrected[0], tf_gt_corrected[1], tf_gt_corrected[2], 0.0, 0.0, 0.0, 1.0]
# Compute the error
err += np.power(calc_dist(vertices[i], tf_gt_corr_vect), 2)
return np.sqrt(err)
def calc_errors(vector_1, vector_2):
# Compute the errors between vectors
assert(len(vector_1) == len(vector_2))
output = []
for i in range(len(vector_1)):
output.append(calc_dist(vector_1[i,:], vector_2[i,:]))
return np.array(output)
def calc_time_vector(data):
output = []
start_time = data[0,0]
for i in range(len(data)):
output.append((data[i,0] - start_time))
return np.array(output)
def toRSTtable(rows, header=True, vdelim=" ", padding=1, justify='right'):
"""
Outputs a list of lists as a Restructured Text Table
- rows - list of lists
- header - if True the first row is treated as a table header
- vdelim - vertical delimiter between columns
- padding - nr. of spaces are left around the longest element in the column
- justify - may be left, center, right
"""
border="=" # character for drawing the border
justify = {'left':string.ljust,'center':string.center,'right':string.rjust}[justify.lower()]
# calculate column widths (longest item in each col
# plus "padding" nr of spaces on both sides)
cols = zip(*rows)
colWidths = [max([len(str(item))+2*padding for item in col]) for col in cols]
# the horizontal border needed by rst
borderline = vdelim.join([w*border for w in colWidths])
# outputs table in rst format
output = ""
output += borderline + "\n"
for row in rows:
output += vdelim.join([justify(str(item),width) for (item,width) in zip(row,colWidths)])
output += "\n"
if header: output += borderline + "\n"; header=False
output += borderline + "\n"
print output
return output
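# Hedged example (hypothetical rows): toRSTtable([["Input", "MAE"], ["Viso2", 0.12]])
# prints an RST "simple table" roughly like
#   ======= ======
#     Input    MAE
#   ======= ======
#     Viso2   0.12
#   ======= ======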
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='Plot 3D graphics of SLAM.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('ground_truth_file',
help='file with ground truth')
parser.add_argument('visual_odometry_file',
help='file with visual odometry')
parser.add_argument('graph_vertices_file',
help='file the vertices of stereo slam')
parser.add_argument('graph_edges_file',
help='file the edges of stereo slam')
parser.add_argument('orb_file',
help='file corresponding to the ORB-SLAM trajectory')
args = parser.parse_args()
colors = ['g','r','b']
angles = [-6, 6, -4, 4, -12, 12]
# Setup the font for the graphics
font = {'family' : 'Sans',
'weight' : 'normal',
'size' : 35}
pylab.rc('font', **font)
linewidth = 3.0
# Log
print "Adjusting curves, please wait..."
# Init figures
fig0 = pylab.figure()
ax0 = Axes3D(fig0)
ax0.grid(True)
ax0.set_xlabel("x (m)")
ax0.set_ylabel("y (m)")
ax0.set_zlabel("z (m)")
fig1 = pylab.figure()
ax1 = fig1.gca()
ax1.grid(True)
ax1.set_xlabel("x (m)")
ax1.set_ylabel("y (m)")
fig2 = pylab.figure()
ax2 = fig2.gca()
ax2.grid(True)
ax2.set_xlabel("x (m)")
ax2.set_ylabel("y (m)")
fig3 = pylab.figure()
ax3 = fig3.gca()
ax3.grid(True)
ax3.set_xlabel("x (m)")
ax3.set_ylabel("y (m)")
fig4 = pylab.figure()
ax4 = fig4.gca()
ax4.grid(True)
ax4.set_xlabel("x (m)")
ax4.set_ylabel("y (m)")
# Load ground truth (gt) data.
# Check the file type
f = open(args.ground_truth_file)
lines = f.readlines()
f.close()
size = lines[1].split(",")
if (len(size) == 8 or len(size) >= 12):
gt = pylab.loadtxt(args.ground_truth_file, delimiter=',', comments='%', usecols=(0,5,6,7,8,9,10,11))
gt[:,0] = gt[:,0] / 1000000000
else:
gt = pylab.loadtxt(args.ground_truth_file, delimiter=',', comments='%', usecols=(0,1,2,3,4,5,6,7))
# Load the visual odometry
odom = pylab.loadtxt(args.visual_odometry_file, delimiter=',', comments='%', usecols=(0,5,6,7,8,9,10,11))
# odom = pylab.loadtxt(args.visual_odometry_file, delimiter=',', comments='%', usecols=(0,4,5,6,7,8,9,10))
odom[:,0] = odom[:,0] / 1000000000
# Load the graph vertices
vertices = pylab.loadtxt(args.graph_vertices_file, delimiter=',', usecols=(0,2,3,4,5,6,7,8))
# Load the ORB-SLAM trajectory
orb = pylab.loadtxt(args.orb_file, delimiter=',', usecols=(0,2,3,4,5,6,7,8))
# Get the gt indeces for all graph vertices
gt_rebased = rebase(vertices, gt)
odom_rebased = rebase(vertices, odom)
orb_rebased = rebase(vertices, orb)
# Compute the translation to make the same origin for all curves
first_vertice = to_transform(vertices[0,:])
first_gt_coincidence = to_transform(gt_rebased[0,:])
tf_delta = tf.concatenate_matrices(tf.inverse_matrix(first_gt_coincidence), first_vertice)
# Move all the gt and odometry points with the correction
gt_moved = apply_tf_to_matrix(tf_delta, gt)
gt_rb_moved = apply_tf_to_matrix(tf_delta, gt_rebased)
odom_moved = apply_tf_to_matrix(tf_delta, odom)
odom_rb_moved = apply_tf_to_matrix(tf_delta, odom_rebased)
orb_moved = apply_tf_to_matrix(tf_delta, orb)
orb_rb_moved = apply_tf_to_matrix(tf_delta, orb_rebased)
# Transform optimization
Param = collections.namedtuple('Param','roll pitch yaw')
rranges = ((angles[0]*np.pi/180, angles[1]*np.pi/180, 0.04), (angles[2]*np.pi/180, angles[3]*np.pi/180, 0.02), (angles[4]*np.pi/180, angles[5]*np.pi/180, 0.04))
p = optimize.brute(sigmoid, rranges, args=(vertices, gt_rebased))
p = Param(*p)
# Build the rotation matrix
roll, pitch, yaw = p
q = quaternion_from_rpy(roll, pitch, yaw)
tf_correction = to_transform([0.0, 0.0, 0.0, 0.0, q[0], q[1], q[2], q[3]])
# Correct ground truth and odometry
gt_corrected = apply_tf_to_vector(tf_correction, gt_moved)
gt_rb_corrected = apply_tf_to_vector(tf_correction, gt_rb_moved)
odom_corrected = apply_tf_to_vector(tf_correction, odom_moved)
odom_rb_corrected = apply_tf_to_vector(tf_correction, odom_rb_moved)
orb_corrected = apply_tf_to_vector(tf_correction, orb_moved)
orb_rb_corrected = apply_tf_to_vector(tf_correction, orb_rb_moved)
# Compute the errors
print "Computing errors, please wait..."
gt_dist = trajectory_distances(gt_rb_corrected)
odom_dist = trajectory_distances(odom_rb_corrected)
vertices_dist = trajectory_distances(vertices)
orb_dist = trajectory_distances(orb_rb_corrected)
odom_errors = calc_errors(gt_rb_corrected, odom_rb_corrected)
vertices_errors = calc_errors(gt_rb_corrected, vertices)
orb_errors = calc_errors(gt_rb_corrected, orb_rb_corrected)
time = calc_time_vector(gt_rb_corrected)
odom_mae = np.average(np.abs(odom_errors), 0)
vertices_mae = np.average(np.abs(vertices_errors), 0)
orb_mae = np.average(np.abs(orb_errors), 0)
rows = []
rows.append(['Viso2'] + [len(odom_errors)] + [odom_dist[-1]] + [odom_mae])
rows.append(['ORB-SLAM'] + [len(orb_errors)] + [orb_dist[-1]] + [orb_mae])
rows.append(['Stereo-SLAM'] + [len(vertices_errors)] + [vertices_dist[-1]] + [vertices_mae])
# Build the header for the output table
header = [ "Input", "Data Points", "Traj. Distance (m)", "Trans. MAE (m)"]
toRSTtable([header] + rows)
print "Ground truth distance: ", gt_dist[-1], "m"
# Plot graph (3D)
ax0.plot(gt_corrected[:,1], gt_corrected[:,2], gt_corrected[:,3], colors[0], linewidth=linewidth, label='Ground Truth')
ax0.plot(odom_corrected[:,1], odom_corrected[:,2], odom_corrected[:,3], colors[1], linewidth=linewidth, label='Viso2')
ax0.plot(orb_corrected[:,1], orb_corrected[:,2], orb_corrected[:,3], 'y', linewidth=linewidth, label='ORB-SLAM')
ax0.plot(vertices[:,1], vertices[:,2], vertices[:,3], colors[2], linewidth=linewidth, label='Stereo-SLAM', marker='o')
# Plot graph (2D)
ax1.plot(gt_corrected[:,1], gt_corrected[:,2], colors[0], linewidth=linewidth, label='Ground truth')
ax1.plot(odom_corrected[:,1], odom_corrected[:,2], colors[1], linewidth=linewidth, label='Viso2')
ax1.plot(orb_corrected[:,1], orb_corrected[:,2], 'y', linewidth=linewidth, label='ORB-SLAM')
ax1.plot(vertices[:,1], vertices[:,2], colors[2], linewidth=linewidth, label='Stereo-SLAM', marker='o')
# Plot the graph edges
f = open(args.graph_edges_file)
lines = f.readlines()
f.close()
if (len(lines) > 0):
edges = pylab.loadtxt(args.graph_edges_file, delimiter=',', usecols=(3,4,5,10,11,12))
for i in range(len(edges)):
vect = []
vect.append([edges[i,0], edges[i,1], edges[i,2]])
vect.append([edges[i,3], edges[i,4], edges[i,5]])
vect = np.array(vect)
ax0.plot(vect[:,0], vect[:,1], vect[:,2], colors[2], linewidth=linewidth-1, linestyle='--')
ax1.plot(vect[:,0], vect[:,1], colors[2], linewidth=linewidth-1, linestyle='--')
ax0.legend(loc=2)
ax1.legend(loc=2)
# Plot individual graphs (2D)
ax2.plot(gt_corrected[:,1], gt_corrected[:,2], 'g', linewidth=linewidth, label='Ground truth')
ax2.plot(odom_corrected[:,1], odom_corrected[:,2], 'b', linewidth=linewidth, label='Viso2')
ax2.legend(loc=2)
ax3.plot(gt_corrected[:,1], gt_corrected[:,2], 'g', linewidth=linewidth, label='Ground truth')
ax3.plot(orb_corrected[:,1], orb_corrected[:,2], 'b', linewidth=linewidth, label='ORB-SLAM')
ax3.legend(loc=2)
ax4.plot(gt_corrected[:,1], gt_corrected[:,2], 'g', linewidth=linewidth, label='Ground truth')
ax4.plot(vertices[:,1], vertices[:,2], 'b', linewidth=linewidth, label='Stereo-SLAM')
ax4.legend(loc=2)
# Plot errors
fig5 = pylab.figure()
ax5 = fig5.gca()
ax5.plot(odom_dist, odom_errors, colors[1], linewidth=linewidth, label='Viso2')
ax5.plot(orb_dist, orb_errors, 'g', linewidth=linewidth, label='ORB-SLAM')
ax5.plot(vertices_dist, vertices_errors, colors[2], linewidth=linewidth, label='Stereo-SLAM')
ax5.grid(True)
ax5.set_xlabel("Distance (m)")
ax5.set_ylabel("Error (m)")
ax5.legend(loc=2)
ax5.tick_params(axis='both', which='major', labelsize=40);
ax5.set_xlim(0, 52)
pyplot.draw()
pylab.show() | bsd-3-clause |
wheeler-microfluidics/dmf-control-board-firmware | dmf_control_board_firmware/tests/test_feedback_calculations.py | 3 | 4665 | import numpy as np
import pandas as pd
from path_helpers import path
def compare_results_to_reference(method, reference_results_file, id=1, filter_order=None):
input_file = path(__file__).parent / path('FeedbackResults') / \
path('input_1.pickle')
data = input_file.pickle_load()
# add absolute path
reference_results_file = path(__file__).parent / path('FeedbackResults') / \
reference_results_file
f = pd.HDFStore(reference_results_file, 'r')
reference_results_df = f['/root']
f.close()
order = filter_order
if filter_order is None:
# filter_order=None is represented as -1
order = -1
# filter the results based on id and filter_order
reference_results_df = reference_results_df[(reference_results_df['id'] == id) &
(reference_results_df['filter_order'] == order)]
if method in ['force', 'V_actuation']:
results = eval('data.%s()' % method)
elif method in ['dxdt']:
t, results = eval('data.%s(filter_order=%s)' % (method, filter_order))
else:
results = eval('data.%s(filter_order=%s)' % (method, filter_order))
# re-cast masked array as normal array
results = np.array(results)
max_diff = np.max(np.abs(reference_results_df[method].values - results))
if max_diff > 1e-14:
print max_diff
assert max_diff == 0
nan_dif = np.isnan(reference_results_df[method].values) ^ np.isnan(results)
if np.any(nan_dif):
print "Differences exist in the 'np.isnan(x)' status for some values."
assert False
def test_V_actuation():
compare_results_to_reference('V_actuation', 'reference_results.hdf')
def test_x_position():
compare_results_to_reference('x_position', 'reference_results.hdf')
def test_force():
compare_results_to_reference('force', 'reference_results.hdf')
def test_Z_device():
compare_results_to_reference('Z_device', 'reference_results.hdf')
def test_Z_device_filter_order_3():
compare_results_to_reference('Z_device', 'reference_results.hdf', filter_order=3)
def test_capacitance():
compare_results_to_reference('capacitance', 'reference_results.hdf')
def test_velocity():
compare_results_to_reference('dxdt', 'reference_results_velocity.hdf')
def test_velocity_filter_order_3():
compare_results_to_reference('dxdt', 'reference_results_velocity.hdf', filter_order=3)
def generate_feedback_results_reference(data, id):
input_file = path(__file__).parent / path('FeedbackResults') / \
path('input_%s.pickle' % id)
# save a pickled version of the feedback results object
input_file.pickle_dump(data, -1)
for fname in ['reference_results.hdf', 'reference_results_velocity.hdf']:
reference_results_file = path(__file__).parent / \
path('FeedbackResults') / path(fname)
if reference_results_file.exists():
f = pd.HDFStore(reference_results_file, 'r')
reference_results_df = f['/root']
f.close()
# remove any existing data with the same id
reference_results_df = reference_results_df[reference_results_df['id'] != id]
else:
reference_results_df = pd.DataFrame()
for filter_order in [None, 3]:
if fname == 'reference_results_velocity.hdf':
t, dxdt = data.dxdt(filter_order=filter_order)
d = {'dxdt': dxdt,
'time': t,
}
else:
d = {'V_actuation': data.V_actuation(),
'force': data.force(),
'Z_device': data.Z_device(filter_order=filter_order),
'capacitance': data.capacitance(filter_order=filter_order),
'x_position': data.x_position(filter_order=filter_order),
'time': data.time,
}
results_df = pd.DataFrame(d)
results_df['id'] = id
# store filter_order=None as -1
if filter_order is None:
results_df['filter_order'] = -1
else:
results_df['filter_order'] = filter_order
reference_results_df = reference_results_df.append(results_df)
# save an hdf file containing the reference feedback results calculations
reference_results_df.to_hdf(reference_results_file, '/root', complib='blosc', complevel=2)
def test_get_window_size():
input_file = path(__file__).parent / path('FeedbackResults') / \
path('input_1.pickle')
data = input_file.pickle_load()
assert data._get_window_size() == 21.0
| bsd-3-clause |
dpaiton/OpenPV | pv-core/analysis/python/plot_feature_histogram.py | 1 | 4997 | """
Plots the Histogram
"""
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm
import PVReadWeights as rw
import PVConversions as conv
import scipy.cluster.vq as sp
import math
import random
if len(sys.argv) < 2:
print "usage: time_stability filename"
print len(sys.argv)
sys.exit()
w = rw.PVReadWeights(sys.argv[1])
space = 1
nx = w.nx
ny = w.ny
nxp = w.nxp
nyp = w.nyp
numpat = w.numPatches
nf = w.nf
margin = 15
marginstart = margin
marginend = nx - margin
acount = 0
patchposition = []
supereasytest = 1
d = np.zeros((nxp,nyp))
# create feature list for comparing weights from on and off cells
f = np.zeros(w.patchSize)
f2 = np.zeros(w.patchSize)
fe1 = []
fe2 = []
fe3 = []
fe4 = []
fe5 = []
fe6 = []
fe7 = []
fe8 = []
fcomp = []
f = w.normalize(f)
f2 = w.normalize(f2)
# vertical lines from right side
f = np.zeros([w.nxp, w.nyp]) # first line
f[:,0] = 1
fe1.append(f)
f = np.zeros([w.nxp, w.nyp]) # second line
f[:,1] = 1
fe2.append(f)
f2 = np.zeros([w.nxp, w.nyp]) # third line
f2[:,2] = 1
fe3.append(f2)
f = np.zeros([w.nxp, w.nyp])
f[:,3] = 1
fe4.append(f)
#horizontal lines from the top
f = np.zeros([w.nxp, w.nyp])
f[0,:] = 1
fe5.append(f)
f = np.zeros([w.nxp, w.nyp])
f[1,:] = 1
fe6.append(f)
f = np.zeros([w.nxp, w.nyp])
f[2,:] = 1
fe7.append(f)
f = np.zeros([w.nxp, w.nyp])
f[3,:] = 1
fe8.append(f)
#print "f8", fe8
#print "f7", fe7
#print "f6", fe6
#print "f5", fe5
#print "f4", fe4
#print "f3", fe3
#print "f2", fe2
#print "f1", fe1
def whatFeature(k):
result = []
fcomp = []
k = np.reshape(k,(nxp,nyp))
f1 = k * fe1
f1 = np.sum(f1)
fcomp.append(f1)
#print f1
f2 = k * fe2
f2 = np.sum(f2)
#print f2
fcomp.append(f2)
f3 = k * fe3
f3 = np.sum(f3)
#print f3
fcomp.append(f3)
f4 = k * fe4
f4 = np.sum(f4)
#print f4
fcomp.append(f4)
f5 = k * fe5
f5 = np.sum(f5)
#print f5
fcomp.append(f5)
f6 = k * fe6
f6 = np.sum(f6)
#print f6
fcomp.append(f6)
f7 = k * fe7
f7 = np.sum(f7)
#print f7
fcomp.append(f7)
f8 = k * fe8
f8 = np.sum(f8)
#print f8
fcomp.append(f8)
fcomp = np.array(fcomp)
t = fcomp.argmax()
check = fcomp.max() / 4
if check > 0.7:
pass
else:
result = [10]
return result
maxp = np.max(fcomp)
if maxp == f1:
#print "f1"
result.append(1)
if maxp == f2:
#print "f2"
result.append(2)
if maxp == f3:
#print "f3"
result.append(3)
if maxp == f4:
#print "f4"
result.append(4)
if maxp == f5:
#print "f5"
result.append(5)
if maxp == f6:
#print "f6"
result.append(6)
if maxp == f7:
#print "f7"
result.append(7)
if maxp == f8:
#print "f8"
result.append(8)
if len(result) > 1:
rl = len(result)
ri = random.randint(0, rl - 1)
wn = result[ri]
result = []
result.append(wn)
#print "result = ", result
return result
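# Illustrative behaviour (assuming a normalised 4x4 patch): a patch whose
# weights are concentrated in column 0 overlaps template fe1 the most, so
# whatFeature returns [1]; when the best overlap divided by 4 is not above
# 0.7 the patch is rejected and [10] is returned instead.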
space = 1
w = rw.PVReadWeights(sys.argv[1])
coord = 1
coord = int(coord)
nx = w.nx
ny = w.ny
nxp = w.nxp
nyp = w.nyp
numpat = w.numPatches
nf = w.nf
margin = 32
start = margin
marginend = nx - margin
nx_im = nx * (nxp + space) + space
ny_im = ny * (nyp + space) + space
im = np.zeros((nx_im, ny_im))
im[:,:] = (w.max - w.min) / 2.
where = []
zep = []
for k in range(numpat):
kx = conv.kxPos(k, nx, ny, nf)
ky = conv.kyPos(k, nx, ny, nf)
p = w.next_patch()
afz = whatFeature(p)
zep.append(afz)
if len(p) != nxp * nyp:
continue
if marginstart < kx < marginend:
if marginstart < ky < marginend:
acount+=1
a = whatFeature(p)
a = a[0]
if a != 10:
where.append(a)
#print a
count = 0
count1 = 0
count2 = 0
count3 = 0
count4 = 0
count5 = 0
count6 = 0
count7 = 0
count8 = 0
for i in range(len(where)):
if where[i] == 1:
count1 += 1
if where[i] == 2:
count2 += 1
if where[i] == 3:
count3 += 1
if where[i] == 4:
count4 += 1
if where[i] == 5:
count5 += 1
if where[i] == 6:
count6 += 1
if where[i] == 7:
count7 += 1
if where[i] == 8:
count8 += 1
h = [count1, count2, count3, count4, count5, count6, count7, count8]
h2 = [0, count1, count2, count3, count4, count5, count6, count7, count8]
hmax = np.max(h)
hmin = np.min(h)
hratio = float(hmax)/hmin
ptotal = len(where) / float(acount)
print "hmax = ", hmax
print "hmin = ", hmin
print "hratio = ", hratio
print "% of total = ", ptotal
fig = plt.figure()
ax = fig.add_subplot(111)
loc = np.array(range(len(h)))+0.5
width = 1.0
ax.set_title('Feature Histogram')
ax.set_xlabel('Total Number of Features = %1.0i \n ratio = %f \n percent of total = %f' %(len(where), hratio, ptotal))
ax.bar(loc, h, width=width, bottom=0, color='b')
#ax.plot(np.arange(len(h2)), h2, ls = '-', marker = 'o', color='b', linewidth = 4.0)
plt.show()
| epl-1.0 |
adamgreenhall/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
hlin117/scikit-learn | examples/feature_selection/plot_select_from_model_boston.py | 146 | 1527 | """
===================================================
Feature selection using SelectFromModel and LassoCV
===================================================
Use SelectFromModel meta-transformer along with Lasso to select the best
couple of features from the Boston dataset.
"""
# Author: Manoj Kumar <[email protected]>
# License: BSD 3 clause
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_boston
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
# Load the boston dataset.
boston = load_boston()
X, y = boston['data'], boston['target']
# We use the base estimator LassoCV since the L1 norm promotes sparsity of features.
clf = LassoCV()
# Set a minimum threshold of 0.25
sfm = SelectFromModel(clf, threshold=0.25)
sfm.fit(X, y)
n_features = sfm.transform(X).shape[1]
# Reset the threshold till the number of features equals two.
# Note that the attribute can be set directly instead of repeatedly
# fitting the metatransformer.
while n_features > 2:
sfm.threshold += 0.1
X_transform = sfm.transform(X)
n_features = X_transform.shape[1]
# Plot the selected two features from X.
plt.title(
"Features selected from Boston using SelectFromModel with "
"threshold %0.3f." % sfm.threshold)
feature1 = X_transform[:, 0]
feature2 = X_transform[:, 1]
plt.plot(feature1, feature2, 'r.')
plt.xlabel("Feature number 1")
plt.ylabel("Feature number 2")
plt.ylim([np.min(feature2), np.max(feature2)])
plt.show()
| bsd-3-clause |
Zomega/thesis | Wurm/INGEST/fixed_hub.py | 1 | 1510 | #TODO: Generalize state...
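# Note: I (link inertia), b (damping) and n (number of links) are assumed to be
# defined elsewhere in the original project; they are not defined in this fragment.
# This first x_dot is also shadowed by the second definition of x_dot below, so it
# is effectively a placeholder for the 4-link hub dynamics.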
def x_dot( x ):
#ti <- theta i
#wi <- theta dot i
#di <- theta double dot i
t1, t2, t3, t4, w1, w2, w3, w4 = x
def t(i):
return [t1, t2, t3, t4][i]
def w(i):
return [w1, w2, w3, w4][i]
def d(i):
return (1/I) * ( torque(i-1, i) - torque(i, i+1) - b * w(i) )
def torque(a, b):
a = a % n
b = b % n
assert abs(a-b) == 1
# TODO
return 0
return w(1), w(2), w(3), w(4), d(1), d(2), d(3), d(4)
def x_dot( x ):
x1, x2 = x
x1 = float(x1)
x2 = float(x2)
#x1_dot = (-6 * x1)/(1 +x1**2)**2 + 2*x2
#x2_dot = -2 * ( x1 + x2 ) / ( 1+x1**2 )**2
#x1_dot = x2 - (x1**3)
#x2_dot = -(x2 ** 3) - x1
x1_dot = x2
x2_dot = -x1 + x1**3 - x2
return( x1_dot, x2_dot )
def traj( x0 ):
x1 = [x0[0]]
x2 = [x0[1]]
dt = 0.0001
def scale( alpha, x ):
return (alpha*x[0], alpha*x[1])
def add( x1, x2 ):
return (x1[0]+x2[0], x1[1]+x2[1])
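    # Forward (explicit) Euler integration: x_{k+1} = x_k + dt * x_dot(x_k).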
for i in range(10**3):
x_t = (x1[-1], x2[-1])
x_n = add( x_t, scale( dt, x_dot( x_t ) ) )
x1.append(x_n[0])
x2.append(x_n[1])
return (x1, x2)
import matplotlib.pyplot as plt
from pylab import arange
from random import randint
def plot(traj):
x1, x2 = traj
plt.plot(x1, x2)
plt.xlabel('x1')
plt.ylabel('x2')
for n in range(10):
plot( traj((randint(-20,20),randint(-20,20))))
print n
plt.legend()
plt.show()
| mit |
lin-credible/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
bitforks/freetype-py | examples/glyph-monochrome.py | 4 | 1263 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011-2015 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
'''
Glyph bitmap monochrome rendering
'''
from freetype import *
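# bits() unpacks one byte of the 1-bit-per-pixel bitmap into a list of 8 pixel
# values, most significant bit first.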
def bits(x):
data = []
for i in range(8):
data.insert(0, int((x & 1) == 1))
x = x >> 1
return data
if __name__ == '__main__':
import numpy
import matplotlib.pyplot as plt
face = Face('./Vera.ttf')
face.set_char_size( 48*64 )
face.load_char('S', FT_LOAD_RENDER |
FT_LOAD_TARGET_MONO )
bitmap = face.glyph.bitmap
width = face.glyph.bitmap.width
rows = face.glyph.bitmap.rows
pitch = face.glyph.bitmap.pitch
data = []
for i in range(bitmap.rows):
row = []
for j in range(bitmap.pitch):
row.extend(bits(bitmap.buffer[i*bitmap.pitch+j]))
data.extend(row[:bitmap.width])
Z = numpy.array(data).reshape(bitmap.rows, bitmap.width)
plt.imshow(Z, interpolation='nearest', cmap=plt.cm.gray, origin='lower')
plt.show()
| bsd-3-clause |
larenzhang/DQN_for_trading | deep_q_network.py | 1 | 9932 | #!/usr/bin/env python
import tensorflow as tf
import cv2
import sys
sys.path.append("Wrapped Game Code/")
import random
import numpy as np
from collections import deque
import os
from PIL import Image
import csv
from os.path import join,getsize,getmtime
import time
import scipy.io as scio
import matplotlib.pyplot as plt
GAME = 'tetris' # the name of the game being played for log files
ACTION_SIZE = 3 # number of valid actions
GAMMA = 0.85 # decay rate of past observations
OBSERVE = 4000 # timesteps to observe before training
EXPLORE = 4000 # frames over which to anneal epsilon
FINAL_EPSILON = 0.05 # final value of epsilon
INITIAL_EPSILON = 0.05 # starting value of epsilon
REPLAY_MEMORY = 50000 # number of previous transitions to remember
BATCH = 32 # size of minibatch
K = 1 # only select an action every Kth frame, repeat prev for others
ACTIONS = np.array([1,0,-1]) #long,neutral,short
TRAIN_IMG_PATH = "./dataset/AMZN/AMZN_TRAIN/AMZN_PIC"
TRAIN_MAT_PATH = "./dataset/AMZN/AMZN_TRAIN/AMZN.mat"
TRAIN_REWARD_PATH = "./dataset/AMZN/AMZN_TRAIN/earning.csv"
TEST_IMG_PATH = "./dataset/AMZN/AMZN_TEST/AMZN_PIC"
TEST_MAT_PATH = "./dataset/AMZN/AMZN_TEST/AMZN.mat"
TEST_REWARD_PATH = "./dataset/AMZN/AMZN_TEST/earning.csv"
TEST = True
TRAIN = False
def imgs2tensor(root_dir):
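    # Load every chart image in root_dir in modification-time order, resize it to
    # 80x80, convert it to greyscale and binarise it, and return the list of frames.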
states = []
file_list = os.listdir(root_dir)
path_dict = {}
for i in range(len(file_list)):
path_dict[file_list[i]] = getmtime(join(root_dir,file_list[i]))
sort_list = sorted(path_dict.items(),key=lambda e:e[1],reverse=False)
for i in range(0,len(file_list)):
path = os.path.join(root_dir,sort_list[i][0])
print("path is :",path)
if os.path.isfile(path):
img = cv2.imread("{}".format(path))
img_gray = cv2.cvtColor(cv2.resize(img,(80,80)),cv2.COLOR_BGR2GRAY)
ret, data = cv2.threshold(img_gray,1,255,cv2.THRESH_BINARY)
states.append(data)
print("states shape:",np.shape(states))
# data = cv2.cvtColor(cv2.resize(data_new, (80, 80)), cv2.COLOR_BGR2GRAY)
states_size = np.shape(states)[0]
return states
def getFileOrderByUpdate(path):
file_list = os.listdir(path)
path_dict = {}
for i in range(len(file_list)):
path_dict[file_list[i]] = getmtime(join(path,file_list[i]))
sort_list = sorted(path_dict.items(),key=lambda e:e[1],reverse=False)
for i in range(len(sort_list)):
print(sort_list[i][0],sort_list[i][1])
def get_reward(file_dir):
reward = []
with open(file_dir,'r') as file:
reader = csv.reader(file)
for line in reader:
reward.append(line[0])
reward_float = [float(str) for str in reward]
return reward_float
def create_csv_file(file_name="",data_list=[]):
with open(file_name,"w") as csv_file:
csv_writer = csv.writer(csv_file)
# for data in data_list:
# print("data is:",data)
csv_writer.writerows(map(lambda x:[x],data_list))
csv_file.close
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev = 0.01)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.01, shape = shape)
return tf.Variable(initial)
def conv2d(x, W, stride):
return tf.nn.conv2d(x, W, strides = [1, stride, stride, 1], padding = "SAME")
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = "SAME")
def createNetwork():
# network weights
W_conv1 = weight_variable([8, 8, 1, 32])
b_conv1 = bias_variable([32])
W_conv2 = weight_variable([4, 4, 32, 64])
b_conv2 = bias_variable([64])
W_conv3 = weight_variable([3, 3, 64, 64])
b_conv3 = bias_variable([64])
W_fc1 = weight_variable([1600, 512])
b_fc1 = bias_variable([512])
W_fc2 = weight_variable([512, ACTION_SIZE])
b_fc2 = bias_variable([ACTION_SIZE])
# input layer
s = tf.placeholder("float", [None, 80, 80, 1])
# hidden layers
h_conv1 = tf.nn.relu(conv2d(s, W_conv1, 4) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2, 2) + b_conv2)
# h_pool2 = max_pool_2x2(h_conv2)
h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3, 1) + b_conv3)
#h_pool3 = max_pool_2x2(h_conv3)
#h_pool3_flat = tf.reshape(h_pool3, [-1, 256])
h_conv3_flat = tf.reshape(h_conv3, [-1, 1600])
h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)
# readout layer
readout = tf.matmul(h_fc1, W_fc2) + b_fc2
return s, readout, h_fc1
def trainNetwork(s, readout, h_fc1, sess):
#define the cost function
a = tf.placeholder("float", [None, ACTION_SIZE])
y = tf.placeholder("float", [None])
#readout_action = tf.reduce_sum(tf.matmul(readout, a), reduction_indices = 1)
readout_action = tf.reduce_sum(tf.matmul(readout,a,transpose_b=True), reduction_indices = 1)
cost = tf.reduce_mean(tf.square(y - readout_action))
train_step = tf.train.AdamOptimizer(1e-6).minimize(cost)
# store the previous observations in replay memory
D = deque()
# if TRAIN:
# if os.path.exists(TRAIN_MAT_PATH):
# states = scio.loadmat(TRAIN_MAT_PATH)
# else:
states = imgs2tensor(TEST_IMG_PATH)
# states_dict = {"states":states}
# scio.savemat("./dataset/AMZN/AMZN_TRAIN/AMZN.mat",states_dict)
# if TEST:
# if os.path.exists(TEST_MAT_PATH):
# states = scio.loadmat(TEST_MAT_PATH)
# else:
# states = imgs2tensor(TEST_IMG_PATH)
# scio.savemat("./dataset/AMZN/AMZN_TEST/AMZN.mat",states)
states_size = np.shape(states)[0]
reward = get_reward(TRAIN_REWARD_PATH)
s_t = np.reshape(states[0],(80,80,1))
# saving and loading networks
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
checkpoint = tf.train.get_checkpoint_state("saved_networks_1")
if checkpoint and checkpoint.model_checkpoint_path:
saver.restore(sess, checkpoint.model_checkpoint_path)
print("Successfully loaded:", checkpoint.model_checkpoint_path)
else:
print("Could not find old network weights")
epsilon = INITIAL_EPSILON
t = 0
cum_reward = []
while "pigs" != "fly":
# choose an action epsilon greedily
if t<states_size-1:
terminal = True
readout_t = readout.eval(feed_dict = {s : [s_t]})[0]
a_t = np.zeros([ACTION_SIZE])
action_index = 0
if random.random() <= epsilon or t <= OBSERVE:
action_index = random.randrange(ACTION_SIZE)
a_t[action_index] = 1
else:
action_index = np.argmax(readout_t)
a_t[action_index] = 1
# scale down epsilon
if epsilon > FINAL_EPSILON and t > OBSERVE:
epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE
for i in range(0, K):
# run the selected action and observe next state and reward
s_t1 = np.reshape(states[t+1],(80,80,1))
r_t = reward[t]*(np.dot(a_t,ACTIONS.T))
# store the transition in D
D.append((s_t, a_t, r_t, s_t1))
if len(D) > REPLAY_MEMORY:
D.popleft()
# only train if done observing
if t>BATCH:
# sample a minibatch to train on
minibatch = random.sample(D, BATCH)
# get the batch variables
s_j_batch = [d[0] for d in minibatch]
a_batch = [d[1] for d in minibatch]
r_batch = [d[2] for d in minibatch]
s_j1_batch = [d[3] for d in minibatch]
y_batch = []
readout_j1_batch = readout.eval(feed_dict = {s : s_j1_batch})
for i in range(0, len(minibatch)):
# if terminal only equals reward
# if minibatch[i][4]:
# y_batch.append(r_batch[i])
# else:
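                    # Bellman target: observed reward plus the discounted maximum
                    # predicted Q-value of the next state.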
y_batch.append(r_batch[i] + GAMMA * np.max(readout_j1_batch[i]))
# perform gradient step
train_step.run(feed_dict = {
y : y_batch,
a : a_batch,
s : s_j_batch})
# save progress every 10000 iterations
if TRAIN==True:
if t % 50 == 0:
saver.save(sess, 'saved_networks_1/' + '{}-dqn'.format(time.strftime('%d-%h-%m',time.localtime(time.time()))),global_step=t)
# print info
state = ""
if t <= OBSERVE:
state = "observe"
elif t > OBSERVE and t <= OBSERVE + EXPLORE:
state = "explore"
else:
state = "train"
print("TIMESTEP", t, "/ STATE", state, "/ EPSILON", epsilon, "/ ACTION", action_index, "/ REWARD", r_t, "/ Q_MAX %e" % np.max(readout_t))
if TEST==True:
if t>0:
cum_reward.append(cum_reward[t-1]+r_t)
else :
cum_reward.append(r_t)
if t==states_size-1:
count = 0
for i in cum_reward:
if i>0:
count+=1
print("accuracy rate:",count/states_size)
t_label = np.linspace(0,1,states_size)
# plt.plot(t_label,cum_reward,color='red',linewidth=2)
plt.legend()
plt.plot(cum_reward)
plt.xlabel("Date")
plt.ylabel("Total Return")
plt.title("AMAZON")
create_csv_file("./total_return.csv",cum_reward)
plt.show()
s_t = s_t1
t += 1
def playGame():
sess = tf.InteractiveSession()
s, readout, h_fc1 = createNetwork()
trainNetwork(s, readout, h_fc1, sess)
def main():
playGame()
if __name__ == "__main__":
main()
# imgs2tensor("./dataset/test")
| gpl-2.0 |
UNR-AERIAL/scikit-learn | examples/feature_stacker.py | 246 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
| bsd-3-clause |
brockk/clintrials | clintrials/phase2/bebop/__init__.py | 1 | 9745 | __author__ = 'Kristian Brock'
__contact__ = '[email protected]'
__all__ = ["peps2v1", "peps2v2"]
"""
BeBOP: Bayesian design with Bivariate Outcomes and Predictive variables
Brock, et al. To be published.
BeBOP studies the dual primary outcomes efficacy and toxicity.
The two events can be associated to reflect the potential for correlated
outcomes. The design models the probabilities of efficacy and toxicity
using logistic models so that the information in predictive variables
can be incorporated to tailor the treatment acceptance / rejection
decision.
This is a generalisation of the design that was used in the PePS2 trial.
PePS2 studies the efficacy and toxicity of a drug in a population of
performance status 2 lung cancer patients. Patient outcomes may plausibly
be affected by whether or not they have been treated before, and the
expression rate of PD-L1 in their cells.
PePS2 uses Brock et al's BeBOP design to incorporate the potentially
predictive data in PD-L1 expression rate and whether or not a patient has
been pre-treated to find the sub-population(s) where the drug works
and is tolerable.
"""
import numpy
import pandas as pd
from clintrials.stats import ProbabilityDensitySample
class BeBOP():
"""
"""
def __init__(self, theta_priors, efficacy_model, toxicity_model, joint_model):
"""
Params:
:param theta_priors: list of prior distributions for elements of parameter vector, theta.
Each prior object should support obj.ppf(x) and obj.pdf(x) like classes in scipy
:param efficacy_model: func with signature x, theta; where x is a case vector and theta a 2d array of
parameter values, the first column containing values for the first parameter, the second column the
second parameter, etc, so that each row in theta is a single parameter set. Function should return probability
of efficacy of case x under each parameter set (i.e. each row of theta) so that a 1*len(theta) array should
be returned.
:param toxicity_model: func with signature x, theta; where x is a case vector and theta a 2d array of
parameter values, the first column containing values for the first parameter, the second column the
second parameter, etc, so that each row in theta is a single parameter set. Function should return probability
of toxicity of case x under each parameter set (i.e. each row of theta) so that a 1*len(theta) array should
be returned.
:param joint_model: func with signature x, theta; where x is a case vector and theta a 2d array of
parameter values, the first column containing values for the first parameter, the second column the
second parameter, etc, so that each row in theta is a single parameter set. Function should return the joint
probability of efficacy and toxicity of case x under each parameter set (i.e. each row of theta) so that
a 1*len(theta) array should be returned. Generally this method would use efficacy_model and toxicity_model.
For non-associated events, for instance, the simple product of efficacy_model(x, theta) and
toxicity_model(x, theta) would do the job. For associated events, more complexity is required.
In case vector x, the element x[0] should be boolean efficacy variable, with 1 showing efficacy occurred.
In case vector x, the element x[1] should be boolean toxicity variable, with 1 showing toxicity occurred.
See clintrials.phase2.bebop.peps2v2 for a working trio of efficacy_model, toxicity_model and joint_model that
allow for associated efficacy and toxicity events.
Note: efficacy_model, toxicity_model and joint_model should be vectorised to work with one case and many
parameter sets (rather than just many cases and one parameter set) for quick integration using Monte Carlo.
"""
self.priors = theta_priors
self._pi_e = efficacy_model
self._pi_t = toxicity_model
self._pi_ab = joint_model
# Initialise model
self.reset()
def reset(self):
self.cases = []
self._pds = None
def _l_n(self, D, theta):
if len(D) > 0:
lik = numpy.array(map(lambda x: self._pi_ab(x, theta), D))
return lik.prod(axis=0)
else:
return numpy.ones(len(theta))
def size(self):
return len(self.cases)
def efficacies(self):
return [case[0] for case in self.cases]
def toxicities(self):
return [case[1] for case in self.cases]
def get_case_elements(self, i):
return [case[i] for case in self.cases]
def update(self, cases, n=10**6, epsilon = 0.00001, **kwargs):
""" TODO
:param n:
:param epsilon:
"""
self.cases.extend(cases)
limits = [(dist.ppf(epsilon), dist.ppf(1-epsilon)) for dist in self.priors]
samp = numpy.column_stack([numpy.random.uniform(*limit_pair, size=n) for limit_pair in limits])
lik_integrand = lambda x: self._l_n(cases, x) * numpy.prod(numpy.array([dist.pdf(col) for (dist, col) in zip(self.priors, x.T)]), axis=0)
self._pds = ProbabilityDensitySample(samp, lik_integrand)
return
def _predict_case(self, case, eff_cutoff, tox_cutoff, pds, samp, estimate_ci=False):
x = case
eff_probs = self._pi_e(x, samp)
tox_probs = self._pi_t(x, samp)
from collections import OrderedDict
predictions = OrderedDict([
('Pr(Eff)', pds.expectation(eff_probs)),
('Pr(Tox)', pds.expectation(tox_probs)),
('Pr(AccEff)', pds.expectation((eff_probs > eff_cutoff))),
('Pr(AccTox)', pds.expectation((tox_probs < tox_cutoff))),
])
if estimate_ci:
predictions['Pr(Eff) Lower'] = pds.quantile_vector(eff_probs, 0.05, start_value=0.05)
predictions['Pr(Eff) Upper'] = pds.quantile_vector(eff_probs, 0.95, start_value=0.95)
predictions['Pr(Tox) Lower'] = pds.quantile_vector(tox_probs, 0.05, start_value=0.05)
predictions['Pr(Tox) Upper'] = pds.quantile_vector(tox_probs, 0.95, start_value=0.95)
return predictions
def predict(self, cases, eff_cutoff, tox_cutoff, to_pandas=False, estimate_ci=False):
if self._pds is not None:
pds = self._pds
samp = pds._samp
fitted = [self._predict_case(x, eff_cutoff, tox_cutoff, pds, samp, estimate_ci=estimate_ci) for x in cases]
if to_pandas:
if estimate_ci:
return pd.DataFrame(fitted, columns=['Pr(Eff)', 'Pr(Tox)', 'Pr(AccEff)', 'Pr(AccTox)',
'Pr(Eff) Lower', 'Pr(Eff) Upper', 'Pr(Tox) Lower', 'Pr(Tox) Upper'])
else:
return pd.DataFrame(fitted, columns=['Pr(Eff)', 'Pr(Tox)', 'Pr(AccEff)', 'Pr(AccTox)'])
else:
return fitted
else:
return None
def get_posterior_param_means(self):
if self._pds:
return numpy.apply_along_axis(lambda x: self._pds.expectation(x), 0, self._pds._samp)
else:
return []
def theta_estimate(self, i, alpha=0.05):
""" Get posterior confidence interval and mean estimate of element i in parameter vector.
Returns (lower, mean, upper)
"""
        if i < len(self.priors):
mu = self._pds.expectation(self._pds._samp[:,i])
return numpy.array([self._pds.quantile(i, alpha/2), mu, self._pds.quantile(i, 1-alpha/2)])
else:
return (0,0,0)
# def efficacy_effect(self, j, alpha=0.05):
# """ Get confidence interval and mean estimate of the effect on efficacy, expressed as odds-ratios.
# Use:
# - j=0, to get treatment effect of the intercept variable
# - j=1, to get treatment effect of the pre-treated status variable
# - j=2, to get treatment effect of the mutation status variable
# """
# if j==0:
# expected_log_or = self._pds.expectation(self._pds._samp[:,1])
# return np.exp([self._pds.quantile(1, alpha/2), expected_log_or, self._pds.quantile(1, 1-alpha/2)])
# elif j==1:
# expected_log_or = self._pds.expectation(self._pds._samp[:,2])
# return np.exp([self._pds.quantile(2, alpha/2), expected_log_or, self._pds.quantile(2, 1-alpha/2)])
# elif j==2:
# expected_log_or = self._pds.expectation(self._pds._samp[:,3])
# return np.exp([self._pds.quantile(3, alpha/2), expected_log_or, self._pds.quantile(3, 1-alpha/2)])
# else:
# return (0,0,0)
# def toxicity_effect(self, j=0, alpha=0.05):
# """ Get confidence interval and mean estimate of the effect on toxicity, expressed as odds-ratios.
# Use:
# - j=0, to get effect on toxicity of the intercept variable
# """
# if j==0:
# expected_log_or = self._pds.expectation(self._pds._samp[:,0])
# return np.exp([self._pds.quantile(0, alpha/2), expected_log_or, self._pds.quantile(0, 1-alpha/2)])
# else:
# return (0,0,0)
# def correlation_effect(self, alpha=0.05):
# """ Get confidence interval and mean estimate of the correlation between efficacy and toxicity. """
# expected_psi = self._pds.expectation(self._pds._samp[:,4])
# psi_levels = np.array([self._pds.quantile(4, alpha/2), expected_psi, self._pds.quantile(4, 1-alpha/2)])
# return (np.exp(psi_levels) - 1) / (np.exp(psi_levels) + 1) | gpl-3.0 |
diegocavalca/Studies | programming/Python/Machine-Learning/Introduction-Udacity/regression/finance_regression.py | 7 | 2106 | #!/usr/bin/python
"""
Starter code for the regression mini-project.
Loads up/formats a modified version of the dataset
(why modified? we've removed some trouble points
that you'll find yourself in the outliers mini-project).
Draws a little scatterplot of the training/testing data
You fill in the regression code where indicated:
"""
import sys
import pickle
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
dictionary = pickle.load( open("../final_project/final_project_dataset_modified.pkl", "r") )
### list the features you want to look at--first item in the
### list will be the "target" feature
features_list = ["bonus", "salary"]
data = featureFormat( dictionary, features_list, remove_any_zeroes=True)
target, features = targetFeatureSplit( data )
### training-testing split needed in regression, just like classification
from sklearn.cross_validation import train_test_split
feature_train, feature_test, target_train, target_test = train_test_split(features, target, test_size=0.5, random_state=42)
train_color = "b"
test_color = "b"
### Your regression goes here!
### Please name it reg, so that the plotting code below picks it up and
### plots it correctly. Don't forget to change the test_color above from "b" to
### "r" to differentiate training points from test points.
### draw the scatterplot, with color-coded training and testing points
import matplotlib.pyplot as plt
for feature, target in zip(feature_test, target_test):
plt.scatter( feature, target, color=test_color )
for feature, target in zip(feature_train, target_train):
plt.scatter( feature, target, color=train_color )
### labels for the legend
plt.scatter(feature_test[0], target_test[0], color=test_color, label="test")
plt.scatter(feature_test[0], target_test[0], color=train_color, label="train")
### draw the regression line, once it's coded
try:
plt.plot( feature_test, reg.predict(feature_test) )
except NameError:
pass
plt.xlabel(features_list[1])
plt.ylabel(features_list[0])
plt.legend()
plt.show()
| cc0-1.0 |
vinodkc/spark | python/pyspark/pandas/tests/test_categorical.py | 14 | 16649 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
import pyspark.pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class CategoricalTest(PandasOnSparkTestCase, TestUtils):
@property
def pdf(self):
return pd.DataFrame(
{
"a": pd.Categorical([1, 2, 3, 1, 2, 3]),
"b": pd.Categorical(
["b", "a", "c", "c", "b", "a"], categories=["c", "b", "d", "a"]
),
},
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
return self.pdf, self.psdf
def test_categorical_frame(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b, pdf.b)
self.assert_eq(psdf.index, pdf.index)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
def test_categorical_series(self):
pser = pd.Series([1, 2, 3], dtype="category")
psser = ps.Series([1, 2, 3], dtype="category")
self.assert_eq(psser, pser)
self.assert_eq(psser.cat.categories, pser.cat.categories)
self.assert_eq(psser.cat.codes, pser.cat.codes)
self.assert_eq(psser.cat.ordered, pser.cat.ordered)
def test_astype(self):
pser = pd.Series(["a", "b", "c"])
psser = ps.from_pandas(pser)
self.assert_eq(psser.astype("category"), pser.astype("category"))
self.assert_eq(
psser.astype(CategoricalDtype(["c", "a", "b"])),
pser.astype(CategoricalDtype(["c", "a", "b"])),
)
pcser = pser.astype(CategoricalDtype(["c", "a", "b"]))
kcser = psser.astype(CategoricalDtype(["c", "a", "b"]))
self.assert_eq(kcser.astype("category"), pcser.astype("category"))
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
kcser.astype(CategoricalDtype(["b", "c", "a"])),
pcser.astype(CategoricalDtype(["b", "c", "a"])),
)
else:
self.assert_eq(
kcser.astype(CategoricalDtype(["b", "c", "a"])),
pser.astype(CategoricalDtype(["b", "c", "a"])),
)
self.assert_eq(kcser.astype(str), pcser.astype(str))
def test_factorize(self):
pser = pd.Series(["a", "b", "c", None], dtype=CategoricalDtype(["c", "a", "d", "b"]))
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize()
kcodes, kuniques = psser.factorize()
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
pcodes, puniques = pser.factorize(na_sentinel=-2)
kcodes, kuniques = psser.factorize(na_sentinel=-2)
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
def test_frame_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.apply(lambda x: x).sort_index(), pdf.apply(lambda x: x).sort_index())
self.assert_eq(
psdf.apply(lambda x: x, axis=1).sort_index(),
pdf.apply(lambda x: x, axis=1).sort_index(),
)
def test_frame_apply_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_apply()
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c"])
def categorize(ser) -> ps.Series[dtype]:
return ser.astype(dtype)
self.assert_eq(
psdf.apply(categorize).sort_values(["a", "b"]).reset_index(drop=True),
pdf.apply(categorize).sort_values(["a", "b"]).reset_index(drop=True),
)
def test_frame_transform(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.transform(lambda x: x), pdf.transform(lambda x: x))
self.assert_eq(psdf.transform(lambda x: x.cat.codes), pdf.transform(lambda x: x.cat.codes))
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.transform(lambda x: x.astype(dtype)).sort_index(),
pdf.transform(lambda x: x.astype(dtype)).sort_index(),
)
def test_frame_transform_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_transform()
pdf, psdf = self.df_pair
def codes(pser) -> ps.Series[np.int8]:
return pser.cat.codes
self.assert_eq(psdf.transform(codes), pdf.transform(codes))
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
def to_category(pser) -> ps.Series[dtype]:
return pser.astype(dtype)
self.assert_eq(
psdf.transform(to_category).sort_index(), pdf.transform(to_category).sort_index()
)
def test_series_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.a.apply(lambda x: x).sort_index(), pdf.a.apply(lambda x: x).sort_index()
)
def test_series_apply_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_series_apply()
pdf, psdf = self.df_pair
ret = psdf.a.dtype
def identity(pser) -> ret:
return pser
self.assert_eq(psdf.a.apply(identity).sort_index(), pdf.a.apply(identity).sort_index())
# TODO: The return type is still category.
# def to_str(x) -> str:
# return str(x)
#
# self.assert_eq(
# psdf.a.apply(to_str).sort_index(), pdf.a.apply(to_str).sort_index()
# )
def test_groupby_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.groupby("a").apply(lambda df: df).sort_index(),
pdf.groupby("a").apply(lambda df: df).sort_index(),
)
self.assert_eq(
psdf.groupby("b").apply(lambda df: df[["a"]]).sort_index(),
pdf.groupby("b").apply(lambda df: df[["a"]]).sort_index(),
)
self.assert_eq(
psdf.groupby(["a", "b"]).apply(lambda df: df).sort_index(),
pdf.groupby(["a", "b"]).apply(lambda df: df).sort_index(),
)
self.assert_eq(
psdf.groupby("a").apply(lambda df: df.b.cat.codes).sort_index(),
pdf.groupby("a").apply(lambda df: df.b.cat.codes).sort_index(),
)
self.assert_eq(
psdf.groupby("a")["b"].apply(lambda b: b.cat.codes).sort_index(),
pdf.groupby("a")["b"].apply(lambda b: b.cat.codes).sort_index(),
)
# TODO: grouping by a categorical type sometimes preserves unused categories.
# self.assert_eq(
# psdf.groupby("a").apply(len).sort_index(), pdf.groupby("a").apply(len).sort_index(),
# )
def test_groupby_apply_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_groupby_apply()
pdf, psdf = self.df_pair
def identity(df) -> ps.DataFrame[zip(psdf.columns, psdf.dtypes)]:
return df
self.assert_eq(
psdf.groupby("a").apply(identity).sort_values(["a", "b"]).reset_index(drop=True),
pdf.groupby("a").apply(identity).sort_values(["a", "b"]).reset_index(drop=True),
)
def test_groupby_transform(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.groupby("a").transform(lambda x: x).sort_index(),
pdf.groupby("a").transform(lambda x: x).sort_index(),
)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.groupby("a").transform(lambda x: x.astype(dtype)).sort_index(),
pdf.groupby("a").transform(lambda x: x.astype(dtype)).sort_index(),
)
def test_groupby_transform_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_groupby_transform()
pdf, psdf = self.df_pair
def identity(x) -> ps.Series[psdf.b.dtype]: # type: ignore
return x
self.assert_eq(
psdf.groupby("a").transform(identity).sort_values("b").reset_index(drop=True),
pdf.groupby("a").transform(identity).sort_values("b").reset_index(drop=True),
)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
def astype(x) -> ps.Series[dtype]:
return x.astype(dtype)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
psdf.groupby("a").transform(astype).sort_values("b").reset_index(drop=True),
pdf.groupby("a").transform(astype).sort_values("b").reset_index(drop=True),
)
else:
expected = pdf.groupby("a").transform(astype)
expected["b"] = dtype.categories.take(expected["b"].cat.codes).astype(dtype)
self.assert_eq(
psdf.groupby("a").transform(astype).sort_values("b").reset_index(drop=True),
expected.sort_values("b").reset_index(drop=True),
)
def test_frame_apply_batch(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf: pdf.astype(str)).sort_index(),
pdf.astype(str).sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf: pdf.astype(dtype)).sort_index(),
pdf.astype(dtype).sort_index(),
)
def test_frame_apply_batch_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_apply_batch()
pdf, psdf = self.df_pair
def to_str(pdf) -> 'ps.DataFrame["a":str, "b":str]': # noqa: F405
return pdf.astype(str)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(to_str).sort_values(["a", "b"]).reset_index(drop=True),
to_str(pdf).sort_values(["a", "b"]).reset_index(drop=True),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
ret = ps.DataFrame["a":dtype, "b":dtype]
def to_category(pdf) -> ret:
return pdf.astype(dtype)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(to_category)
.sort_values(["a", "b"])
.reset_index(drop=True),
to_category(pdf).sort_values(["a", "b"]).reset_index(drop=True),
)
def test_frame_transform_batch(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.astype(str)).sort_index(),
pdf.astype(str).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.b.cat.codes).sort_index(),
pdf.b.cat.codes.sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.astype(dtype)).sort_index(),
pdf.astype(dtype).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.b.astype(dtype)).sort_index(),
pdf.b.astype(dtype).sort_index(),
)
def test_frame_transform_batch_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_transform_batch()
pdf, psdf = self.df_pair
def to_str(pdf) -> 'ps.DataFrame["a":str, "b":str]': # noqa: F405
return pdf.astype(str)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(to_str).sort_index(),
to_str(pdf).sort_index(),
)
def to_codes(pdf) -> ps.Series[np.int8]:
return pdf.b.cat.codes
self.assert_eq(
psdf.pandas_on_spark.transform_batch(to_codes).sort_index(),
to_codes(pdf).sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
ret = ps.DataFrame["a":dtype, "b":dtype]
def to_category(pdf) -> ret:
return pdf.astype(dtype)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(to_category).sort_index(),
to_category(pdf).sort_index(),
)
def to_category(pdf) -> ps.Series[dtype]:
return pdf.b.astype(dtype)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(to_category).sort_index(),
to_category(pdf).rename().sort_index(),
)
def test_series_transform_batch(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.a.pandas_on_spark.transform_batch(lambda pser: pser.astype(str)).sort_index(),
pdf.a.astype(str).sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.a.pandas_on_spark.transform_batch(lambda pser: pser.astype(dtype)).sort_index(),
pdf.a.astype(dtype).sort_index(),
)
def test_series_transform_batch_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_series_transform_batch()
pdf, psdf = self.df_pair
def to_str(pser) -> ps.Series[str]:
return pser.astype(str)
self.assert_eq(
psdf.a.pandas_on_spark.transform_batch(to_str).sort_index(), to_str(pdf.a).sort_index()
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
def to_category(pser) -> ps.Series[dtype]:
return pser.astype(dtype)
self.assert_eq(
psdf.a.pandas_on_spark.transform_batch(to_category).sort_index(),
to_category(pdf.a).sort_index(),
)
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_categorical import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
Barmaley-exe/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
dhomeier/astropy | astropy/visualization/wcsaxes/ticks.py | 12 | 6569 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib.lines import Path, Line2D
from matplotlib.transforms import Affine2D
from matplotlib import rcParams
class Ticks(Line2D):
"""
    Ticks are derived from Line2D, and the ticks themselves are
    markers, so marker properties such as set_mec and set_mew apply.
    To change the tick size (length), use set_ticksize. To change
    the direction of the ticks (ticks point in the opposite
    direction of the tick labels by default), use
    set_tick_out(False).
Note that Matplotlib's defaults dictionary :data:`~matplotlib.rcParams`
contains default settings (color, size, width) of the form `xtick.*` and
`ytick.*`. In a WCS projection, there may not be a clear relationship
between axes of the projection and 'x' or 'y' axes. For this reason,
we read defaults from `xtick.*`. The following settings affect the
default appearance of ticks:
* `xtick.direction`
* `xtick.major.size`
* `xtick.major.width`
* `xtick.minor.size`
* `xtick.color`
"""
def __init__(self, ticksize=None, tick_out=None, **kwargs):
if ticksize is None:
ticksize = rcParams['xtick.major.size']
self.set_ticksize(ticksize)
self.set_minor_ticksize(rcParams['xtick.minor.size'])
self.set_tick_out(rcParams['xtick.direction'] == 'out')
self.clear()
line2d_kwargs = {'color': rcParams['xtick.color'],
'linewidth': rcParams['xtick.major.width']}
line2d_kwargs.update(kwargs)
Line2D.__init__(self, [0.], [0.], **line2d_kwargs)
self.set_visible_axes('all')
self._display_minor_ticks = False
def display_minor_ticks(self, display_minor_ticks):
self._display_minor_ticks = display_minor_ticks
def get_display_minor_ticks(self):
return self._display_minor_ticks
def set_tick_out(self, tick_out):
"""
set True if tick need to be rotated by 180 degree.
"""
self._tick_out = tick_out
def get_tick_out(self):
"""
Return True if the tick will be rotated by 180 degree.
"""
return self._tick_out
def set_ticksize(self, ticksize):
"""
set length of the ticks in points.
"""
self._ticksize = ticksize
def get_ticksize(self):
"""
Return length of the ticks in points.
"""
return self._ticksize
def set_minor_ticksize(self, ticksize):
"""
set length of the minor ticks in points.
"""
self._minor_ticksize = ticksize
def get_minor_ticksize(self):
"""
Return length of the minor ticks in points.
"""
return self._minor_ticksize
@property
def out_size(self):
if self._tick_out:
return self._ticksize
else:
return 0.
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
def get_visible_axes(self):
if self._visible_axes == 'all':
return self.world.keys()
else:
return [x for x in self._visible_axes if x in self.world]
def clear(self):
self.world = {}
self.pixel = {}
self.angle = {}
self.disp = {}
self.minor_world = {}
self.minor_pixel = {}
self.minor_angle = {}
self.minor_disp = {}
def add(self, axis, world, pixel, angle, axis_displacement):
if axis not in self.world:
self.world[axis] = [world]
self.pixel[axis] = [pixel]
self.angle[axis] = [angle]
self.disp[axis] = [axis_displacement]
else:
self.world[axis].append(world)
self.pixel[axis].append(pixel)
self.angle[axis].append(angle)
self.disp[axis].append(axis_displacement)
def get_minor_world(self):
return self.minor_world
def add_minor(self, minor_axis, minor_world, minor_pixel, minor_angle,
minor_axis_displacement):
if minor_axis not in self.minor_world:
self.minor_world[minor_axis] = [minor_world]
self.minor_pixel[minor_axis] = [minor_pixel]
self.minor_angle[minor_axis] = [minor_angle]
self.minor_disp[minor_axis] = [minor_axis_displacement]
else:
self.minor_world[minor_axis].append(minor_world)
self.minor_pixel[minor_axis].append(minor_pixel)
self.minor_angle[minor_axis].append(minor_angle)
self.minor_disp[minor_axis].append(minor_axis_displacement)
def __len__(self):
return len(self.world)
_tickvert_path = Path([[0., 0.], [1., 0.]])
def draw(self, renderer, ticks_locs):
"""
Draw the ticks.
"""
if not self.get_visible():
return
offset = renderer.points_to_pixels(self.get_ticksize())
self._draw_ticks(renderer, self.pixel, self.angle, offset, ticks_locs)
if self._display_minor_ticks:
offset = renderer.points_to_pixels(self.get_minor_ticksize())
self._draw_ticks(renderer, self.minor_pixel, self.minor_angle, offset, ticks_locs)
def _draw_ticks(self, renderer, pixel_array, angle_array, offset, ticks_locs):
"""
Draw the minor ticks.
"""
path_trans = self.get_transform()
gc = renderer.new_gc()
gc.set_foreground(self.get_color())
gc.set_alpha(self.get_alpha())
gc.set_linewidth(self.get_linewidth())
marker_scale = Affine2D().scale(offset, offset)
marker_rotation = Affine2D()
marker_transform = marker_scale + marker_rotation
initial_angle = 180. if self.get_tick_out() else 0.
for axis in self.get_visible_axes():
if axis not in pixel_array:
continue
for loc, angle in zip(pixel_array[axis], angle_array[axis]):
# Set the rotation for this tick
marker_rotation.rotate_deg(initial_angle + angle)
# Draw the markers
locs = path_trans.transform_non_affine(np.array([loc, loc]))
renderer.draw_markers(gc, self._tickvert_path, marker_transform,
Path(locs), path_trans.get_affine())
# Reset the tick rotation before moving to the next tick
marker_rotation.clear()
ticks_locs[axis].append(locs)
gc.restore()
| bsd-3-clause |
Tihacker/Wikipedia-Templates-Analysis | categorizza/fit.py | 1 | 6649 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Create template graphs png of multiple graphs.
Usage:
stampagrafici.py [<template>] [options]
Options:
-h --help Show this screen.
-v Verbose.
--esempio
--esempio2
--esempio3
"""
import csv, re, pdb, ast, time, os, math
from docopt import docopt
import datetime
import matplotlib.pyplot as plot
import numpy as np
import matplotlib.dates as mdates
import tarfile
def do(inputtemplate, verbose):
    # NORMALIZATION
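    # Rescale both the time axis and the value axis of the template's history to
    # [0, 1] so that the candidate curves can be compared across templates.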
if(arguments['--esempio'] or arguments['--esempio2'] or arguments['--esempio3']):
coord = csv.reader(open("it/graphs/total/Bio.csv", "r"))
else:
coord = csv.reader(open(inputtemplate, "r"))
xdiff = 0
ymax = 0
ymin = -1
x = []
y = []
for c in coord:
date = int(c[0])
value = int(c[1])
if xdiff == 0:
xdiff = date
if ymin == -1:
ymin = value
if value > ymax:
ymax = value
if value < ymin:
ymin = value
x.append(date - xdiff)
y.append(value)
xdiff = date - xdiff
ydiff = ymax - ymin
normx = []
normy = []
n = 0
if xdiff != 0 and ydiff != 0:
while (n < len(x)):
partial = 0
percent = x[n] / xdiff
normx.append(percent)
partial = y[n] - ymin
normy.append(partial / ydiff)
n = n + 1
else:
return 1
    # EXAMPLE: keep only the first quarter of the normalized points (used by the --esempio fits)
number = 0
halfnormx = []
limit = 0
while number < len(normx)/4:
halfnormx.append(normx[number])
limit = normx[number]
number = number + 1
number = 0
halfnormy = []
while number < len(normx)/4:
halfnormy.append(normy[number])
number = number + 1
if(arguments['--esempio3']):
normx = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
normy = [0.5, 0.6, 0.5, 0.6, 0.5, 0.6, 0.5, 0.6, 0.5, 0.6, 0.5]
    # Fitting setup
np.seterr(all="warn")
l = np.linspace(0, 1, len(normx))
r2 = {}
    # FLAT (constant) fit
z = np.polyfit(normx, normy, 0)
piatta = np.poly1d(z)
i = 0
media = np.sum(normy)/len(normy)
ssreg = np.sum((piatta(normx)-media)**2)
sstot = np.sum((normy - media)**2)
print(sstot)
r2["piatta"] = ssreg / sstot
    # LINEAR fit
if(arguments['--esempio']):
z = np.polyfit(halfnormx, halfnormy, 1)
else:
z = np.polyfit(normx, normy, 1)
lineare = np.poly1d(z)
i = 0
media = np.sum(normy)/len(normy)
ssreg = np.sum((lineare(normx)-media)**2)
sstot = np.sum((normy - media)**2)
r2["lineare"] = ssreg / sstot
    # POLYNOMIAL (degree-2) fit
if(arguments['--esempio']):
z = np.polyfit(halfnormx, halfnormy, 2)
else:
z = np.polyfit(normx, normy, 2)
polinomiale = np.poly1d(z)
i = 0
media = np.sum(normy)/len(normy)
ssreg = np.sum((polinomiale(normx)-media)**2)
sstot = np.sum((normy - media)**2)
r2["polinomiale"] = ssreg / sstot
    # EXPONENTIAL fit (linear fit to log(y))
normyexp = []
for n in normy:
if n == 0:
normyexp.append(0)
else:
normyexp.append(np.log(n))
z = np.polyfit(normx, normyexp, 1)
esponenziale = np.poly1d(z)
i = 0
media = np.sum(normyexp)/len(normyexp)
ssreg = np.sum((esponenziale(normx)-media)**2)
sstot = np.sum((normyexp - media)**2)
if (sstot != 0):
r2["esponenziale"] = ssreg / sstot
else:
r2["esponenziale"] = 0
    # SQUARE-ROOT fit (linear fit to y**2)
normysquare = []
for n in normy:
normysquare.append(n**2)
z = np.polyfit(normx, normysquare, 1)
radice = np.poly1d(z)
i = 0
media = np.sum(normysquare)/len(normysquare)
ssreg = np.sum((radice(normx)-media)**2)
sstot = np.sum((normysquare - media)**2)
r2["radice"] = ssreg / sstot
    # LOG fit (linear fit to exp(y))
normylog = []
for n in normy:
normylog.append(np.exp(n))
z = np.polyfit(normx, normylog, 1)
log = np.poly1d(z)
i = 0
media = np.sum(normylog)/len(normylog)
ssreg = np.sum((log(normx)-media)**2)
sstot = np.sum((normylog - media)**2)
r2["log"] = ssreg / sstot
    # Choose the best model
best = max(r2, key=r2.get)
if r2[best] == r2["polinomiale"]:
if ((r2["polinomiale"] - r2["lineare"])/r2["lineare"] < 0.1):
best = "lineare"
if r2[best] == r2["lineare"]:
if ((r2["lineare"] - r2["piatta"])/r2["piatta"] < 0.1):
best = "piatta"
if r2[best] <= 0.5:
best = "none"
if (verbose):
    # Plot the fitted curves
fig = plot.figure()
ax = fig.add_subplot(111)
ax.grid(which='major', alpha=0.5)
plot.xlabel('X')
plot.ylabel("Y")
if (arguments['--esempio']):
l = np.linspace(-1, 2, len(normx))
ax.set_xlim([0,1])
ax.set_ylim([0,1.5])
ax.axvline(limit, linestyle='--', label = "Limite fit")
plot.plot(normx, normy, 'ro', label = "Punti")
plot.plot(l, lineare(l), linewidth=2, label="Lineare")
plot.plot(l, polinomiale(l), linewidth=2, label="Polinomiale")
if (arguments['--esempio2']):
plot.xlabel('X')
plot.ylabel("Y")
plot.plot(l, normy)
plot.plot(l, normylog, linewidth=1, label="Log")
else:
ax.set_xlim([0,1])
ax.set_ylim([0,1])
plot.plot(normx, normy, linewidth=2, label="Curva")
plot.plot(l, piatta(l), linewidth=1, label="Piatta")
plot.plot(l, lineare(l), linewidth=1, label="Lineare")
plot.plot(l, polinomiale(l), linewidth=1, label="Polinomiale")
plot.plot(l, np.exp(esponenziale(l)), linewidth=1, label="Esponenziale")
plot.plot(l, np.sqrt(radice(l)), linewidth=1, label="Radice")
plot.plot(l, np.log(log(l)), linewidth=1, label="Log")
plot.legend(bbox_to_anchor=(0.35, 1))
print("PIATTA: " + str(r2["piatta"]))
print("LINEARE: " + str(r2["lineare"]))
print("POLI: " + str(r2["polinomiale"]))
print("ESPONENZIALE: " + str(r2["esponenziale"]))
print("SQUARE: " + str(r2["radice"]))
print("LOG: " + str(r2["log"]))
print("Migliore: " + best)
plot.show()
return best
if __name__ == "__main__":
arguments = docopt(__doc__)
i = arguments['<template>']
result = do(i, arguments['-v'])
print(result) | mit |
lukas/ml-class | videos/time-series/plotutil.py | 2 | 2197 | import matplotlib
matplotlib.use('Agg') # noqa
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from tensorflow import keras
import numpy as np
import wandb
def fig2data(fig):
"""
    @brief Convert a Matplotlib figure to a 3D numpy array with RGBA channels and return it
@param fig a matplotlib figure
@return a numpy 3D array of RGBA values
"""
# draw the renderer
fig.canvas.draw()
# Get the RGBA buffer from the figure
w, h = fig.canvas.get_width_height()
buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)
buf.shape = (w, h, 4)
# canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
buf = np.roll(buf, 3, axis=2)
return buf
def repeated_predictions(model, data, look_back, steps=100):
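    # Roll the model forward autoregressively: each prediction is appended to the
    # sliding look_back window that is fed back in to produce the next prediction.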
predictions = []
for i in range(steps):
input_data = data[np.newaxis, :, np.newaxis]
generated = model.predict(input_data)[0]
data = np.append(data, generated)[-look_back:]
predictions.append(generated)
return predictions
class PlotCallback(keras.callbacks.Callback):
def __init__(self, trainX, trainY, testX, testY, look_back):
self.repeat_predictions = True
self.trainX = trainX
self.trainY = trainY
self.testX = testX
self.testY = testY
self.look_back = look_back
def on_epoch_end(self, epoch, logs):
if self.repeat_predictions:
preds = repeated_predictions(
self.model, self.trainX[-1, :, 0], self.look_back, self.testX.shape[0])
else:
            preds = self.model.predict(self.testX)
        # Generate a figure with matplotlib
figure = matplotlib.pyplot.figure(figsize=(10, 10))
plot = figure.add_subplot(111)
plot.plot(self.trainY)
plot.plot(np.append(np.empty_like(self.trainY) * np.nan, self.testY))
plot.plot(np.append(np.empty_like(self.trainY) * np.nan, preds))
data = fig2data(figure)
matplotlib.pyplot.close(figure)
wandb.log({"image": wandb.Image(data)}, commit=False)
| gpl-2.0 |
markneville/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/__init__.py | 72 | 2225 |
import matplotlib
import inspect
import warnings
# ipython relies on interactive_bk being defined here
from matplotlib.rcsetup import interactive_bk
__all__ = ['backend','show','draw_if_interactive',
'new_figure_manager', 'backend_version']
backend = matplotlib.get_backend() # validates, to match all_backends
def pylab_setup():
'return new_figure_manager, draw_if_interactive and show for pylab'
# Import the requested backend into a generic module object
if backend.startswith('module://'):
backend_name = backend[9:]
else:
backend_name = 'backend_'+backend
backend_name = backend_name.lower() # until we banish mixed case
backend_name = 'matplotlib.backends.%s'%backend_name.lower()
backend_mod = __import__(backend_name,
globals(),locals(),[backend_name])
# Things we pull in from all backends
new_figure_manager = backend_mod.new_figure_manager
# image backends like pdf, agg or svg do not need to do anything
# for "show" or "draw_if_interactive", so if they are not defined
# by the backend, just do nothing
def do_nothing_show(*args, **kwargs):
frame = inspect.currentframe()
fname = frame.f_back.f_code.co_filename
if fname in ('<stdin>', '<ipython console>'):
warnings.warn("""
Your currently selected backend, '%s' does not support show().
Please select a GUI backend in your matplotlibrc file ('%s')
or with matplotlib.use()""" %
(backend, matplotlib.matplotlib_fname()))
def do_nothing(*args, **kwargs): pass
backend_version = getattr(backend_mod,'backend_version', 'unknown')
show = getattr(backend_mod, 'show', do_nothing_show)
draw_if_interactive = getattr(backend_mod, 'draw_if_interactive', do_nothing)
# Additional imports which only happen for certain backends. This section
# should probably disappear once all backends are uniform.
if backend.lower() in ['wx','wxagg']:
Toolbar = backend_mod.Toolbar
__all__.append('Toolbar')
matplotlib.verbose.report('backend %s version %s' % (backend,backend_version))
return new_figure_manager, draw_if_interactive, show
| agpl-3.0 |
equialgo/scikit-learn | sklearn/neighbors/tests/test_lof.py | 34 | 4142 | # Authors: Nicolas Goix <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from math import sqrt
import numpy as np
from sklearn import neighbors
from numpy.testing import assert_array_equal
from sklearn import metrics
from sklearn.metrics import roc_auc_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.datasets import load_iris
# load the iris dataset
# and randomly permute it
rng = check_random_state(0)
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_lof():
# Toy sample (the last two samples are outliers):
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [5, 3], [-4, 2]]
# Test LocalOutlierFactor:
clf = neighbors.LocalOutlierFactor(n_neighbors=5)
score = clf.fit(X).negative_outlier_factor_
assert_array_equal(clf._fit_X, X)
# Assert largest outlier score is smaller than smallest inlier score:
assert_greater(np.min(score[:-2]), np.max(score[-2:]))
# Assert predict() works:
clf = neighbors.LocalOutlierFactor(contamination=0.25,
n_neighbors=5).fit(X)
assert_array_equal(clf._predict(), 6 * [1] + 2 * [-1])
def test_lof_performance():
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(120, 2)
X_train = np.r_[X + 2, X - 2]
X_train = X[:100]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
X_test = np.r_[X[100:], X_outliers]
y_test = np.array([0] * 20 + [1] * 20)
# fit the model
clf = neighbors.LocalOutlierFactor().fit(X_train)
# predict scores (the lower, the more normal)
y_pred = -clf._decision_function(X_test)
# check that roc_auc is good
assert_greater(roc_auc_score(y_test, y_pred), .99)
def test_lof_values():
# toy samples:
X_train = [[1, 1], [1, 2], [2, 1]]
clf = neighbors.LocalOutlierFactor(n_neighbors=2).fit(X_train)
s_0 = 2. * sqrt(2.) / (1. + sqrt(2.))
s_1 = (1. + sqrt(2)) * (1. / (4. * sqrt(2.)) + 1. / (2. + 2. * sqrt(2)))
# check predict()
assert_array_almost_equal(-clf.negative_outlier_factor_, [s_0, s_1, s_1])
# check predict(one sample not in train)
assert_array_almost_equal(-clf._decision_function([[2., 2.]]), [s_0])
    # check predict(one sample already in train)
assert_array_almost_equal(-clf._decision_function([[1., 1.]]), [s_1])
def test_lof_precomputed(random_state=42):
"""Tests LOF with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
# As a feature matrix (n_samples by n_features)
lof_X = neighbors.LocalOutlierFactor(n_neighbors=3)
lof_X.fit(X)
pred_X_X = lof_X._predict()
pred_X_Y = lof_X._predict(Y)
# As a dense distance matrix (n_samples by n_samples)
lof_D = neighbors.LocalOutlierFactor(n_neighbors=3, algorithm='brute',
metric='precomputed')
lof_D.fit(DXX)
pred_D_X = lof_D._predict()
pred_D_Y = lof_D._predict(DYX)
assert_array_almost_equal(pred_X_X, pred_D_X)
assert_array_almost_equal(pred_X_Y, pred_D_Y)
def test_n_neighbors_attribute():
X = iris.data
clf = neighbors.LocalOutlierFactor(n_neighbors=500).fit(X)
assert_equal(clf.n_neighbors_, X.shape[0] - 1)
clf = neighbors.LocalOutlierFactor(n_neighbors=500)
assert_warns_message(UserWarning,
"n_neighbors will be set to (n_samples - 1)",
clf.fit, X)
assert_equal(clf.n_neighbors_, X.shape[0] - 1)
| bsd-3-clause |
zhenv5/scikit-learn | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
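# Illustrative sketch of _grid_from_X on a tiny, made-up feature matrix: with
# fewer unique values than grid_resolution, each axis is just the unique column
# values, so the grid is their cartesian product. The helper is hypothetical.
def _example_grid_from_x():
    X_demo = np.array([[0.0, 10.0],
                       [1.0, 20.0],
                       [2.0, 30.0]])
    grid, axes = _grid_from_X(X_demo, percentiles=(0.05, 0.95),
                              grid_resolution=5)
    # 3 unique values per column -> axes of length 3 -> grid of shape (9, 2)
    return grid.shape, [axis.shape for axis in axes]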
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
        # don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
FedericoGarza/tsfeatures | tsfeatures/metrics/metrics.py | 1 | 10356 | #!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
from functools import partial
from math import sqrt
from multiprocessing import Pool
from typing import Callable, Optional
AVAILABLE_METRICS = ['mse', 'rmse', 'mape', 'smape', 'mase', 'rmsse',
'mini_owa', 'pinball_loss']
######################################################################
# METRICS
######################################################################
def mse(y: np.array, y_hat:np.array) -> float:
"""Calculates Mean Squared Error.
MSE measures the prediction accuracy of a
forecasting method by calculating the squared deviation
of the prediction and the true value at a given time and
    averages these deviations over the length of the series.
Parameters
----------
y: numpy array
actual test values
y_hat: numpy array
predicted values
Returns
-------
scalar:
MSE
"""
mse = np.mean(np.square(y - y_hat))
return mse
def rmse(y: np.array, y_hat:np.array) -> float:
"""Calculates Root Mean Squared Error.
RMSE measures the prediction accuracy of a
forecasting method by calculating the squared deviation
of the prediction and the true value at a given time and
    averages these deviations over the length of the series.
Finally the RMSE will be in the same scale
as the original time series so its comparison with other
series is possible only if they share a common scale.
Parameters
----------
y: numpy array
actual test values
y_hat: numpy array
predicted values
Returns
-------
scalar: RMSE
"""
rmse = sqrt(np.mean(np.square(y - y_hat)))
return rmse
def mape(y: np.array, y_hat:np.array) -> float:
"""Calculates Mean Absolute Percentage Error.
MAPE measures the relative prediction accuracy of a
forecasting method by calculating the percentual deviation
of the prediction and the true value at a given time and
    averages these deviations over the length of the series.
Parameters
----------
y: numpy array
actual test values
y_hat: numpy array
predicted values
Returns
-------
scalar: MAPE
"""
mape = np.mean(np.abs(y - y_hat) / np.abs(y))
mape = 100 * mape
return mape
def smape(y: np.array, y_hat:np.array) -> float:
"""Calculates Symmetric Mean Absolute Percentage Error.
SMAPE measures the relative prediction accuracy of a
forecasting method by calculating the relative deviation
of the prediction and the true value scaled by the sum of the
absolute values for the prediction and true value at a
    given time, then averages these deviations over the length
of the series. This allows the SMAPE to have bounds between
    0% and 200%, which is desirable compared to the standard MAPE, which
    may be undefined when actual values are zero.
Parameters
----------
y: numpy array
actual test values
y_hat: numpy array
predicted values
Returns
-------
scalar: SMAPE
"""
scale = np.abs(y) + np.abs(y_hat)
scale[scale == 0] = 1e-3
smape = np.mean(np.abs(y - y_hat) / scale)
smape = 200 * smape
return smape
def mase(y: np.array, y_hat: np.array,
y_train: np.array, seasonality: int = 1) -> float:
"""Calculates the M4 Mean Absolute Scaled Error.
MASE measures the relative prediction accuracy of a
    forecasting method by comparing the mean absolute errors
of the prediction and the true value against the mean
absolute errors of the seasonal naive model.
Parameters
----------
y: numpy array
actual test values
y_hat: numpy array
predicted values
y_train: numpy array
actual train values for Naive1 predictions
seasonality: int
main frequency of the time series
Hourly 24, Daily 7, Weekly 52,
Monthly 12, Quarterly 4, Yearly 1
Returns
-------
scalar: MASE
"""
scale = np.mean(abs(y_train[seasonality:] - y_train[:-seasonality]))
mase = np.mean(abs(y - y_hat)) / scale
mase = 100 * mase
return mase
def rmsse(y: np.array, y_hat: np.array,
y_train: np.array, seasonality: int = 1) -> float:
"""Calculates the M5 Root Mean Squared Scaled Error.
Parameters
----------
y: numpy array
actual test values
y_hat: numpy array of len h (forecasting horizon)
predicted values
y_train: numpy array
actual train values
seasonality: int
main frequency of the time series
Hourly 24, Daily 7, Weekly 52,
Monthly 12, Quarterly 4, Yearly 1
Returns
-------
scalar: RMSSE
"""
scale = np.mean(np.square(y_train[seasonality:] - y_train[:-seasonality]))
rmsse = sqrt(mse(y, y_hat) / scale)
rmsse = 100 * rmsse
return rmsse
def mini_owa(y: np.array, y_hat: np.array,
y_train: np.array,
seasonality: int,
y_bench: np.array):
"""Calculates the Overall Weighted Average for a single series.
    It computes MASE and sMAPE for both the benchmark (Naive2) and the current model,
    then calculates the Overall Weighted Average of the two ratios.
Parameters
----------
y: numpy array
actual test values
y_hat: numpy array of len h (forecasting horizon)
predicted values
y_train: numpy array
insample values of the series for scale
seasonality: int
main frequency of the time series
Hourly 24, Daily 7, Weekly 52,
Monthly 12, Quarterly 4, Yearly 1
y_bench: numpy array of len h (forecasting horizon)
predicted values of the benchmark model
Returns
-------
return: mini_OWA
"""
mase_y = mase(y, y_hat, y_train, seasonality)
mase_bench = mase(y, y_bench, y_train, seasonality)
smape_y = smape(y, y_hat)
smape_bench = smape(y, y_bench)
mini_owa = ((mase_y/mase_bench) + (smape_y/smape_bench))/2
return mini_owa
def pinball_loss(y: np.array, y_hat: np.array, tau: float = 0.5):
"""Calculates the Pinball Loss.
The Pinball loss measures the deviation of a quantile forecast.
By weighting the absolute deviation in a non symmetric way, the
loss pays more attention to under or over estimation.
A common value for tau is 0.5 for the deviation from the median.
Parameters
----------
y: numpy array
actual test values
y_hat: numpy array of len h (forecasting horizon)
predicted values
tau: float
Fixes the quantile against which the predictions are compared.
Returns
-------
return: pinball_loss
"""
delta_y = y - y_hat
pinball = np.maximum(tau * delta_y, (tau-1) * delta_y)
pinball = pinball.mean()
return pinball
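# Illustrative sketch: the point-forecast metrics above evaluated on made-up
# arrays. The helper and the numbers are hypothetical and carry no meaning
# beyond showing the call signatures.
def _example_point_metrics():
    y = np.array([2.0, 3.0, 4.0, 5.0])
    y_hat = np.array([2.5, 2.5, 4.5, 4.5])
    y_train = np.array([1.0, 2.0, 3.0, 2.0, 3.0, 4.0])
    return {
        'mse': mse(y, y_hat),
        'rmse': rmse(y, y_hat),
        'smape': smape(y, y_hat),
        'mase': mase(y, y_hat, y_train, seasonality=1),
        'pinball_0.5': pinball_loss(y, y_hat, tau=0.5),
    }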
######################################################################
# PANEL EVALUATION
######################################################################
def _evaluate_ts(uid, y_test, y_hat,
y_train, metric,
seasonality, y_bench, metric_name):
y_test_uid = y_test.loc[uid].y.values
y_hat_uid = y_hat.loc[uid].y_hat.values
if metric_name in ['mase', 'rmsse']:
y_train_uid = y_train.loc[uid].y.values
evaluation_uid = metric(y=y_test_uid, y_hat=y_hat_uid,
y_train=y_train_uid,
seasonality=seasonality)
elif metric_name in ['mini_owa']:
y_train_uid = y_train.loc[uid].y.values
y_bench_uid = y_bench.loc[uid].y_hat.values
evaluation_uid = metric(y=y_test_uid, y_hat=y_hat_uid,
y_train=y_train_uid,
seasonality=seasonality,
y_bench=y_bench_uid)
else:
evaluation_uid = metric(y=y_test_uid, y_hat=y_hat_uid)
return uid, evaluation_uid
def evaluate_panel(y_test: pd.DataFrame,
y_hat: pd.DataFrame,
y_train: pd.DataFrame,
metric: Callable,
seasonality: Optional[int] = None,
y_bench: Optional[pd.DataFrame] = None,
threads: Optional[int] = None):
"""Calculates a specific metric for y and y_hat (and y_train, if needed).
Parameters
----------
y_test: pandas df
df with columns ['unique_id', 'ds', 'y']
y_hat: pandas df
df with columns ['unique_id', 'ds', 'y_hat']
y_train: pandas df
df with columns ['unique_id', 'ds', 'y'] (train)
This is used in the scaled metrics ('mase', 'rmsse').
metric: callable
loss function
seasonality: int
Main frequency of the time series.
Used in ('mase', 'rmsse').
Commonly used seasonalities:
Hourly: 24,
Daily: 7,
Weekly: 52,
Monthly: 12,
Quarterly: 4,
Yearly: 1.
y_bench: pandas df
df with columns ['unique_id', 'ds', 'y_hat']
predicted values of the benchmark model
This is used in 'mini_owa'.
threads: int
        Number of worker processes passed to ``multiprocessing.Pool``. Use None (default) to use all available cores.
Returns
------
pandas dataframe:
        loss for each unique_id in the panel data
"""
metric_name = metric.__code__.co_name
uids = y_test['unique_id'].unique()
y_hat_uids = y_hat['unique_id'].unique()
assert len(y_test)==len(y_hat), "not same length"
assert all(uids == y_hat_uids), "not same u_ids"
y_test = y_test.set_index(['unique_id', 'ds'])
y_hat = y_hat.set_index(['unique_id', 'ds'])
if metric_name in ['mase', 'rmsse']:
y_train = y_train.set_index(['unique_id', 'ds'])
elif metric_name in ['mini_owa']:
y_train = y_train.set_index(['unique_id', 'ds'])
y_bench = y_bench.set_index(['unique_id', 'ds'])
partial_evaluation = partial(_evaluate_ts, y_test=y_test, y_hat=y_hat,
y_train=y_train, metric=metric,
seasonality=seasonality,
y_bench=y_bench,
metric_name=metric_name)
with Pool(threads) as pool:
evaluations = pool.map(partial_evaluation, uids)
evaluations = pd.DataFrame(evaluations, columns=['unique_id', 'error'])
return evaluations
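# Illustrative sketch of evaluate_panel on a single made-up series. Column
# names follow the docstring above; the helper, the series id and the values
# are hypothetical. threads=1 keeps the worker pool minimal for the demo.
def _example_evaluate_panel():
    ds = pd.date_range('2020-01-01', periods=4, freq='D')
    y_test = pd.DataFrame({'unique_id': 'series_1', 'ds': ds,
                           'y': [2.0, 3.0, 4.0, 5.0]})
    y_hat = pd.DataFrame({'unique_id': 'series_1', 'ds': ds,
                          'y_hat': [2.5, 2.5, 4.5, 4.5]})
    # mse needs no training data, so y_train is not used here
    return evaluate_panel(y_test=y_test, y_hat=y_hat, y_train=None,
                          metric=mse, threads=1)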
| mit |
Dih5/xpecgen | xpecgen/xpecgen.py | 1 | 33042 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""xpecgen.py: A module to calculate x-ray spectra generated in tungsten anodes."""
from __future__ import print_function
import math
from bisect import bisect_left
import os
from glob import glob
import warnings
import csv
import numpy as np
from scipy import interpolate, integrate, optimize
import xlsxwriter
try:
import matplotlib.pyplot as plt
plt.ion()
plot_available = True
except ImportError:
warnings.warn("Unable to import matplotlib. Plotting will be disabled.")
plot_available = False
__author__ = 'Dih5'
__version__ = "1.3.0"
data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
# --------------------General purpose functions-------------------------#
def log_interp_1d(xx, yy, kind='linear'):
"""
Perform interpolation in log-log scale.
Args:
xx (List[float]): x-coordinates of the points.
yy (List[float]): y-coordinates of the points.
kind (str or int, optional): The kind of interpolation in the log-log domain. This is passed to
scipy.interpolate.interp1d.
Returns:
A function whose call method uses interpolation in log-log scale to find the value at a given point.
"""
log_x = np.log(xx)
log_y = np.log(yy)
# No big difference in efficiency was found when replacing interp1d by
# UnivariateSpline
lin_interp = interpolate.interp1d(log_x, log_y, kind=kind)
return lambda zz: np.exp(lin_interp(np.log(zz)))
# This custom implementation of dblquad is based in the one in numpy
# (Cf. https://github.com/scipy/scipy/blob/v0.16.1/scipy/integrate/quadpack.py#L449 )
# It was modified to work only in rectangular regions (no g(x) nor h(x))
# to set the inner integral epsrel
# and to increase the limit of points taken
def _infunc(x, func, c, d, more_args, epsrel):
myargs = (x,) + more_args
return integrate.quad(func, c, d, args=myargs, epsrel=epsrel, limit=2000)[0]
def custom_dblquad(func, a, b, c, d, args=(), epsabs=1.49e-8, epsrel=1.49e-8, maxp1=50, limit=2000):
"""
A wrapper around numpy's dblquad to restrict it to a rectangular region and to pass arguments to the 'inner'
integral.
Args:
func: The integrand function f(y,x).
a (float): The lower bound of the second argument in the integrand function.
b (float): The upper bound of the second argument in the integrand function.
c (float): The lower bound of the first argument in the integrand function.
d (float): The upper bound of the first argument in the integrand function.
args (sequence, optional): extra arguments to pass to func.
epsabs (float, optional): Absolute tolerance passed directly to the inner 1-D quadrature integration.
epsrel (float, optional): Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.
maxp1 (float or int, optional): An upper bound on the number of Chebyshev moments.
limit (int, optional): Upper bound on the number of cycles (>=3) for use with a sinusoidal weighting and an
infinite end-point.
Returns:
(tuple): tuple containing:
y (float): The resultant integral.
abserr (float): An estimate of the error.
"""
return integrate.quad(_infunc, a, b, (func, c, d, args, epsrel),
epsabs=epsabs, epsrel=epsrel, maxp1=maxp1, limit=limit)
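# Illustrative sketch: custom_dblquad integrates f(y, x) over the rectangle
# [a, b] x [c, d]. For f(y, x) = x * y over [0, 1] x [0, 2] the exact value is
# 1.0. The helper is hypothetical and only demonstrates the argument order.
def _example_custom_dblquad():
    value, abserr = custom_dblquad(lambda y, x: x * y, 0.0, 1.0, 0.0, 2.0)
    return value, abserr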
def triangle(x, loc=0, size=0.5, area=1):
"""
The triangle window function centered in loc, of given size and area, evaluated at a point.
Args:
x: The point where the function is evaluated.
loc: The position of the peak.
        size: The half-width of the window support; the function is nonzero on [loc - size, loc + size].
area: The area below the function.
Returns:
The value of the function.
"""
# t=abs((x-loc)/size)
# return 0 if t>1 else (1-t)*abs(area/size)
return 0 if abs((x - loc) / size) > 1 else (1 - abs((x - loc) / size)) * abs(area / size)
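# Illustrative sketch of the triangle window: height area/size at the peak,
# zero at the edge of its support. The helper and the numbers are hypothetical.
def _example_triangle():
    peak = triangle(58.65, loc=58.65, size=1.0, area=2.0)   # 2.0 = area / size
    edge = triangle(59.65, loc=58.65, size=1.0, area=2.0)   # 0.0 at the edge
    return peak, edge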
# --------------------Spectrum model functionality----------------------#
class Spectrum:
"""
Set of 2D points and discrete components representing a spectrum.
    A Spectrum can be multiplied by a scalar (int, float...) to scale its counts by that factor.
Two spectra can be added if they share their x axes and their discrete component positions.
    Note: When two spectra are added, it is not checked that the addition makes sense. It is the user's responsibility to
do so.
Attributes:
x (:obj:`numpy.ndarray`): x coordinates (energy) describing the continuum part of the spectrum.
y (:obj:`numpy.ndarray`): y coordinates (pdf) describing the continuum part of the spectrum.
discrete (List[List[float]]): discrete components of the spectrum, each of the form [x, num, rel_x] where:
* x is the mean position of the peak.
* num is the number of particles in the peak.
        * rel_x is a characteristic distance over which the peak extends. The exact meaning depends on the window function.
"""
def __init__(self):
"""
Create an empty spectrum.
"""
self.x = []
self.y = []
self.discrete = []
def clone(self):
"""
Return a new Spectrum object cloning itself
Returns:
:obj:`Spectrum`: The new Spectrum.
"""
s = Spectrum()
s.x = list(self.x)
s.y = self.y[:]
s.discrete = []
for a in self.discrete:
s.discrete.append(a[:])
return s
def get_continuous_function(self):
"""
Get a function representing the continuous part of the spectrum.
Returns:
An interpolation function representing the continuous part of the spectrum.
"""
return interpolate.interp1d(self.x, self.y, bounds_error=False, fill_value=0)
def get_points(self, peak_shape=triangle, num_discrete=10):
"""
        Returns two lists of coordinates x, y representing the whole spectrum, including both the continuous and discrete components.
The mesh is chosen by extending x to include details of the discrete peaks.
Args:
peak_shape: The window function used to calculate the peaks. See :obj:`triangle` for an example.
num_discrete: Number of points that are added to mesh in each peak.
Returns:
(tuple): tuple containing:
x2 (List[float]): The list of x coordinates (energy) in the whole spectrum.
y2 (List[float]): The list of y coordinates (density) in the whole spectrum.
"""
if peak_shape is None or self.discrete == []:
return self.x[:], self.y[:]
# A mesh for each discrete component:
discrete_mesh = np.concatenate(list(map(lambda x: np.linspace(
x[0] - x[2], x[0] + x[2], num=num_discrete, endpoint=True), self.discrete)))
x2 = sorted(np.concatenate((discrete_mesh, self.x)))
f = self.get_continuous_function()
peak = np.vectorize(peak_shape)
def g(x):
t = 0
for l in self.discrete:
t += peak(x, loc=l[0], size=l[2]) * l[1]
return t
y2 = [f(x) + g(x) for x in x2]
return x2, y2
def get_plot(self, place, show_mesh=True, prepare_format=True, peak_shape=triangle):
"""
Prepare a plot of the data in the given place
Args:
place: The class whose method plot is called to produce the plot (e.g., matplotlib.pyplot).
show_mesh (bool): Whether to plot the points over the continuous line as circles.
prepare_format (bool): Whether to include ticks and labels in the plot.
peak_shape: The window function used to plot the peaks. See :obj:`triangle` for an example.
"""
if prepare_format:
place.tick_params(axis='both', which='major', labelsize=10)
place.tick_params(axis='both', which='minor', labelsize=8)
place.set_xlabel('E', fontsize=10, fontweight='bold')
place.set_ylabel('f(E)', fontsize=10, fontweight='bold')
x2, y2 = self.get_points(peak_shape=peak_shape)
if show_mesh:
place.plot(self.x, self.y, 'bo', x2, y2, 'b-')
else:
place.plot(x2, y2, 'b-')
def show_plot(self, show_mesh=True, block=True):
"""
Prepare the plot of the data and show it in matplotlib window.
Args:
show_mesh (bool): Whether to plot the points over the continuous line as circles.
block (bool): Whether the plot is blocking or non blocking.
"""
if plot_available:
plt.clf()
self.get_plot(plt, show_mesh=show_mesh, prepare_format=False)
plt.xlabel("E")
plt.ylabel("f(E)")
plt.gcf().canvas.set_window_title("".join(('xpecgen v', __version__)))
plt.show(block=block)
else:
warnings.warn("Asked for a plot but matplotlib could not be imported.")
def export_csv(self, route="a.csv", peak_shape=triangle, transpose=False):
"""
Export the data to a csv file (comma-separated values).
Args:
route (str): The route where the file will be saved.
peak_shape: The window function used to plot the peaks. See :obj:`triangle` for an example.
transpose (bool): True to write in two columns, False in two rows.
"""
x2, y2 = self.get_points(peak_shape=peak_shape)
with open(route, 'w') as csvfile:
w = csv.writer(csvfile, dialect='excel')
if transpose:
w.writerows([list(a) for a in zip(*[x2, y2])])
else:
w.writerow(x2)
w.writerow(y2)
def export_xlsx(self, route="a.xlsx", peak_shape=triangle, markers=False):
"""
Export the data to a xlsx file (Excel format).
Args:
route (str): The route where the file will be saved.
peak_shape: The window function used to plot the peaks. See :obj:`triangle` for an example.
markers (bool): Whether to use markers or a continuous line in the plot in the file.
"""
x2, y2 = self.get_points(peak_shape=peak_shape)
workbook = xlsxwriter.Workbook(route)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': 1})
worksheet.write(0, 0, "Energy (keV)", bold)
worksheet.write(0, 1, "Photon density (1/keV)", bold)
worksheet.write_column('A2', x2)
worksheet.write_column('B2', y2)
# Add a plot
if markers:
chart = workbook.add_chart(
{'type': 'scatter', 'subtype': 'straight_with_markers'})
else:
chart = workbook.add_chart(
{'type': 'scatter', 'subtype': 'straight'})
chart.add_series({
'name': '=Sheet1!$B$1',
'categories': '=Sheet1!$A$2:$A$' + str(len(x2) + 1),
'values': '=Sheet1!$B$2:$B$' + str(len(y2) + 1),
})
chart.set_title({'name': 'Emission spectrum'})
chart.set_x_axis(
{'name': 'Energy (keV)', 'min': 0, 'max': str(x2[-1])})
chart.set_y_axis({'name': 'Photon density (1/keV)'})
chart.set_legend({'position': 'none'})
chart.set_style(11)
worksheet.insert_chart('D3', chart, {'x_offset': 25, 'y_offset': 10})
workbook.close()
def get_norm(self, weight=None):
"""
Return the norm of the spectrum using a weighting function.
Args:
weight: A function used as a weight to calculate the norm. Typical examples are:
* weight(E)=1 [Photon number]
* weight(E)=E [Energy]
* weight(E)=fluence2Dose(E) [Dose]
Returns:
(float): The calculated norm.
"""
if weight is None:
w = lambda x: 1
else:
w = weight
y2 = list(map(lambda x, y: w(x) * y, self.x, self.y))
return integrate.simps(y2, x=self.x) + sum([w(a[0]) * a[1] for a in self.discrete])
def set_norm(self, value=1, weight=None):
"""
Set the norm of the spectrum using a weighting function.
Args:
value (float): The norm of the modified spectrum in the given convention.
weight: A function used as a weight to calculate the norm. Typical examples are:
* weight(E)=1 [Photon number]
* weight(E)=E [Energy]
* weight(E)=fluence2Dose(E) [Dose]
"""
norm = self.get_norm(weight=weight) / value
self.y = [a / norm for a in self.y]
self.discrete = [[a[0], a[1] / norm, a[2]] for a in self.discrete]
def hvl(self, value=0.5, weight=lambda x: 1, mu=lambda x: 1, energy_min=0):
"""
Calculate a generalized half-value-layer.
        This method calculates the depth of a material needed for a certain dosimetric magnitude to decrease by a given factor.
Args:
            value (float): The factor by which the desired magnitude is decreased. Must be in [0, 1].
weight: A function used as a weight to calculate the norm. Typical examples are:
* weight(E)=1 [Photon number]
* weight(E)=E [Energy]
* weight(E)=fluence2Dose(E) [Dose]
mu: The energy absorption coefficient as a function of energy.
energy_min (float): A low-energy cutoff to use in the calculation.
Returns:
(float): The generalized hvl in cm.
"""
# TODO: (?) Cut characteristic if below cutoff. However, such a high cutoff
# would probably make no sense
# Use low-energy cutoff
low_index = bisect_left(self.x, energy_min)
x = self.x[low_index:]
y = self.y[low_index:]
# Normalize to 1 with weighting function
y2 = list(map(lambda a, b: weight(a) * b, x, y))
discrete2 = [weight(a[0]) * a[1] for a in self.discrete]
n2 = integrate.simps(y2, x=x) + sum(discrete2)
y3 = [a / n2 for a in y2]
discrete3 = [[a[0], weight(a[0]) * a[1] / n2] for a in self.discrete]
# Now we only need to add attenuation as a function of depth
f = lambda t: integrate.simps(list(map(lambda a, b: b * math.exp(-mu(a) * t), x, y3)), x=x) + sum(
[c[1] * math.exp(-mu(c[0]) * t) for c in discrete3]) - value
# Search the order of magnitude of the root (using the fact that f is
# monotonically decreasing)
a = 1.0
if f(a) > 0:
while f(a) > 0:
a *= 10.0
# Now f(a)<=0 and f(a*0.1)>0
return optimize.brentq(f, a * 0.1, a)
else:
while f(a) < 0:
a *= 0.1
# Now f(a)>=0 and f(a*10)<0
return optimize.brentq(f, a, a * 10.0)
def attenuate(self, depth=1, mu=lambda x: 1):
"""
        Attenuate the spectrum as if it passed through a given depth of material with attenuation described by a given
attenuation coefficient. Consistent units should be used.
Args:
depth: The amount of material (typically in cm).
mu: The energy-dependent absorption coefficient (typically in cm^-1).
"""
self.y = list(
map(lambda x, y: y * math.exp(-mu(x) * depth), self.x, self.y))
self.discrete = list(
map(lambda l: [l[0], l[1] * math.exp(-mu(l[0]) * depth), l[2]], self.discrete))
def __add__(self, other):
"""Add two instances, assuming that makes sense."""
if not isinstance(other, Spectrum): # so s+0=s and sum([s1, s2,...]) makes sense
return self
s = Spectrum()
s.x = self.x
s.y = [a + b for a, b in zip(self.y, other.y)]
s.discrete = [[a[0], a[1] + b[1], a[2]] for a, b in zip(self.discrete, other.discrete)]
return s
def __radd__(self, other):
return self.__add__(other)
def __mul__(self, other):
"""Multiply the counts by an scalar."""
s2 = self.clone()
s2.y = [a * other for a in self.y]
s2.discrete = [[a[0], a[1] * other, a[2]] for a in self.discrete]
return s2
def __rmul__(self, other):
return self.__mul__(other)
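# Illustrative sketch of the Spectrum arithmetic documented above: scaling by a
# scalar and adding two spectra sharing the same mesh. The helper and the mesh
# values are hypothetical.
def _example_spectrum_arithmetic():
    s1 = Spectrum()
    s1.x = [10.0, 20.0, 30.0]
    s1.y = [1.0, 2.0, 1.0]
    s2 = s1.clone() * 2        # doubled counts
    s3 = s1 + s2               # continuum parts are summed point-wise
    return s3.get_norm()       # photon-number norm (weight(E) = 1)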
# --------------------Spectrum calculation functionality----------------#
def get_fluence(e_0=100.0):
"""
Returns a function representing the electron fluence with the distance in CSDA units.
Args:
e_0 (float): The kinetic energy whose CSDA range is used to scale the distances.
Returns:
A function representing fluence(x,u) with x in CSDA units.
"""
# List of available energies
e0_str_list = list(map(lambda x: (os.path.split(x)[1]).split(".csv")[
0], glob(os.path.join(data_path, "fluence", "*.csv"))))
e0_list = sorted(list(map(int, list(filter(str.isdigit, e0_str_list)))))
e_closest = min(e0_list, key=lambda x: abs(x - e_0))
with open(os.path.join(data_path, "fluence/grid.csv"), 'r') as csvfile:
r = csv.reader(csvfile, delimiter=' ', quotechar='|',
quoting=csv.QUOTE_MINIMAL)
t = next(r)
x = np.array([float(a) for a in t[0].split(",")])
t = next(r)
u = np.array([float(a) for a in t[0].split(",")])
t = []
with open(os.path.join(data_path, "fluence", "".join([str(e_closest), ".csv"])), 'r') as csvfile:
r = csv.reader(csvfile, delimiter=' ', quotechar='|',
quoting=csv.QUOTE_MINIMAL)
for row in r:
t.append([float(a) for a in row[0].split(",")])
t = np.array(t)
f = interpolate.RectBivariateSpline(x, u, t, kx=1, ky=1)
# Note f is returning numpy 1x1 arrays
return f
# return lambda x,u:f(x,u)[0]
def get_cs(e_0=100, z=74):
"""
Returns a function representing the scaled bremsstrahlung cross_section.
Args:
e_0 (float): The electron kinetic energy, used to scale u=e_e/e_0.
z (int): Atomic number of the material.
Returns:
A function representing cross_section(e_g,u) in mb/keV, with e_g in keV.
"""
# NOTE: Data is given for E0>1keV. CS values below this level should be used with caution.
# The default behaviour is to keep it constant
with open(os.path.join(data_path, "cs/grid.csv"), 'r') as csvfile:
r = csv.reader(csvfile, delimiter=' ', quotechar='|',
quoting=csv.QUOTE_MINIMAL)
t = next(r)
e_e = np.array([float(a) for a in t[0].split(",")])
log_e_e = np.log10(e_e)
t = next(r)
k = np.array([float(a) for a in t[0].split(",")])
t = []
with open(os.path.join(data_path, "cs/%d.csv" % z), 'r') as csvfile:
r = csv.reader(csvfile, delimiter=' ', quotechar='|',
quoting=csv.QUOTE_MINIMAL)
for row in r:
t.append([float(a) for a in row[0].split(",")])
t = np.array(t)
scaled = interpolate.RectBivariateSpline(log_e_e, k, t, kx=3, ky=1)
m_electron = 511
z2 = z * z
return lambda e_g, u: (u * e_0 + m_electron) ** 2 * z2 / (u * e_0 * e_g * (u * e_0 + 2 * m_electron)) * (
scaled(np.log10(u * e_0), e_g / (u * e_0)))
def get_mu(z=74):
"""
Returns a function representing an energy-dependent attenuation coefficient.
Args:
z (int or str): The identifier of the material in the data folder, typically the atomic number.
Returns:
The attenuation coefficient mu(E) in cm^-1 as a function of the energy measured in keV.
"""
with open(os.path.join(data_path, "mu", "".join([str(z), ".csv"])), 'r') as csvfile:
r = csv.reader(csvfile, delimiter=' ', quotechar='|',
quoting=csv.QUOTE_MINIMAL)
t = next(r)
x = [float(a) for a in t[0].split(",")]
t = next(r)
y = [float(a) for a in t[0].split(",")]
return log_interp_1d(x, y)
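# Illustrative sketch: looking up the attenuation coefficient, assuming the
# bundled data file for z=74 (tungsten) is present. The helper and the query
# energy are hypothetical.
def _example_mu_lookup():
    mu_w = get_mu(74)      # mu(E) in cm^-1 with E in keV
    return mu_w(60.0)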
def get_csda(z=74):
"""
    Returns a function representing the CSDA range in the given material.
Args:
z (int): Atomic number of the material.
Returns:
        The CSDA range in cm in the material as a function of the electron kinetic energy in keV.
"""
with open(os.path.join(data_path, "csda/%d.csv" % z), 'r') as csvfile:
r = csv.reader(csvfile, delimiter=' ', quotechar='|',
quoting=csv.QUOTE_MINIMAL)
t = next(r)
x = [float(a) for a in t[0].split(",")]
t = next(r)
y = [float(a) for a in t[0].split(",")]
return interpolate.interp1d(x, y, kind='linear')
def get_mu_csda(e_0, z=74):
"""
    Returns a function representing the CSDA-scaled energy-dependent attenuation coefficient in the given material.
Args:
e_0 (float): The electron initial kinetic energy.
z (int): Atomic number of the material.
Returns:
The attenuation coefficient mu(E) in CSDA units as a function of the energy measured in keV.
"""
mu = get_mu(z)
csda = get_csda(z=z)(e_0)
return lambda e: mu(e) * csda
def get_fluence_to_dose():
"""
Returns a function representing the weighting factor which converts fluence to dose.
Returns:
A function representing the weighting factor which converts fluence to dose in Gy * cm^2.
"""
with open(os.path.join(data_path, "fluence2dose/f2d.csv"), 'r') as csvfile:
r = csv.reader(csvfile, delimiter=' ', quotechar='|',
quoting=csv.QUOTE_MINIMAL)
t = next(r)
x = [float(a) for a in t[0].split(",")]
t = next(r)
y = [float(a) for a in t[0].split(",")]
return interpolate.interp1d(x, y, kind='linear')
def get_source_function(fluence, cs, mu, theta, e_g, phi=0.0):
"""
Returns the attenuated source function (Eq. 2 in the paper) for the given parameters.
An E_0-dependent factor (the fraction found there) is excluded. However, the E_0 dependence is removed in
integrate_source.
Args:
fluence: The function representing the fluence.
cs: The function representing the bremsstrahlung cross-section.
mu: The function representing the attenuation coefficient.
theta (float): The emission angle in degrees, the anode's normal being at 90º.
e_g (float): The emitted photon energy in keV.
        phi (float): The elevation angle in degrees, the anode's normal being at 0º.
Returns:
The attenuated source function s(u,x).
"""
factor = -mu(e_g) / math.sin(math.radians(theta)) / math.cos(math.radians(phi))
return lambda u, x: fluence(x, u) * cs(e_g, u) * math.exp(factor * x)
def integrate_source(fluence, cs, mu, theta, e_g, e_0, phi=0.0, x_min=0.0, x_max=0.6, epsrel=0.1, z=74):
"""
Find the integral of the attenuated source function.
An E_0-independent factor is excluded (i.e., the E_0 dependence on get_source_function is taken into account here).
Args:
fluence: The function representing the fluence.
cs: The function representing the bremsstrahlung cross-section.
mu: The function representing the attenuation coefficient.
theta (float): The emission angle in degrees, the anode's normal being at 90º.
e_g: (float): The emitted photon energy in keV.
e_0 (float): The electron initial kinetic energy.
        phi (float): The elevation angle in degrees, the anode's normal being at 0º.
x_min: The lower-bound of the integral in depth, scaled by the CSDA range.
x_max: The upper-bound of the integral in depth, scaled by the CSDA range.
epsrel: The relative tolerance of the integral.
z (int): Atomic number of the material.
Returns:
float: The value of the integral.
"""
if e_g >= e_0:
return 0
f = get_source_function(fluence, cs, mu, theta, e_g, phi=phi)
(y, y_err) = custom_dblquad(f, x_min, x_max, e_g / e_0, 1, epsrel=epsrel, limit=100)
# The factor includes n_med, its units being 1/(mb * r_CSDA). We only take into account the r_CSDA dependence.
y *= get_csda(z=z)(e_0)
return y
def add_char_radiation(s, method="fraction_above_poly"):
"""
    Adds characteristic radiation to a calculated bremsstrahlung spectrum, assuming it is a tungsten-generated spectrum.
If a discrete component already exists in the spectrum, it is replaced.
Args:
s (:obj:`Spectrum`): The spectrum whose discrete component is recalculated.
method (str): The method to use to calculate the discrete component. Available methods include:
* 'fraction_above_linear': Use a linear relation between bremsstrahlung above the K-edge and peaks.
* 'fraction_above_poly': Use polynomial fits between bremsstrahlung above the K-edge and peaks.
"""
s.discrete = []
if s.x[-1] < 69.51: # If under k edge, no char radiation
return
f = s.get_continuous_function()
norm = integrate.quad(f, s.x[0], s.x[-1], limit=2000)[0]
fraction_above = integrate.quad(f, 74, s.x[-1], limit=2000)[0] / norm
if method == "fraction_above_linear":
s.discrete.append([58.65, 0.1639 * fraction_above * norm, 1])
s.discrete.append([67.244, 0.03628 * fraction_above * norm, 1])
s.discrete.append([69.067, 0.01410 * fraction_above * norm, 1])
else:
if method != "fraction_above_poly":
print(
"WARNING: Unknown char radiation calculation method. Using fraction_above_poly")
s.discrete.append([58.65, (0.1912 * fraction_above - 0.00615 *
fraction_above ** 2 - 0.1279 * fraction_above ** 3) * norm, 1])
s.discrete.append([67.244, (0.04239 * fraction_above + 0.002003 *
fraction_above ** 2 - 0.02356 * fraction_above ** 3) * norm, 1])
s.discrete.append([69.067, (0.01437 * fraction_above + 0.002346 *
fraction_above ** 2 - 0.009332 * fraction_above ** 3) * norm, 1])
return
def console_monitor(a, b):
"""
Simple monitor function which can be used with :obj:`calculate_spectrum`.
Prints in stdout 'a/b'.
Args:
a: An object representing the completed amount (e.g., a number representing a part...).
b: An object representing the total amount (... of a number representing a total).
"""
print("Calculation: ", a, "/", b)
def calculate_spectrum_mesh(e_0, theta, mesh, phi=0.0, epsrel=0.2, monitor=console_monitor, z=74):
"""
Calculates the x-ray spectrum for given parameters.
Characteristic peaks are also calculated by add_char_radiation, which is called with the default parameters.
Args:
e_0 (float): Electron kinetic energy in keV
theta (float): X-ray emission angle in degrees, the normal being at 90º
mesh (list of float or ndarray): The photon energies where the integral will be evaluated
phi (float): X-ray emission elevation angle in degrees.
epsrel (float): The tolerance parameter used in numeric integration.
monitor: A function to be called after each iteration with arguments finished_count, total_count. See for example :obj:`console_monitor`.
z (int): Atomic number of the material.
Returns:
:obj:`Spectrum`: The calculated spectrum
"""
# Prepare spectrum
s = Spectrum()
s.x = mesh
mesh_len = len(mesh)
# Prepare integrand function
fluence = get_fluence(e_0)
cs = get_cs(e_0, z=z)
mu = get_mu_csda(e_0, z=z)
# quad may raise warnings about the numerical integration method,
# which are related to the estimated accuracy. Since this is not relevant,
# they are suppressed.
warnings.simplefilter("ignore")
for i, e_g in enumerate(s.x):
s.y.append(integrate_source(fluence, cs, mu, theta, e_g, e_0, phi=phi, epsrel=epsrel, z=z))
if monitor is not None:
monitor(i + 1, mesh_len)
if z == 74:
add_char_radiation(s)
return s
def calculate_spectrum(e_0, theta, e_min, num_e, phi=0.0, epsrel=0.2, monitor=console_monitor, z=74):
"""
Calculates the x-ray spectrum for given parameters.
Characteristic peaks are also calculated by add_char_radiation, which is called with the default parameters.
Args:
e_0 (float): Electron kinetic energy in keV
theta (float): X-ray emission angle in degrees, the normal being at 90º
e_min (float): Minimum kinetic energy to calculate in the spectrum in keV
num_e (int): Number of points to calculate in the spectrum
phi (float): X-ray emission elevation angle in degrees.
epsrel (float): The tolerance parameter used in numeric integration.
monitor: A function to be called after each iteration with arguments finished_count, total_count. See for example :obj:`console_monitor`.
z (int): Atomic number of the material.
Returns:
:obj:`Spectrum`: The calculated spectrum
"""
return calculate_spectrum_mesh(e_0, theta, np.linspace(e_min, e_0, num=num_e, endpoint=True), phi=phi,
epsrel=epsrel, monitor=monitor, z=z)
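# Illustrative sketch of typical non-CLI usage, assuming the packaged data
# files are available. The helper, the tube parameters and the mesh size are
# hypothetical; monitor=None suppresses the per-point progress output.
def _example_calculate_spectrum():
    s = calculate_spectrum(100.0, 12, 3.0, 50, epsrel=0.5, monitor=None)
    s.set_norm(value=1, weight=get_fluence_to_dose())  # normalize to unit dose
    return s.get_points()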
def cli():
import argparse
import sys
parser = argparse.ArgumentParser(description='Calculate a bremsstrahlung spectrum.')
parser.add_argument('e_0', metavar='E0', type=float,
help='Electron kinetic energy in keV')
parser.add_argument('theta', metavar='theta', type=float, default=12,
help="X-ray emission angle in degrees, the anode's normal being at 90º.")
parser.add_argument('--phi', metavar='phi', type=float, default=0,
help="X-ray emission altitude in degrees, the anode's normal being at 0º.")
parser.add_argument('--z', metavar='z', type=int, default=74,
help="Atomic number of the material (characteristic radiation is only available for z=74).")
parser.add_argument('--e_min', metavar='e_min', type=float, default=3.0,
help="Minimum kinetic energy in keV in the bremsstrahlung calculation.")
parser.add_argument('--n_points', metavar='n_points', type=int, default=50,
help="Number of points used in the bremsstrahlung calculation.")
parser.add_argument('--mesh', metavar='e_i', type=float, nargs='+',
help="Energy mesh where the bremsstrahlung will be calculated. "
"Overrides e_min and n_points parameters.")
parser.add_argument('--epsrel', metavar='tolerance', type=float, default=0.5,
help="Numerical tolerance in integration.")
parser.add_argument('-o', '--output', metavar='path', type=str,
help="Output file. Available formats are csv, xlsx, and pkl, selected by the file extension. "
"pkl appends objects using the pickle module. Note you have to import the Spectrum class "
" INTO THE NAMESPACE (e.g., from xpecgen.xpecgen import Spectrum) to load them. "
"If this argument is not provided, points are written to the standard output and "
"calculation monitor is not displayed.")
parser.add_argument('--overwrite', action="store_true",
help="If this flag is set and the output is a pkl file, overwrite its content instead of "
"appending.")
args = parser.parse_args()
if args.output is not None:
if "." not in args.output:
print("Output file format unknown", file=sys.stderr)
exit(-1)
else:
ext = args.output.split(".")[-1].lower()
if ext not in ["csv", "xlsx", "pkl"]:
print("Output file format unknown", file=sys.stderr)
exit(-1)
monitor = console_monitor
else:
monitor = None
if args.mesh is None:
mesh = np.linspace(args.e_min, args.e_0, num=args.n_points, endpoint=True)
else:
mesh = args.mesh
s = calculate_spectrum_mesh(args.e_0, args.theta, mesh, phi=args.phi, epsrel=args.epsrel, monitor=monitor, z=args.z)
x2, y2 = s.get_points()
if args.output is None:
[print("%.6g, %.6g" % (x, y)) for x, y in zip(x2, y2)]
elif ext == "csv":
s.export_csv(args.output)
elif ext == "xlsx":
s.export_xlsx(args.output)
elif ext == "pkl":
import pickle
print(args.overwrite)
if args.overwrite:
mode = "wb"
else:
mode = "ab"
with open(args.output, mode) as output:
pickle.dump(s, output, pickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
cli()
| gpl-3.0 |
Lx37/seaborn | seaborn/algorithms.py | 35 | 6889 | """Algorithms to support fitting routines in seaborn plotting functions."""
from __future__ import division
import numpy as np
from scipy import stats
from .external.six.moves import range
def bootstrap(*args, **kwargs):
"""Resample one or more arrays with replacement and store aggregate values.
Positional arguments are a sequence of arrays to bootstrap along the first
axis and pass to a summary function.
Keyword arguments:
n_boot : int, default 10000
Number of iterations
axis : int, default None
Will pass axis to ``func`` as a keyword argument.
units : array, default None
Array of sampling unit IDs. When used the bootstrap resamples units
and then observations within units instead of individual
datapoints.
smooth : bool, default False
If True, performs a smoothed bootstrap (draws samples from a kernel
        density estimate); only works for one-dimensional inputs and cannot
        be used when `units` is present.
func : callable, default np.mean
Function to call on the args that are passed in.
random_seed : int | None, default None
Seed for the random number generator; useful if you want
reproducible resamples.
Returns
-------
boot_dist: array
array of bootstrapped statistic values
"""
# Ensure list of arrays are same length
if len(np.unique(list(map(len, args)))) > 1:
raise ValueError("All input arrays must have the same length")
n = len(args[0])
# Default keyword arguments
n_boot = kwargs.get("n_boot", 10000)
func = kwargs.get("func", np.mean)
axis = kwargs.get("axis", None)
units = kwargs.get("units", None)
smooth = kwargs.get("smooth", False)
random_seed = kwargs.get("random_seed", None)
if axis is None:
func_kwargs = dict()
else:
func_kwargs = dict(axis=axis)
# Initialize the resampler
rs = np.random.RandomState(random_seed)
# Coerce to arrays
args = list(map(np.asarray, args))
if units is not None:
units = np.asarray(units)
# Do the bootstrap
if smooth:
return _smooth_bootstrap(args, n_boot, func, func_kwargs)
if units is not None:
return _structured_bootstrap(args, n_boot, units, func,
func_kwargs, rs)
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n, n)
sample = [a.take(resampler, axis=0) for a in args]
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
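# Illustrative sketch: a bootstrapped 95% confidence interval for the mean of a
# small made-up sample. The helper and the data are hypothetical; random_seed
# keeps the resamples reproducible.
def _example_bootstrap_ci():
    data = np.array([3.1, 2.9, 3.4, 3.0, 3.3, 2.8])
    boot_means = bootstrap(data, func=np.mean, n_boot=2000, random_seed=0)
    return np.percentile(boot_means, [2.5, 97.5])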
def _structured_bootstrap(args, n_boot, units, func, func_kwargs, rs):
"""Resample units instead of datapoints."""
unique_units = np.unique(units)
n_units = len(unique_units)
args = [[a[units == unit] for unit in unique_units] for a in args]
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n_units, n_units)
sample = [np.take(a, resampler, axis=0) for a in args]
lengths = map(len, sample[0])
resampler = [rs.randint(0, n, n) for n in lengths]
sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)]
for a in sample]
sample = list(map(np.concatenate, sample))
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def _smooth_bootstrap(args, n_boot, func, func_kwargs):
"""Bootstrap by resampling from a kernel density estimate."""
n = len(args[0])
boot_dist = []
kde = [stats.gaussian_kde(np.transpose(a)) for a in args]
for i in range(int(n_boot)):
sample = [a.resample(n).T for a in kde]
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def randomize_corrmat(a, tail="both", corrected=True, n_iter=1000,
random_seed=None, return_dist=False):
"""Test the significance of set of correlations with permutations.
By default this corrects for multiple comparisons across one side
of the matrix.
Parameters
----------
a : n_vars x n_obs array
array with variables as rows
tail : both | upper | lower
whether test should be two-tailed, or which tail to integrate over
corrected : boolean
if True reports p values with respect to the max stat distribution
n_iter : int
number of permutation iterations
random_seed : int or None
seed for RNG
return_dist : bool
if True, return n_vars x n_vars x n_iter
Returns
-------
p_mat : float
        array of probabilities for the actual correlations from the null CDF
"""
if tail not in ["upper", "lower", "both"]:
raise ValueError("'tail' must be 'upper', 'lower', or 'both'")
rs = np.random.RandomState(random_seed)
a = np.asarray(a, np.float)
flat_a = a.ravel()
n_vars, n_obs = a.shape
# Do the permutations to establish a null distribution
null_dist = np.empty((n_vars, n_vars, n_iter))
for i_i in range(n_iter):
perm_i = np.concatenate([rs.permutation(n_obs) + (v * n_obs)
for v in range(n_vars)])
a_i = flat_a[perm_i].reshape(n_vars, n_obs)
null_dist[..., i_i] = np.corrcoef(a_i)
# Get the observed correlation values
real_corr = np.corrcoef(a)
# Figure out p values based on the permutation distribution
p_mat = np.zeros((n_vars, n_vars))
upper_tri = np.triu_indices(n_vars, 1)
if corrected:
if tail == "both":
max_dist = np.abs(null_dist[upper_tri]).max(axis=0)
elif tail == "lower":
max_dist = null_dist[upper_tri].min(axis=0)
elif tail == "upper":
max_dist = null_dist[upper_tri].max(axis=0)
cdf = lambda x: stats.percentileofscore(max_dist, x) / 100.
for i, j in zip(*upper_tri):
observed = real_corr[i, j]
if tail == "both":
p_ij = 1 - cdf(abs(observed))
elif tail == "lower":
p_ij = cdf(observed)
elif tail == "upper":
p_ij = 1 - cdf(observed)
p_mat[i, j] = p_ij
else:
for i, j in zip(*upper_tri):
null_corrs = null_dist[i, j]
cdf = lambda x: stats.percentileofscore(null_corrs, x) / 100.
observed = real_corr[i, j]
if tail == "both":
p_ij = 2 * (1 - cdf(abs(observed)))
elif tail == "lower":
p_ij = cdf(observed)
elif tail == "upper":
p_ij = 1 - cdf(observed)
p_mat[i, j] = p_ij
    # Make p matrix symmetrical with nans on the diagonal
p_mat += p_mat.T
p_mat[np.diag_indices(n_vars)] = np.nan
if return_dist:
return p_mat, null_dist
return p_mat
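# Illustrative sketch: a permutation test on a small random dataset, showing
# the call signature and the shape of the returned p-value matrix. The helper
# and the data are hypothetical.
def _example_randomize_corrmat():
    rs = np.random.RandomState(0)
    a = rs.randn(3, 40)          # 3 variables, 40 observations
    p_mat = randomize_corrmat(a, tail="both", n_iter=200, random_seed=0)
    return p_mat.shape           # (3, 3) with NaN on the diagonal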
| bsd-3-clause |
Ziqi-Li/bknqgis | pandas/pandas/util/_print_versions.py | 11 | 5141 | import os
import platform
import sys
import struct
import subprocess
import codecs
import locale
import importlib
def get_sys_info():
"Returns system information as a dict"
blob = []
# get full commit hash
commit = None
if os.path.isdir(".git") and os.path.isdir("pandas"):
try:
pipe = subprocess.Popen('git log --format="%H" -n 1'.split(" "),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
so, serr = pipe.communicate()
except:
pass
else:
if pipe.returncode == 0:
commit = so
try:
commit = so.decode('utf-8')
except ValueError:
pass
commit = commit.strip().strip('"')
blob.append(('commit', commit))
try:
(sysname, nodename, release,
version, machine, processor) = platform.uname()
blob.extend([
("python", '.'.join(map(str, sys.version_info))),
("python-bits", struct.calcsize("P") * 8),
("OS", "{sysname}".format(sysname=sysname)),
("OS-release", "{release}".format(release=release)),
# ("Version", "{version}".format(version=version)),
("machine", "{machine}".format(machine=machine)),
("processor", "{processor}".format(processor=processor)),
("byteorder", "{byteorder}".format(byteorder=sys.byteorder)),
("LC_ALL", "{lc}".format(lc=os.environ.get('LC_ALL', "None"))),
("LANG", "{lang}".format(lang=os.environ.get('LANG', "None"))),
("LOCALE", '.'.join(map(str, locale.getlocale()))),
])
except:
pass
return blob
def show_versions(as_json=False):
sys_info = get_sys_info()
deps = [
# (MODULE_NAME, f(mod) -> mod version)
("pandas", lambda mod: mod.__version__),
("pytest", lambda mod: mod.__version__),
("pip", lambda mod: mod.__version__),
("setuptools", lambda mod: mod.__version__),
("Cython", lambda mod: mod.__version__),
("numpy", lambda mod: mod.version.version),
("scipy", lambda mod: mod.version.version),
("pyarrow", lambda mod: mod.__version__),
("xarray", lambda mod: mod.__version__),
("IPython", lambda mod: mod.__version__),
("sphinx", lambda mod: mod.__version__),
("patsy", lambda mod: mod.__version__),
("dateutil", lambda mod: mod.__version__),
("pytz", lambda mod: mod.VERSION),
("blosc", lambda mod: mod.__version__),
("bottleneck", lambda mod: mod.__version__),
("tables", lambda mod: mod.__version__),
("numexpr", lambda mod: mod.__version__),
("feather", lambda mod: mod.__version__),
("matplotlib", lambda mod: mod.__version__),
("openpyxl", lambda mod: mod.__version__),
("xlrd", lambda mod: mod.__VERSION__),
("xlwt", lambda mod: mod.__VERSION__),
("xlsxwriter", lambda mod: mod.__version__),
("lxml", lambda mod: mod.etree.__version__),
("bs4", lambda mod: mod.__version__),
("html5lib", lambda mod: mod.__version__),
("sqlalchemy", lambda mod: mod.__version__),
("pymysql", lambda mod: mod.__version__),
("psycopg2", lambda mod: mod.__version__),
("jinja2", lambda mod: mod.__version__),
("s3fs", lambda mod: mod.__version__),
("fastparquet", lambda mod: mod.__version__),
("pandas_gbq", lambda mod: mod.__version__),
("pandas_datareader", lambda mod: mod.__version__),
]
deps_blob = list()
for (modname, ver_f) in deps:
try:
if modname in sys.modules:
mod = sys.modules[modname]
else:
mod = importlib.import_module(modname)
ver = ver_f(mod)
deps_blob.append((modname, ver))
except:
deps_blob.append((modname, None))
if (as_json):
try:
import json
except:
import simplejson as json
j = dict(system=dict(sys_info), dependencies=dict(deps_blob))
if as_json is True:
print(j)
else:
with codecs.open(as_json, "wb", encoding='utf8') as f:
json.dump(j, f, indent=2)
else:
print("\nINSTALLED VERSIONS")
print("------------------")
for k, stat in sys_info:
print("{k}: {stat}".format(k=k, stat=stat))
print("")
for k, stat in deps_blob:
print("{k}: {stat}".format(k=k, stat=stat))
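# Usage note: pandas exposes this function as pandas.show_versions();
# show_versions() prints the report to stdout, while
# show_versions(as_json="versions.json") writes the same information to a JSON file.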
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-j", "--json", metavar="FILE", nargs=1,
help="Save output as JSON into file, pass in "
"'-' to output to stdout")
(options, args) = parser.parse_args()
if options.json == "-":
options.json = True
show_versions(as_json=options.json)
return 0
if __name__ == "__main__":
sys.exit(main())
| gpl-2.0 |
iaklampanos/bde-climate-1 | scripts/sc5.py | 1 | 23408 | #from __future__ import print_function
import os
import sys
import matplotlib
import multiprocessing
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
import IPython
import subprocess
import sys
from subprocess import Popen, PIPE, STDOUT
import graphviz as gv
import json
import networkx as nx
from datetime import datetime
HIVE='localhost:10000'
PILOT_PATH = '/home/stathis/bde-climate-1'
NC_FILENAME = '/home/stathis/sc5/rsdscs_Amon_HadGEM2-ES_rcp26_r2i1p1_200512-203011.nc'
CLUSTER_USER = 'stathis'
#CLUSTER_IP = '172.17.20.114'
CLUSTER_IP = 'athina'
CLUSTER_DATA_DIR = '/home/stathis/Downloads'
CLUSTER_BUILD_DIR = '/home/stathis/Develop'
PROC='exp'
def get_stamp(msg=''):
return msg + ' ' + str(datetime.now())
def run_shell_command(s):
c = s.split('|')
cp = None # the current process
for i in range(0, len(c)):
c[i] = c[i].replace('**PIPE**', '|')
cin = cout = None
if i > 0:
cin = cp.stdout
if i < len(c)-1:
cout = subprocess.PIPE
sys.stdout.flush()
cp = subprocess.Popen(c[i], stdout=cout, stdin=cin, shell=True)
cp.wait()
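# NOTE: the second definition of run_shell_command below overrides the pipe-aware
# version above, so only the plain subprocess.call() variant is in effect.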
def run_shell_command(s):
subprocess.call(s)
def help():
print('Hello, world')
def _create_user_structure():
''' make create-structure'''
run_shell_command("make -C /home/stathis/bde-climate-1 create-structure")
def updateIngestHTML(x):
global txarea
txarea.value += x.strip() + '<br/>'
def ingest(filename):
''' make ingest-file NETCDFFILE=yourfilewithfullpath '''
txarea.value += get_stamp('Starting ingest') + '\n'
s = execu(ingest_command(netcdffile=filename), pattern=None, fn=updateIngestHTML)
def execu(command, pattern=None, fn=sys.stdout.write):
"""
pattern is not really useful;
fn is being called multiple times until the command has completed
"""
p = subprocess.Popen(str(command), shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
while True:
l = p.stdout.readline()
if pattern == None:
fn(l)
else:
if str(l).find(pattern) > 0:
fn(l)
if p.poll() != None:
break
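# Illustrative usage (not from the original script): execu() streams each line of
# the command's output to the callback `fn` until the process exits, e.g.
#
#     lines = []
#     execu('ls *.nc', fn=lambda l: lines.append(l.strip()))
#     # `lines` now holds the command's stdout, one entry per line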
def ingest_command(clusteruser=CLUSTER_USER,
clusterip=CLUSTER_IP,
clim1_dd=CLUSTER_DATA_DIR,
clim1_bd=CLUSTER_BUILD_DIR,
netcdffile='somefile'):
curr_user = os.environ['USER']
sti0 = 'scp ./__FILE__ __UNAME__@__HOST__:__BUILD_DIR__/bde-climate-1'
sti1 = 'ssh __UNAME__@__HOST__ -T "export CLIMATE1_CASSANDRA_DATA_DIR=__DATA_DIR__ && export CLIMATE1_BUILD_DIR=__BUILD_DIR__ && cd __BUILD_DIR__/bde-climate-1 && make -s ingest-file-background NETCDFFILE=__FILE__ CUSER=__CUSER__"'
sti = sti0 + ' && ' + sti1
sti = sti.replace('__UNAME__', clusteruser)
sti = sti.replace( '__HOST__', clusterip)
sti = sti.replace('__DATA_DIR__', clim1_dd)
sti = sti.replace('__BUILD_DIR__', clim1_bd)
sti = sti.replace('__FILE__', netcdffile)
sti = sti.replace('__CUSER__', curr_user)
# print(sti)
return sti
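# Note: ingest_command() only renders a shell one-liner; nothing is executed here.
# With the defaults above the result is roughly
#   scp ./<file> stathis@athina:/home/stathis/Develop/bde-climate-1 && ssh stathis@athina -T "... make -s ingest-file-background NETCDFFILE=<file> ..."
# which ingest() then hands to execu().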
def prov_command(clusteruser=CLUSTER_USER,
clusterip=CLUSTER_IP,
clim1_dd=CLUSTER_DATA_DIR,
clim1_bd=CLUSTER_BUILD_DIR,
netcdfkey='somekey'):
# make export-file NETCDFKEY=yourkeysearch NETCDFOUT=nameofnetcdfoutfile
curr_user = os.environ['USER']
sti = 'ssh __UNAME__@__HOST__ -T "export CLIMATE1_CASSANDRA_DATA_DIR=__DATA_DIR__ && export CLIMATE1_BUILD_DIR=__BUILD_DIR__ && cd __BUILD_DIR__/bde-climate-1 && make -s get-prov DATASET=__KEY__ | tee -a /mnt/share500/logs/__CUSER__.log"'
sti = sti.replace('__UNAME__', clusteruser)
sti = sti.replace( '__HOST__', clusterip)
sti = sti.replace('__DATA_DIR__', clim1_dd)
sti = sti.replace('__BUILD_DIR__', clim1_bd)
sti = sti.replace('__KEY__', netcdfkey)
sti = sti.replace('__CUSER__', curr_user)
return sti
def monitor_command(clusteruser=CLUSTER_USER,
clusterip=CLUSTER_IP,
clim1_dd=CLUSTER_DATA_DIR,
clim1_bd=CLUSTER_BUILD_DIR,
proc=PROC):
# make monitor log
curr_user = os.environ['USER']
sti = 'ssh __UNAME__@__HOST__ -T "export CLIMATE1_CASSANDRA_DATA_DIR=__DATA_DIR__ && export CLIMATE1_BUILD_DIR=__BUILD_DIR__ && cd __BUILD_DIR__/bde-climate-1 && make -s monitor-log CUSER=__CUSER__ PROC=__PROC__"'
sti = sti.replace('__UNAME__', clusteruser)
sti = sti.replace( '__HOST__', clusterip)
sti = sti.replace('__DATA_DIR__', clim1_dd)
sti = sti.replace('__BUILD_DIR__', clim1_bd)
sti = sti.replace('__PROC__', proc)
sti = sti.replace('__CUSER__', curr_user)
return sti
def export_command(clusteruser=CLUSTER_USER,
clusterip=CLUSTER_IP,
clim1_dd=CLUSTER_DATA_DIR,
clim1_bd=CLUSTER_BUILD_DIR,
netcdfkey='somekey'):
# make export-file NETCDFKEY=yourkeysearch NETCDFOUT=nameofnetcdfoutfile
curr_user = os.environ['USER']
sti0 = 'echo "Retrieving __FILE__..." && scp __UNAME__@__HOST__:__BUILD_DIR__/bde-climate-1/__FILE__ .'
sti1 = 'echo "Exporting __FILE__..." && ssh __UNAME__@__HOST__ -T "export CLIMATE1_CASSANDRA_DATA_DIR=__DATA_DIR__ && export CLIMATE1_BUILD_DIR=__BUILD_DIR__ && cd __BUILD_DIR__/bde-climate-1 && make -s export-file CUSER=__CUSER__ NETCDFKEY=__KEY__ NETCDFOUT=__FILE__ | tee -a /mnt/share500/logs/__CUSER__.exp.log"'
sti = sti1 + ' && ' + sti0
sti = sti.replace('__UNAME__', clusteruser)
sti = sti.replace( '__HOST__', clusterip)
sti = sti.replace('__DATA_DIR__', clim1_dd)
sti = sti.replace('__BUILD_DIR__', clim1_bd)
sti = sti.replace('__KEY__', netcdfkey)
sti = sti.replace('__FILE__', 'exp_' + netcdfkey)
sti = sti.replace('__CUSER__', curr_user)
return sti
def datakeys_command(clusteruser=CLUSTER_USER,
clusterip=CLUSTER_IP,
clim1_dd=CLUSTER_DATA_DIR,
clim1_bd=CLUSTER_BUILD_DIR):
# make export-file NETCDFKEY=yourkeysearch NETCDFOUT=nameofnetcdfoutfile
curr_user = os.environ['USER']
sti = 'ssh __UNAME__@__HOST__ -T "export CLIMATE1_CASSANDRA_DATA_DIR=__DATA_DIR__ && export CLIMATE1_BUILD_DIR=__BUILD_DIR__ && cd __BUILD_DIR__/bde-climate-1 && make -s cassandra-get-datasets | tee -a /mnt/share500/logs/__CUSER__.log"'
sti = sti.replace('__UNAME__', clusteruser)
sti = sti.replace( '__HOST__', clusterip)
sti = sti.replace('__DATA_DIR__', clim1_dd)
sti = sti.replace('__BUILD_DIR__', clim1_bd)
sti = sti.replace('__CUSER__', curr_user)
return sti
global ncfiles
ncfiles = []
def filestolist(x):
ncfiles.append(x.strip())
def netcdf_files():
global ncfiles
ncfiles = []
execu('ls *.nc', fn=filestolist)
return ncfiles[:-1]
def netcdf_global_files():
#global gncfiles
#gncfiles = []
ncfiles = []
execu('ls *.nc | grep -E -v "wrf|met"', fn=filestolist)
return ncfiles[:-1]
global dataset_keys
dataset_keys = []
def populate_cassandra_keys_list(l):
dataset_keys.append(l.strip())
def get_cassandra_data_keys():
global dataset_keys
dataset_keys = []
execu(datakeys_command(), fn=populate_cassandra_keys_list)
return dataset_keys[:-1]
def update_export_HTML(x):
tx_export.value += x.strip() + '<br/>'
def update_wrf_HTML(x):
tx_wrf.value += x.strip() + '<br/>'
def export_clicked(b):
tx_export.value += get_stamp('Starting export') + '<br/>'
dexp = multiprocessing.Process(name='export', target=execu, args=(export_command(netcdfkey=dd_export.value), None, update_export_HTML,))
dexp.daemon = True
dexp.start()
#execu(export_command(netcdfkey=dd_export.value), fn=update_export_HTML, pattern=None)
def monitor_export_clicked(b):
global m_exp
global mexp
if not m_exp:
tx_export.value += get_stamp('Monitoring export') + '<br/>'
mexp = multiprocessing.Process(name='monitor_export', target=monitor_export)
mexp.daemon = True
mexp.start()
m_exp = True
else:
if mexp:
mexp.terminate()
def monitor_export():
execu(monitor_command(proc='exp'), fn=update_export_HTML, pattern=None)
def monitor_ingest():
execu(monitor_command(proc='ing'), fn=updateIngestHTML, pattern=None)
def monitor_wrf():
execu(monitor_command(proc='wrf'), fn=update_wrf_HTML, pattern=None)
from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
def extract_var(name):
toks = name.split('_')
for t in toks:
if t != 'exp':
return t
continue
return None
def plot_clicked(b):
global tx_plot
global dd_plot
IPython.display.clear_output()
#clear_output(wait=True)
# tx_plot.value += 'Plot clicked<br/>'
# tx_plot.value = '<script>$(".output").remove()</script>'
my_example_nc_file = dd_plot.value
fh = Dataset(my_example_nc_file, mode='r')
var = extract_var(my_example_nc_file)
if var == None:
tx_plot.value += 'Unknown error <br/>'
return
# tx_plot.value += var + '<br/>'
lons = fh.variables['lon'][:]
lats = fh.variables['lat'][:]
time = fh.variables['time'][:]
rsdscs = fh.variables[var][:]
#print(rsdscs.shape)
#print(rsdscs[0].shape)
rsdscs_units = fh.variables[var].units
fh.close()
lon_0 = lons.mean()
lat_0 = lats.mean()
m = Basemap(width=50000000,height=35000000,
resolution='l',projection='cyl',\
lat_ts=40,lat_0=lat_0,lon_0=lon_0)
lon, lat = np.meshgrid(lons, lats)
xi, yi = m(lon, lat)
#Add Size
fig = plt.figure(figsize=(16,16))
# Plot Data
cs = m.pcolor(xi,yi,np.squeeze(rsdscs[0]))
# Add Grid Lines
m.drawparallels(np.arange(-80., 81., 20.), labels=[1,0,0,0], fontsize=10)
m.drawmeridians(np.arange(-180., 181., 20.), labels=[0,0,0,1], fontsize=10)
# Add Coastlines, States, and Country Boundaries
m.drawcoastlines()
m.drawstates()
m.drawcountries()
# Add Colorbar
cbar = m.colorbar(cs, location='bottom', pad="10%")
cbar.set_label(rsdscs_units)
# Add Title
# plt.title('Surface Downwelling Clear-Sky Shortwave Radiation')
#myplot.show()
def display_plot_form():
global dd_plot
global dd_vars_plot
global bt_plot
global tx_plot
global ncfiles
netcdf_global_files()
dd_plot = widgets.Dropdown(
options=ncfiles,
description='Available keys:'
)
#dd_vars_plot = widgets.Dropdown(
# options=['var1','var2'],
# description='Vars:'
#)
bt_plot = widgets.Button(description='Plot')
bt_plot.on_click(plot_clicked)
tx_plot = widgets.HTML()
container = widgets.HBox(children=[dd_plot])
container2 = widgets.HBox(children=[bt_plot])
display(container)
display(container2)
display(tx_plot)
def display_export_form():
# Find available datasets/keys
# Display dropdown list
# Export button
global dd_export
global bt_export
global lt_export
global tx_export
global datakeys
global m_exp
global mexp
m_exp = False
datakeys = get_cassandra_data_keys()
l = widgets.HTML(
value = '<span style="color:#fff;">................................................... </span> '
)
dd_export = widgets.Dropdown(
options=get_cassandra_data_keys(),
description='Available keys:',
)
bt_export = widgets.Button(description="Export")
lt_export = widgets.Button(description="Monitor Export")
#tx_export = widgets.Textarea(height=3)
tx_export = widgets.HTML()
container = widgets.HBox(children=[dd_export, l, bt_export, lt_export])
bt_export.on_click(export_clicked)
lt_export.on_click(monitor_export_clicked)
display(container)
display(tx_export)
import traceback
def update_prov(l):
global provlist
global html_prov
l = l.strip()
if l == '': return
try:
provlist.append(json.loads(l))
except ValueError:
traceback.print_exc(file=sys.stdout)
print l
html_prov.value += 'Unknown error occurred...'
import matplotlib.image as mpimg
from IPython.display import Image, display
from IPython.core.display import HTML
def get_short_id(id):
return id[:6]+'...'
def prov_clicked(b):
global provlist
global html_prov
html_prov.value = ''
html_prov.value = 'Drawing data lineage... <br/>' + html_prov.value
provlist = []
execu(prov_command(netcdfkey=dd_prov.value), fn=update_prov)
g1 = gv.Digraph(format='png')
for i in provlist:
# print i
g1.node(get_short_id(i['id']), '\n'.join(i['paths']))
if i['parentid'] is not None:
g1.node(get_short_id(i['parentid']))
lbl = ''
if 'downscaling' in i and 'agentname' in i['downscaling'][0]:
lbl = i['downscaling'][0]['agentname'] + '\n' + i['downscaling'][0]['et']
g1.edge( get_short_id(i['parentid']), get_short_id(i['id']), label=lbl )
if i['bparentid'] is not None:
g1.node(get_short_id(i['bparentid']))
lbl = ''
if 'downscaling' in i and 'agentname' in i['downscaling'][0]:
lbl = i['downscaling'][0]['agentname'] + '\n' + i['downscaling'][0]['et']
g1.edge( get_short_id(i['bparentid']), get_short_id(i['id']), label=lbl )
# print g1.source
g1.render(filename='img/g1')
#html_prov.value = '<meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate" /> <meta http-equiv="Pragma" content="no-cache" /> <meta http-equiv="Expires" content="0" />'
html_prov.value = '<div><img id="myimg" src=""></div>'
html_prov.value += ' <script> d = new Date(); $("#myimg").attr("src", "img/g1.png?"+d.getTime()); console.log(d.getTime());</script>'
def wrf_clicked(b):
global dd_reg_wrf
global dd_st_wrf
global dd_dur_wrf
global mx_wrf
reg = None
stdate = None
dur = None
if dd_reg_wrf.value == 'Europe':
reg = 'd01'
elif dd_reg_wrf.value == 'Greece':
reg = 'd02'
elif dd_reg_wrf.value == 'Europe-->Greece':
reg = 'd01d02'
else:
pass
if mx_wrf.value:
print "on development"
#monitor_wrf()
else:
IPython.display.clear_output()
print get_stamp('Starting WRF')
stdate = dd_st_wrf.value.replace('-', '')
dur = dd_dur_wrf.value
if reg == 'd01d02':
#print 'run nesting'
execu(wrf_command_nest(region=reg, startdate=stdate, duration=dur), pattern=None)
else:
execu(wrf_command(region=reg, startdate=stdate, duration=dur), pattern=None)
def analytics_command(clusteruser=CLUSTER_USER,
clusterip=CLUSTER_IP,
clim1_dd=CLUSTER_DATA_DIR,
clim1_bd=CLUSTER_BUILD_DIR,
region ='d01',
day = '07'):
# make export-file NETCDFKEY=yourkeysearch NETCDFOUT=nameofnetcdfoutfile
curr_user = os.environ['USER']
sti = 'ssh __UNAME__@__HOST__ -T "export CLIMATE1_CASSANDRA_DATA_DIR=__DATA_DIR__ && export CLIMATE1_BUILD_DIR=__BUILD_DIR__ && cd __BUILD_DIR__/bde-climate-1 && make -s hive-query-daily-indx REG=__REGION__ DAY=__DAY__ CUSER=__CUSER__ | tee -a /mnt/share500/logs/__CUSER__.log"'
sti = sti.replace('__UNAME__', clusteruser)
sti = sti.replace( '__HOST__', clusterip)
sti = sti.replace('__DATA_DIR__', clim1_dd)
sti = sti.replace('__BUILD_DIR__', clim1_bd)
sti = sti.replace('__REGION__', region)
sti = sti.replace('__DAY__', day)
sti = sti.replace('__CUSER__', curr_user)
return sti
def update_analytics_out(x):
global tx_an
if 'c0 |' in x:
toks = x.split('|')
mmax = toks[4]
mmin = toks[5]
tx_an.value = 'Max=' + str(mmax) + ', Min=' + str(mmin) + ' (K)<br/>'
def analytics_click(b):
global dd_days_an
global dd_region_an
global tx_an
tx_an.value = ''
reg = 'd02'
stdate = None
dur = None
if dd_region_an.value == 'Europe':
reg = 'd01'
elif dd_region_an.value == 'Greece':
reg = 'd02'
day = dd_days_an.value.split('-')[2]
execu(analytics_command(region=reg, day=day), fn=update_analytics_out)
if tx_an.value.strip() == '':
tx_an.value = 'Data not found'
def display_analytics_form():
global dd_days_an
global dd_region_an
global bt_an
global tx_an
dd_days_an = widgets.Dropdown(
options=['2016-07-01', '2016-07-03', '2016-07-07'],
value='2016-07-07',
description='Available days:'
)
dd_region_an = widgets.Dropdown(
options=['Europe', 'Greece'],
value='Greece',
description='Available regions:'
)
tx_an = widgets.HTML()
bt_an = widgets.Button(description='Calculate')
container = widgets.HBox(children=[dd_days_an, dd_region_an, bt_an])
bt_an.on_click(analytics_click)
display(container)
display(tx_an)
def display_wrf_form():
global dd_reg_wrf
global dd_st_wrf
global dd_dur_wrf
global bt_wrf
global tx_wrf
global mx_wrf
l = widgets.HTML(
value = '<span style="color:#fff;">................................................... </span> '
)
dd_reg_wrf = widgets.Dropdown(
options=["Europe", "Greece", "Europe-->Greece"],
value="Europe",
description='Available regions:',
)
dd_st_wrf = widgets.Dropdown(
options=["2016-07-01","2016-07-02","2016-07-03","2016-07-04","2016-07-05","2016-07-06","2016-07-07"],
value="2016-07-01",
description='Starting date:',
)
dd_dur_wrf= widgets.Dropdown(
options=['6', '12', '18', '24'],
value='6',
        description='Duration:',
)
bt_wrf = widgets.Button(description="Run WRF")
mx_wrf = widgets.Checkbox(description="Run WRF Monitor", value=False)
tx_wrf = widgets.HTML()
container = widgets.HBox(children=[dd_reg_wrf, dd_st_wrf, dd_dur_wrf])
bt_wrf.on_click(wrf_clicked)
display(container)
display(bt_wrf)
display(mx_wrf)
display(tx_wrf)
def display_prov_form():
global dd_prov
global bt_prov
global html_prov
#global datakeys
#datakeys = get_cassandra_data_keys()
l = widgets.HTML(
value = '<span style="color:#fff;">................................................... </span> '
)
dd_prov = widgets.Dropdown(
options=get_cassandra_data_keys(),
description='Available keys:',
)
bt_prov = widgets.Button(description="Display lineage")
html_prov = widgets.HTML()
container = widgets.HBox(children=[dd_prov, l, bt_prov])
bt_prov.on_click(prov_clicked)
display(container)
display(html_prov)
def display_ingest_form():
global w
global b
global m
global txarea
netcdf_files()
l = widgets.HTML(
value = '<span style="color:#fff;">................................................... </span> '
)
w = widgets.Dropdown(
options=ncfiles,
description='Choose file:',
)
b = widgets.Button(description='Ingest')
m = widgets.Checkbox(description='Ingest Monitor', value=False)
txarea = widgets.HTML()
container = widgets.HBox(children=[w, l, b, m])
b.on_click(ingest_clicked)
display(container)
display(txarea)
def ingest_clicked(b):
global txarea
txarea.value = ''
ingest(filename=w.value)
if m.value:
print "on development"
#monitor_ingest()
else:
IPython.display.clear_output()
def export(filename):
''' make export-file NETCDFKEY=yourkeysearch NETCDFOUT=nameofnetcdfoutfile '''
''' MAY support more selective use-case '''
pass
def wrf_command(clusteruser=CLUSTER_USER,
clusterip=CLUSTER_IP,
clim1_dd=CLUSTER_DATA_DIR,
clim1_bd=CLUSTER_BUILD_DIR,
region ='R1',
startdate = '20070101',
duration = 6):
# make export-file NETCDFKEY=yourkeysearch NETCDFOUT=nameofnetcdfoutfile
curr_user = os.environ['USER']
sti = 'ssh __UNAME__@__HOST__ -T "export CLIMATE1_CASSANDRA_DATA_DIR=__DATA_DIR__ && export CLIMATE1_BUILD_DIR=__BUILD_DIR__ && cd __BUILD_DIR__/bde-climate-1 && make -s run-wrf-background RSTARTDT=__STARTDATE__ RDURATION=__RDURATION__ REG=__REGION__ CUSER=__CUSER__"'
sti = sti.replace('__UNAME__', clusteruser)
sti = sti.replace( '__HOST__', clusterip)
sti = sti.replace('__DATA_DIR__', clim1_dd)
sti = sti.replace('__BUILD_DIR__', clim1_bd)
sti = sti.replace('__STARTDATE__', startdate)
sti = sti.replace('__RDURATION__', duration)
sti = sti.replace('__REGION__', region)
sti = sti.replace('__CUSER__', curr_user)
return sti
def wrf_command_nest(clusteruser=CLUSTER_USER,
clusterip=CLUSTER_IP,
clim1_dd=CLUSTER_DATA_DIR,
clim1_bd=CLUSTER_BUILD_DIR,
region ='R1',
startdate = '20070101',
duration = 6):
# make export-file NETCDFKEY=yourkeysearch NETCDFOUT=nameofnetcdfoutfile
curr_user = os.environ['USER']
sti = 'ssh __UNAME__@__HOST__ -T "export CLIMATE1_CASSANDRA_DATA_DIR=__DATA_DIR__ && export CLIMATE1_BUILD_DIR=__BUILD_DIR__ && cd __BUILD_DIR__/bde-climate-1 && make -s run-wrf-nest-background RSTARTDT=__STARTDATE__ RDURATION=__RDURATION__ REG=__REGION__ CUSER=__CUSER__"'
sti = sti.replace('__UNAME__', clusteruser)
sti = sti.replace( '__HOST__', clusterip)
sti = sti.replace('__DATA_DIR__', clim1_dd)
sti = sti.replace('__BUILD_DIR__', clim1_bd)
sti = sti.replace('__STARTDATE__', startdate)
sti = sti.replace('__RDURATION__', duration)
sti = sti.replace('__REGION__', region)
sti = sti.replace('__CUSER__', curr_user)
return sti
def _run_wrf():
'''make run-wrf RSTARTDT=StartDateOfModel RDURATION=DurationOfModelInHours REG=<d01|d02|d03> '''
pass
def _run_wrf_nest():
'''run-wrf RSTARTDT=StartDateOfModel RDURATION=DurationOfModelInHours REG=<d01d02|d02d03>'''
def view_data(data=0):
''' matplotlib thingy '''
print('a')
pass
def hive_command(clusteruser='stathis', clusterip='172.17.20.106', clim1_bd='/home/stathis/Downloads', clim1_dd='/home/stathis/bde-climate-1', command='ls /home'):
curr_user = os.environ['USER']
sti = 'ssh __UNAME__@__HOST__ -T "export CLIMATE1_CASSANDRA_DATA_DIR=__DATA_DIR__ && export CLIMATE1_BUILD_DIR=__BUILD_DIR__ && cd __BUILD_DIR__ && docker exec -i hive __COMMND__ | tee -a /mnt/share500/logs/__CUSER__.log"'
sti = sti.replace('__UNAME__', clusteruser)
sti = sti.replace( '__HOST__', clusterip)
sti = sti.replace('__DATA_DIR__', clim1_dd)
sti = sti.replace('__BUILD_DIR__', clim1_bd)
sti = sti.replace('__COMMND__', command)
sti = sti.replace('__CUSER__', curr_user)
print(sti)
return sti
def exec_bash(command):
return os.popen(command).read()
#p = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
#output = p.stdout.read()
#return output
# Test:
# execu('ls / | grep "te"')
| apache-2.0 |
lostpg/computationalphysics_N2014301020009 | EX_08.py | 1 | 7600 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pylab as pl
import math
import matplotlib.pyplot as plt
from matplotlib import animation  # this module is used to build the animation
class StadiumShape():
def __init__(self, alpha = 0.001):
self.alpha = alpha
self.xi = []
self.yi = []
self.x = []
self.y = []
def calcPoints(self):
j = 0
while j < math.pi :
self.xi.append(math.cos(j))
self.yi.append(math.sin(j) + self.alpha)
j += 0.01
for i in range(len(self.xi)):
# top half
self.x.append(self.xi[i])
self.y.append(self.yi[i])
# bottom half
self.x.append(self.xi[i])
self.y.append(-self.yi[i])
def showPlot(self):
pl.plot(self.x,self.y,'.')
pl.xlim(-1.1,1.1)
pl.ylim(-1.1,1.1)
pl.show()
class Billiard():
def __init__(self, init_x = 0.17, init_y = 0.0, init_vx = 0.15, init_vy = 0.1, time_step = 0.01, total_time = 1000, alpha = 0.01):
self.x = [init_x]
self.y = [init_y]
self.vx = init_vx
self.vy = init_vy
self.v = math.sqrt(init_vx ** 2 + init_vy ** 2)
self.dt = time_step
self.tt = total_time
self.alpha = alpha
self.t = 0
self.ps_x = []
self.ps_vx =[]
self.alpha = alpha
def trajectory(self):
while (self.t < self.tt):
self.t += self.dt
x_next = self.x[-1] + self.vx * self.dt
y_next = self.y[-1] + self.vy * self.dt
            # the next point is still inside the boundary
if x_next ** 2 + (abs(y_next) - self.alpha) ** 2 < 1:
self.x.append(x_next)
self.y.append(y_next)
if abs(self.y[-1]) < 0.001:
self.ps_vx.append(self.vx)
self.ps_x.append(self.x[-1])
            # the next point would cross the boundary: locate the collision point and compute the post-collision velocity
else:
divisor = 2
                # step back a little at a time
while divisor <= 2048:
x_next -= (self.vx * self.dt / divisor)
y_next -= (self.vy * self.dt / divisor)
                    # when the point is within tolerance of the boundary, leave the loop
if abs(x_next ** 2 + (abs(y_next) - self.alpha) ** 2 - 1) < 0.00001:
divisor = 10000
                    # if the last step went back past the boundary, undo it and use a smaller step next time
elif x_next ** 2 + (abs(y_next) - self.alpha) ** 2 < 1:
x_next += (self.vx * self.dt / divisor)
y_next += (self.vy * self.dt / divisor)
divisor *= 2
                # after leaving the loop, treat the point as lying on the boundary: append the current
                # x_next, y_next to self.x, self.y and compute the reflected velocity; the contact
                # position also gives the local normal of the edge
self.x.append(x_next)
self.y.append(y_next)
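                # reflect the velocity about the boundary normal: the outward unit
                # normal at the contact point is (x_next, y_next + m*alpha); the
                # velocity is split into components along and perpendicular to that
                # normal and the normal component is flipped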
m = -1
if y_next < 0:
m = 1
vi_ver_x = x_next * (self.vx * x_next + self.vy * (y_next + m * self.alpha))
vi_ver_y = (y_next + m * self.alpha) * (self.vx * x_next + self.vy * (y_next + m * self.alpha))
vi_par_x = self.vx - vi_ver_x
vi_par_y = self.vy - vi_ver_y
vf_ver_x = - vi_ver_x
vf_ver_y = - vi_ver_y
self.vx = vf_ver_x + vi_par_x
self.vy = vf_ver_y + vi_par_y
if abs(self.y[-1]) < 0.001:
self.ps_vx.append(self.vx)
self.ps_x.append(self.x[-1])
                # keep the total speed equal to the initial speed
# self.vx, self.vy = self.v * self.vx * (self.vx ** 2 + self.vy ** 2), self.v * self.vy * (self.vx ** 2 + self.vy ** 2)
def drawTrajectory(self):
stadium = StadiumShape(alpha = self.alpha)
stadium.calcPoints()
pl.plot(stadium.x, stadium.y,'.', color='k', linewidth=0.5)
pl.plot(self.x,self.y,'.', color='r', linewidth=0.1)
pl.ylim(-1.1,1.1)
pl.axis('equal')
pl.title('Stadium with $\\alpha$ = %r' % self.alpha)
pl.show()
def phaseSpace(self):
pl.plot(self.ps_x,self.ps_vx,'.')
pl.show()
class Comparison():
def draw(self):
billiard1 = Billiard(alpha=0.0, init_y = 0)
billiard2 = Billiard(alpha=0.0, init_y = 0.0001)
billiard1.trajectory()
billiard2.trajectory()
sep = []
time = []
for i in range(len(billiard1.x)):
time.append(i * billiard1.dt)
sep.append((billiard1.x[i]-billiard2.x[i]) ** 2 + (billiard1.y[i]-billiard2.y[i]) ** 2)
pl.semilogy(time, sep)
pl.title('Stadium with $\\alpha$ = %r, - divergence of two trajectories' % billiard1.alpha)
pl.xlabel('Time')
pl.ylabel('Separation')
pl.show()
def main1():
    # plot of the original trajectory
billiard = Billiard(alpha=0.1)
billiard.trajectory()
billiard.drawTrajectory()
billiard.phaseSpace()
def main2():
    # separation of the trajectories of two balls with slightly different initial conditions
comparison = Comparison()
comparison.draw()
def main3():
    # animation of the two balls
fig = plt.figure()
ax = plt.axes(title=('Stadium with $\\alpha$ = 0.1, - divergence of two trajectories'),
aspect='equal', autoscale_on=False, xlim=(-1.1,1.1),ylim=(-1.1,1.1),
xlabel=('x'),
ylabel=('y'))
billiard1 = Billiard(alpha=0.1, init_y = 0,total_time = 2000)
billiard1.trajectory()
billiard2 = Billiard(alpha=0.1, init_y = 0.0001,total_time = 2000)
billiard2.trajectory()
stadium = StadiumShape(alpha = 0.1)
stadium.calcPoints()
    line1=ax.plot([],[],'b:')  # initialise the artists: line is the trajectory, point is its head
point1=ax.plot([],[],'bo',markersize=10)
line2=ax.plot([],[],'r:')
point2=ax.plot([],[],'ro',markersize=10)
circle=ax.plot([],[],'k.',markersize=1)
images=[]
    def init():  # this function initialises the animation
circle = ax.plot
line1=ax.plot([],[],'b:',markersize=8)
point1=ax.plot([],[],'bo',markersize=10)
line2=ax.plot([],[],'r:',markersize=8)
point2 = ax.plot([], [], 'ro', markersize=10)
circle=ax.plot([],[],'k.',markersize=1)
return line1,point1,line2,point2,circle
    def anmi(i):  # anmi updates the data for each frame; i is the frame index
ax.clear()
circle=ax.plot(stadium.x,stadium.y,'k.',markersize=1)
line1=ax.plot(billiard1.x[0:100*i],billiard1.y[0:100*i], 'b:',markersize=8)
point1 = ax.plot(billiard1.x[100*i-1:100*i],billiard1.y[100*i-1:100*i],'bo', markersize=10)
line2 = ax.plot(billiard2.x[0:100*i],billiard2.y[0:100*i], 'r:',markersize=8)
point2 = ax.plot(billiard2.x[100*i-1:100*i],billiard2.y[100*i-1:100*i], 'ro', markersize=10)
return line1,point1,line2,point2,circle
    # animation.FuncAnimation builds the animation from the init and anmi functions; frames is the number of frames (500 in total), interval is the gap between consecutive frames in milliseconds (1 ms here)
anim = animation.FuncAnimation(fig, anmi, init_func=init, frames=500, interval=1, blit=False,repeat=False,)
plt.show()
if __name__ == '__main__':
main3()
| gpl-3.0 |
mwidner/mwidner.github.io | markdown_generator/talks.py | 199 | 4000 |
# coding: utf-8
# # Talks markdown generator for academicpages
#
# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.
#
# TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.
# In[1]:
import pandas as pd
import os
# ## Data format
#
# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.
#
# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk"
# - `date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper.
# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`
# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames
#
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
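# A hypothetical talks.tsv (tab-separated) might contain, e.g.:
#
#   title<TAB>type<TAB>url_slug<TAB>venue<TAB>date<TAB>location<TAB>talk_url<TAB>description
#   Example talk<TAB>Talk<TAB>example-talk<TAB>Some University<TAB>2014-03-01<TAB>Anytown, USA<TAB><TAB>An example description.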
# In[3]:
talks = pd.read_csv("talks.tsv", sep="\t", header=0)
talks
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivilents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;"
}
def html_escape(text):
if type(text) is str:
return "".join(html_escape_table.get(c,c) for c in text)
else:
return "False"
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
# In[5]:
loc_dict = {}
for row, item in talks.iterrows():
md_filename = str(item.date) + "-" + item.url_slug + ".md"
html_filename = str(item.date) + "-" + item.url_slug
year = item.date[:4]
md = "---\ntitle: \"" + item.title + '"\n'
md += "collection: talks" + "\n"
if len(str(item.type)) > 3:
md += 'type: "' + item.type + '"\n'
else:
md += 'type: "Talk"\n'
md += "permalink: /talks/" + html_filename + "\n"
if len(str(item.venue)) > 3:
md += 'venue: "' + item.venue + '"\n'
    if len(str(item.date)) > 3:
md += "date: " + str(item.date) + "\n"
if len(str(item.location)) > 3:
md += 'location: "' + str(item.location) + '"\n'
md += "---\n"
if len(str(item.talk_url)) > 3:
md += "\n[More information here](" + item.talk_url + ")\n"
if len(str(item.description)) > 3:
md += "\n" + html_escape(item.description) + "\n"
md_filename = os.path.basename(md_filename)
#print(md)
with open("../_talks/" + md_filename, 'w') as f:
f.write(md)
# These files are in the talks directory, one directory below where we're working from.
| mit |
johnaparker/dynamics | examples/greens.py | 1 | 1606 | from tqdm import tqdm
from my_pytools.my_matplotlib.animation import trajectory_animation
import matplotlib.pyplot as plt
from scipy import constants
import numpy as np
import h5py
import miepy
import dynamics
# final time and time step
dt = 10e-9
tf = 1000*dt
time = np.arange(0,tf,dt)
# sphere properties
radius = 75e-9
density = 10490
Ag = miepy.materials.predefined.Ag()
source = miepy.sources.rhc_polarized_plane_wave(amplitude=3e8)
mass = 4/3*np.pi*radius**3*density
Iz = 2/5*mass*radius**2
wavelength = 800e-9
eps_b = 1.33**2
k = 2*np.pi*eps_b**0.5/wavelength
eps_ag = Ag.eps(wavelength)
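# dipole polarizability of the sphere from the Clausius-Mossotti (Lorentz-Lorenz)
# relation, written in SI units (hence the epsilon_0 factor)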
alpha = 4*np.pi*radius**3*eps_b*(eps_ag - eps_b)/(eps_ag + 2*eps_b)*constants.epsilon_0
# fluid properties
mu = 0.6e-3 # liquid viscosity
temp = 1000 # temperature
# initial conditions
positions = [[-300e-9,0.0,0], [300e-9,0.0,0], [0,300e-9,0]]
positions = [[-300e-9,0.0,0], [300e-9,0.0,0]]
system = dynamics.particles(positions, mass, Iz, dt, outfile='out.h5')
dynamics.langevin(system, radius, temp, mu)
em = dynamics.point_dipole_electrodynamics(system, alpha, source, wavelength, eps_b)
dynamics.constrain_along_normal(system, [0,0,1])
for i,t in enumerate(tqdm(time)):
system.update()
system.empty_cache()
skip = 5
with h5py.File('out.h5','r') as f:
trajectories = f['positions'][...]
angles = f['angles'][...]
anim = trajectory_animation(trajectories[::skip]*1e9, radius*1e9, 'z', colors=['C0','C1', 'C2'], trail=100, angles=angles[::skip],
time=time[::skip]*1e6, time_unit = r'$\mu s$', number_labels=True, trail_type='fading', interval=30)
plt.show()
| mit |
mdesco/dipy | scratch/very_scratch/spherical_statistics.py | 20 | 5695 | import numpy as np
import dipy.core.meshes as meshes
import get_vertices as gv
from dipy.core.triangle_subdivide import create_unit_sphere
#from dipy.viz import fos
#from dipy.io import dicomreaders as dcm
#import dipy.core.geometry as geometry
#import matplotlib.pyplot as mplp
import dipy.core.sphere_plots as splot
# set up a dictionary of sphere points that are in use EITHER as a set
# directions for diffusion weighted acquisitions OR as a set of
# evaluation points for an ODF (orientation distribution function.
sphere_dic = {'fy362': {'filepath' : '/home/ian/Devel/dipy/dipy/core/data/evenly_distributed_sphere_362.npz', 'object': 'npz', 'vertices': 'vertices', 'omit': 0, 'hemi': False},
'fy642': {'filepath' : '/home/ian/Devel/dipy/dipy/core/data/evenly_distributed_sphere_642.npz', 'object': 'npz', 'vertices': 'odf_vertices', 'omit': 0, 'hemi': False},
'siem64': {'filepath':'/home/ian/Devel/dipy/dipy/core/tests/data/small_64D.gradients.npy', 'object': 'npy', 'omit': 1, 'hemi': True},
'create2': {},
'create3': {},
'create4': {},
'create5': {},
'create6': {},
'create7': {},
'create8': {},
'create9': {},
'marta200': {'filepath': '/home/ian/Data/Spheres/200.npy', 'object': 'npy', 'omit': 0, 'hemi': True},
'dsi101': {'filepath': '/home/ian/Data/Frank_Eleftherios/frank/20100511_m030y_cbu100624/08_ep2d_advdiff_101dir_DSI', 'object': 'dicom', 'omit': 0, 'hemi': True}}
def plot_sphere(v,key):
r = fos.ren()
fos.add(r,fos.point(v,fos.green, point_radius= 0.01))
fos.show(r, title=key, size=(1000,1000))
def plot_lambert(v,key):
lamb = geometry.lambert_equal_area_projection_cart(*v.T).T
(y1,y2) = lamb
radius = np.sum(lamb**2,axis=0) < 1
#print inner
#print y1[inner]
#print y1[-inner]
figure = mplp.figure(facecolor='w')
current = figure.add_subplot(111)
current.patch.set_color('k')
current.plot(y1[radius],y2[radius],'.g')
    current.plot(y1[~radius],y2[~radius],'.r')
current.axes.set_aspect(aspect = 'equal', adjustable = 'box')
figure.show()
figure.waitforbuttonpress()
mplp.close()
def get_vertex_set(key):
if key[:6] == 'create':
number = eval(key[6:])
vertices, edges, faces = create_unit_sphere(number)
omit = 0
else:
entry = sphere_dic[key]
#print entry
if entry.has_key('omit'):
omit = entry['omit']
else:
omit = 0
filepath = entry['filepath']
if entry['object'] == 'npz':
filearray = np.load(filepath)
vertices = filearray[entry['vertices']]
elif sphere_dic[key]['object'] == 'npy':
vertices = np.load(filepath)
elif entry['object'] == 'dicom':
data,affine,bvals,gradients=dcm.read_mosaic_dir(filepath)
#print (bvals.shape, gradients.shape)
grad3 = np.vstack((bvals,bvals,bvals)).transpose()
#print grad3.shape
#vertices = grad3*gradients
vertices = gradients
if omit > 0:
vertices = vertices[omit:,:]
if entry['hemi']:
vertices = np.vstack([vertices, -vertices])
print key, ': number of vertices = ', vertices.shape[0], '(drop ',omit,')'
return vertices[omit:,:]
xup=np.array([ 1,0,0])
xdn=np.array([-1,0,0])
yup=np.array([0, 1,0])
ydn=np.array([0,-1,0])
zup=np.array([0,0, 1])
zdn=np.array([0,0,-1])
#for key in sphere_dic:
#for key in ['siem64']:
for key in ['fy642']:
v = gv.get_vertex_set(key)
splot.plot_sphere(v,key)
splot.plot_lambert(v,key,centre=np.array([0.,0.]))
equat, polar = meshes.spherical_statistics(v,north=xup,width=0.2)
l = 2.*len(v)
equat = equat/l
polar = polar/l
print '%6.3f %6.3f %6.3f %6.3f' % (equat.min(), equat.mean(), equat.max(), np.sqrt(equat.var()))
print '%6.3f %6.3f %6.3f %6.3f' % (polar.min(), polar.mean(), polar.max(), np.sqrt(polar.var()))
def spherical_statistics(vertices, north=np.array([0,0,1]), width=0.02):
'''
function to evaluate a spherical triangulation by looking at the
variability of numbers of vertices in 'vertices' in equatorial bands
of width 'width' orthogonal to each point in 'vertices'
'''
equatorial_counts = np.array([len(equatorial_zone_vertices(vertices, pole, width=width)) for pole in vertices if np.dot(pole,north) >= 0])
#equatorial_counts = np.bincount(equatorial_counts)
#args = np.where(equatorial_counts>0)
#print zip(list(args[0]), equatorial_counts[args])
polar_counts = np.array([len(polar_zone_vertices(vertices, pole, width=width)) for pole in vertices if np.dot(pole,north) >= 0])
#unique_counts = np.sort(np.array(list(set(equatorial_counts))))
#polar_counts = np.bincount(polar_counts)
#counts_tokens = [(uc, bin_counts[uc]) for uc in bin_counts if ]
#args = np.where(polar_counts>0)
#print '(number, frequency):', zip(unique_counts,tokens)
#print '(number, frequency):', counts_tokens
#print zip(args, bin_counts[args])
#print zip(list(args[0]), polar_counts[args])
return equatorial_counts, polar_counts
def spherical_proportion(zone_width):
# assuming radius is 1: (2*np.pi*zone_width)/(4*np.pi)
# 0 <= zone_width <= 2
return zone_width/2.
def angle_for_zone(zone_width):
return np.arcsin(zone_width/2.)
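# Worked example: for a zone of width 0.2 on the unit sphere,
# spherical_proportion(0.2) = 0.1 (10% of the surface area) and
# angle_for_zone(0.2) = arcsin(0.1) ~= 0.1002 rad.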
def coarseness(faces):
    faces = np.asarray(faces)
    coarse = 0.0
    for face in faces:
        a, b, c = face
        # circumradius comes from dipy.core.geometry (the `geometry` import near the top is commented out)
        coarse = max(coarse, geometry.circumradius(a, b, c))
    return coarse
| bsd-3-clause |
petosegan/scikit-learn | examples/model_selection/plot_learning_curve.py | 250 | 4171 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.learning_curve import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
    Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=100,
test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=10,
test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
| bsd-3-clause |
terkkila/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 78 | 4510 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is the different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
fdtomasi/string_kernel | string_kernel/io_utils.py | 1 | 3946 | import os
import sys
import csv
import pandas as pd
d = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',
'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',
'ALA': 'A', 'VAL': 'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}
def shorten(x):
if len(x) % 3 != 0:
raise ValueError('Input length should be a multiple of three')
y = ''
for i in range(len(x) / 3):
y += d[x[3 * i:3 * i + 3]]
return y
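# Example: shorten() maps concatenated three-letter residue codes to one-letter
# codes, e.g. shorten('METLYS') == 'MK' and shorten('GLYALA') == 'GA'.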
def read_pdb(pdb_file, dialect='excel-tab'):
"""Read a protein database file.
From the pdb file, the heavy and light chain are extracted, with the
following criteria to divide them in CDRs:
# LCDR1: 24_L:34_L,
# LCDR2: 48_L:54_L,
# LCDR3: 89_L:98_L,
# HCDR1: 24_H:34_H,
# HCDR2: 51_H:57_H,
# HCDR3: 93_H:104_H
Hence L chain from 24 to 34, from 48 to 54 and so on.
The numbering is of pdb file, that is the kabat-chothia numbering.
There can be some IG with ids like 100A, 100B; they must be considered
if they are inside the intervals specified before.
Parameters
----------
pdb_file : str
A protein database file. Delimited according to `dialect`.
dialect : ('excel-tab', 'excel')
        Dialect for csv.DictReader (currently unused; the file is parsed line by line).
Returns
-------
dict : dictionary
For each CDR, return the correspondent sequence for the pdb_file
specified.
"""
try:
heavy = []
light = []
loops = ['LCDR1', 'LCDR2', 'LCDR3', 'HCDR1', 'HCDR2', 'HCDR3']
seqs = ['', '', '', '', '', '']
with open(pdb_file, 'rb') as f:
for line in f:
words = [w for w in line.split(" ") if w != '']
if words[0] != 'ATOM':
continue
if words[4] == 'H':
if heavy == [] or words[5] != heavy[-1][1]:
heavy.append((words[3], words[5]))
elif words[4] == 'L':
if light == [] or words[5] != light[-1][1]:
light.append((words[3], words[5]))
else:
raise ValueError
for pos, t in enumerate(light):
# remove the suffix A, B, D ...
n = int(t[1][:-1]) if t[1][-1].isalpha() else int(t[1])
if 24 <= n <= 34:
seqs[0] += t[0]
elif 48 <= n <= 54:
seqs[1] += t[0]
elif 89 <= n <= 98:
seqs[2] += t[0]
for pos, t in enumerate(heavy):
# remove the suffix A, B, D ...
n = int(t[1][:-1]) if t[1][-1].isalpha() else int(t[1])
if 24 <= n <= 34:
seqs[3] += t[0]
elif 51 <= n <= 57:
seqs[4] += t[0]
elif 93 <= n <= 104:
seqs[5] += t[0]
except IOError:
sys.exit('ERROR: File %s cannot be read' % pdb_file)
except Exception as e:
sys.exit('ERROR: {}'.format(e))
return {k: v for k, v in zip(loops, map(shorten, seqs))} # , light, heavy
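# Usage sketch (hypothetical file name): read_pdb returns one one-letter sequence
# per CDR loop, e.g.
#     cdrs = read_pdb('some_antibody.pdb')
#     cdrs['HCDR3']   # heavy-chain CDR3 sequence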
def pdb_to_df(path):
loops = ['LCDR1', 'LCDR2', 'LCDR3', 'HCDR1', 'HCDR2', 'HCDR3']
df = pd.DataFrame(columns=loops)
filenames = [os.path.join(path, f) for f in os.listdir(path)
if os.path.isfile(os.path.join(path, f)) and
f.endswith('.pdb')]
for f in filenames:
dict_seqs = read_pdb(f)
df = df.append(dict_seqs, ignore_index=True)
# # To insert the names of the sequences, load them from the merged file
# # after conversion
# __df = pd.read_csv('/home/fede/Dropbox/projects/Franco_Fabio_Marcat/'
# 'conversioni_ID/tab_final_merged_newid_mutation.csv')
# indexes = []
# for f in filenames:
# df1[df1['ID TM matrice'] == x]['new_id_tm']
df.index = map(lambda x: x.split('/')[-1], filenames)
return df
| mit |
pprett/scikit-learn | examples/plot_digits_pipe.py | 65 | 1652 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
# Parameters of pipelines can be set using ‘__’ separated parameter names:
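# e.g. pipe.set_params(pca__n_components=30, logistic__C=1.0) addresses the
# named steps defined above in the same way.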
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
AlexanderFabisch/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
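# A single test grid is used throughout, but with n_repeat independent noisy
# realisations of y for every test point, so that the irreducible noise term
# can later be estimated as the per-point variance of y_test.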
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
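    # y_error is a Monte-Carlo estimate of the expected squared error: it averages
    # the squared difference between every noisy realisation of the test targets
    # (columns of y_test) and the predictions from every repetition (columns of
    # y_predict).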
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
srjit/fakenewschallange | code/python/eda/wmdistance.py | 1 | 2398 | import os
import sys
sys.path.append("../")
from gensim import models
import pandas as pd
import numpy as np
import preprocessing as pp
filename = "../../../data/sample.csv"
data = pd.read_csv(filename, sep=',')
data['header_features'] = data.Headline.apply(lambda x : pp.process(x))
data['content_features'] = data.articleBody.apply(lambda x : pp.process(x))
model = models.Word2Vec.load_word2vec_format('/media/sree/venus/code/word2vec/GoogleNews-vectors-negative300.bin', binary=True)
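# sent2vec builds a single 300-dimensional representation of a token list by
# summing the word2vec vectors of the tokens it knows and L2-normalising the
# result; tokens missing from the model's vocabulary are skipped.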
def sent2vec(words):
M = []
for w in words:
try:
M.append(model[w])
except:
continue
M = np.array(M)
v = M.sum(axis=0)
return v / np.sqrt((v ** 2).sum())
## create the header vector
header_vectors = np.zeros((data.shape[0], 300))
for i, q in enumerate(data.header_features.values):
header_vectors[i, :] = sent2vec(q)
# header_series = pd.Series(header_vectors)
# data['header_vector'] = header_series.values
## create the content vector
content_vectors = np.zeros((data.shape[0], 300))
for i, q in enumerate(data.content_features.values):
content_vectors[i, :] = sent2vec(q)
# content_series = pd.Series(content_vectors)
# data['content_vector'] = content_series.values
# model = KeyedVectors.load_word2vec_format('data/GoogleNews-vectors-negative300.bin.gz', binary=True)
data['wmd'] = data.apply(lambda x: model.wmdistance(x['header_features'], x['content_features']), axis=1)
# data['header_vectors'] = data.header_features.apply(lambda x : sent2vec(x))
# data['content_vectors'] = data.header_features.apply(lambda x : sent2vec(x))
## Word2Vec WMD Distance
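# toCSV writes one WMD value per line to word_mover_<stance>_values.csv,
# replacing any previous output for that stance.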
def toCSV(stance, values):
    metric = "word_mover_"
    filename = metric + stance + "_values.csv"
    # Remove any previous output for this stance before rewriting it
    try:
        os.remove(filename)
    except OSError:
        pass
    # The context manager guarantees the file is flushed and closed
    with open(filename, "w") as f:
        for i in values:
            f.write(str(i) + "\n")
range_of_values = {}
for stance_level in np.unique(data.Stance):
filtered_rows = data[(data.Stance == stance_level)]
print("Statistics for group : " + stance_level)
## range of wmds
group_max_wmd = np.max(filtered_rows.wmd)
group_min_wmd = np.min(filtered_rows.wmd)
range_of_values[stance_level] = filtered_rows.wmd.tolist()
value_list = filtered_rows.wmd.tolist()
value_list.sort()
toCSV(stance_level, value_list)
| gpl-3.0 |
Vimos/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 2 | 7204 | import numpy as np
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.linear_model import LogisticRegression, SGDClassifier, Lasso
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.utils.fixes import norm
iris = datasets.load_iris()
data, y = iris.data, iris.target
rng = np.random.RandomState(0)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
for threshold in ["gobbledigook", ".5 * gobbledigook"]:
model = SelectFromModel(clf, threshold=threshold)
model.fit(data, y)
assert_raises(ValueError, model.transform, data)
def test_input_estimator_unchanged():
"""
Test that SelectFromModel fits on a clone of the estimator.
"""
est = RandomForestClassifier()
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
assert_true(transformer.estimator is est)
@skip_if_32bit
def test_feature_importances():
X, y = datasets.make_classification(
n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0)
est = RandomForestClassifier(n_estimators=50, random_state=0)
for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
transformer = SelectFromModel(estimator=est, threshold=threshold)
transformer.fit(X, y)
assert_true(hasattr(transformer.estimator_, 'feature_importances_'))
X_new = transformer.transform(X)
assert_less(X_new.shape[1], X.shape[1])
importances = transformer.estimator_.feature_importances_
feature_mask = np.abs(importances) > func(importances)
assert_array_almost_equal(X_new, X[:, feature_mask])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
est = RandomForestClassifier(n_estimators=50, random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(X, y, sample_weight=sample_weight)
importances = transformer.estimator_.feature_importances_
transformer.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = transformer.estimator_.feature_importances_
assert_almost_equal(importances, importances_bis)
# For the Lasso and related models, the threshold defaults to 1e-5
transformer = SelectFromModel(estimator=Lasso(alpha=0.1))
transformer.fit(X, y)
X_new = transformer.transform(X)
mask = np.abs(transformer.estimator_.coef_) > 1e-5
assert_array_equal(X_new, X[:, mask])
@skip_if_32bit
def test_feature_importances_2d_coef():
X, y = datasets.make_classification(
n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0, n_classes=4)
est = LogisticRegression()
for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
for order in [1, 2, np.inf]:
# Fit SelectFromModel a multi-class problem
transformer = SelectFromModel(estimator=LogisticRegression(),
threshold=threshold,
norm_order=order)
transformer.fit(X, y)
assert_true(hasattr(transformer.estimator_, 'coef_'))
X_new = transformer.transform(X)
assert_less(X_new.shape[1], X.shape[1])
# Manually check that the norm is correctly performed
est.fit(X, y)
importances = norm(est.coef_, axis=0, ord=order)
feature_mask = importances > func(importances)
assert_array_equal(X_new, X[:, feature_mask])
def test_partial_fit():
est = PassiveAggressiveClassifier(random_state=0, shuffle=False)
transformer = SelectFromModel(estimator=est)
transformer.partial_fit(data, y,
classes=np.unique(y))
old_model = transformer.estimator_
transformer.partial_fit(data, y,
classes=np.unique(y))
new_model = transformer.estimator_
assert_true(old_model is new_model)
X_transform = transformer.transform(data)
transformer.fit(np.vstack((data, data)), np.concatenate((y, y)))
assert_array_equal(X_transform, transformer.transform(data))
# check that if est doesn't have partial_fit, neither does SelectFromModel
transformer = SelectFromModel(estimator=RandomForestClassifier())
assert_false(hasattr(transformer, "partial_fit"))
def test_calling_fit_reinitializes():
est = LinearSVC(random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
transformer.set_params(estimator__C=100)
transformer.fit(data, y)
assert_equal(transformer.estimator_.C, 100)
def test_prefit():
"""
Test all possible combinations of the prefit parameter.
"""
# Passing a prefit parameter with the selected model
# and fitting a unfit model with prefit=False should give same results.
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
model = SelectFromModel(clf)
model.fit(data, y)
X_transform = model.transform(data)
clf.fit(data, y)
model = SelectFromModel(clf, prefit=True)
assert_array_equal(model.transform(data), X_transform)
# Check that the model is rewritten if prefit=False and a fitted model is
# passed
model = SelectFromModel(clf, prefit=False)
model.fit(data, y)
assert_array_equal(model.transform(data), X_transform)
# Check that prefit=True and calling fit raises a ValueError
model = SelectFromModel(clf, prefit=True)
assert_raises(ValueError, model.fit, data, y)
def test_threshold_string():
est = RandomForestClassifier(n_estimators=50, random_state=0)
model = SelectFromModel(est, threshold="0.5*mean")
model.fit(data, y)
X_transform = model.transform(data)
# Calculate the threshold from the estimator directly.
est.fit(data, y)
threshold = 0.5 * np.mean(est.feature_importances_)
mask = est.feature_importances_ > threshold
assert_array_equal(X_transform, data[:, mask])
def test_threshold_without_refitting():
"""Test that the threshold can be set without refitting the model."""
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
model = SelectFromModel(clf, threshold="0.1 * mean")
model.fit(data, y)
X_transform = model.transform(data)
# Set a higher threshold to filter out more features.
model.threshold = "1.0 * mean"
assert_greater(X_transform.shape[1], model.transform(data).shape[1])
| bsd-3-clause |
hamedhsn/incubator-airflow | scripts/perf/scheduler_ops_metrics.py | 30 | 6536 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import logging
import pandas as pd
import sys
from airflow import configuration, settings
from airflow.jobs import SchedulerJob
from airflow.models import DagBag, DagModel, DagRun, TaskInstance
from airflow.utils.state import State
SUBDIR = 'scripts/perf/dags'
DAG_IDS = ['perf_dag_1', 'perf_dag_2']
MAX_RUNTIME_SECS = 6
class SchedulerMetricsJob(SchedulerJob):
"""
This class extends SchedulerJob to instrument the execution performance of
task instances contained in each DAG. We want to know if any DAG
is starved of resources, and this will be reflected in the stats printed
out at the end of the test run. The following metrics will be instrumented
for each task instance (dag_id, task_id, execution_date) tuple:
1. Queuing delay - time taken from starting the executor to the task
instance to be added to the executor queue.
2. Start delay - time taken from starting the executor to the task instance
to start execution.
3. Land time - time taken from starting the executor to task instance
completion.
4. Duration - time taken for executing the task instance.
The DAGs implement bash operators that call the system wait command. This
is representative of typical operators run on Airflow - queries that are
run on remote systems and spend the majority of their time on I/O wait.
To Run:
$ python scripts/perf/scheduler_ops_metrics.py
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerMetricsJob'
}
def print_stats(self):
"""
Print operational metrics for the scheduler test.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
successful_tis = filter(lambda x: x.state == State.SUCCESS, tis)
ti_perf = [(ti.dag_id, ti.task_id, ti.execution_date,
(ti.queued_dttm - self.start_date).total_seconds(),
(ti.start_date - self.start_date).total_seconds(),
(ti.end_date - self.start_date).total_seconds(),
ti.duration) for ti in successful_tis]
ti_perf_df = pd.DataFrame(ti_perf, columns=['dag_id', 'task_id',
'execution_date',
'queue_delay',
'start_delay', 'land_time',
'duration'])
print('Performance Results')
print('###################')
for dag_id in DAG_IDS:
print('DAG {}'.format(dag_id))
print(ti_perf_df[ti_perf_df['dag_id'] == dag_id])
print('###################')
if len(tis) > len(successful_tis):
print("WARNING!! The following task instances haven't completed")
print(pd.DataFrame([(ti.dag_id, ti.task_id, ti.execution_date, ti.state)
for ti in filter(lambda x: x.state != State.SUCCESS, tis)],
columns=['dag_id', 'task_id', 'execution_date', 'state']))
session.commit()
def heartbeat(self):
"""
Override the scheduler heartbeat to determine when the test is complete
"""
super(SchedulerMetricsJob, self).heartbeat()
session = settings.Session()
# Get all the relevant task instances
TI = TaskInstance
successful_tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.filter(TI.state.in_([State.SUCCESS]))
.all()
)
session.commit()
dagbag = DagBag(SUBDIR)
dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS]
        # the tasks in perf_dag_1 and perf_dag_2 have a daily schedule interval.
num_task_instances = sum([(datetime.today() - task.start_date).days
for dag in dags for task in dag.tasks])
if (len(successful_tis) == num_task_instances or
(datetime.now()-self.start_date).total_seconds() >
MAX_RUNTIME_SECS):
if (len(successful_tis) == num_task_instances):
self.logger.info("All tasks processed! Printing stats.")
else:
self.logger.info("Test timeout reached. "
"Printing available stats.")
self.print_stats()
set_dags_paused_state(True)
sys.exit()
def clear_dag_runs():
"""
Remove any existing DAG runs for the perf test DAGs.
"""
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id.in_(DAG_IDS),
).all()
for dr in drs:
logging.info('Deleting DagRun :: {}'.format(dr))
session.delete(dr)
def clear_dag_task_instances():
"""
Remove any existing task instances for the perf test DAGs.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
for ti in tis:
logging.info('Deleting TaskInstance :: {}'.format(ti))
session.delete(ti)
session.commit()
def set_dags_paused_state(is_paused):
"""
Toggle the pause state of the DAGs in the test.
"""
session = settings.Session()
dms = session.query(DagModel).filter(
DagModel.dag_id.in_(DAG_IDS))
for dm in dms:
logging.info('Setting DAG :: {} is_paused={}'.format(dm, is_paused))
dm.is_paused = is_paused
session.commit()
def main():
configuration.load_test_config()
set_dags_paused_state(False)
clear_dag_runs()
clear_dag_task_instances()
job = SchedulerMetricsJob(dag_ids=DAG_IDS, subdir=SUBDIR)
job.run()
if __name__ == "__main__":
main()
| apache-2.0 |
NMTHydro/Recharge | utils/tornadoPlot_SA.py | 1 | 4933 | # ===============================================================================
# Copyright 2016 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance
# with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =================================IMPORTS=======================================
import os
import matplotlib.pyplot as plt
from matplotlib import rc
from numpy import array, set_printoptions
from pandas import read_pickle, set_option, options
def round_to_value(number, roundto):
return round(number / roundto) * roundto
rc('mathtext', default='regular')
set_option('display.max_rows', None)
set_option('display.max_columns', None)
set_option('display.width', None)
set_option('display.precision', 3)
options.display.float_format = '${:,.2f}'.format
set_printoptions(threshold=3000, edgeitems=5000, precision=3)
set_option('display.height', None)
set_option('display.max_rows', None)
FACTORS = ['Temperature', 'Precipitation', 'Reference ET', 'Total Available Water (TAW)',
'Vegetation Density (NDVI)', 'Soil Ksat']
def make_tornado_plot(dataframe, factors, show=False, fig_path=None):
dfs = os.listdir(dataframe)
print 'pickled dfs: {}'.format(dfs)
filename = 'norm_sensitivity.pkl'
if filename in dfs:
df = read_pickle(os.path.join(dataframe, filename))
df.to_csv(os.path.join(fig_path, 'sample_norm_df.csv'))
print df
xx = 1
for index, row in df.iterrows():
print index, row
base = row[0][5]
lows = []
for fact in row:
lows.append(min(fact))
lows = array(lows)
values = []
for fact in row:
values.append(max(fact))
# The y position for each variable
ys = range(len(values))[::-1] # top to bottom
# Plot the bars, one by one
for y, low, value in zip(ys, lows, values):
# The width of the 'low' and 'high' pieces
low_width = base - low
high_width = abs(value - base)
# Each bar is a "broken" horizontal bar chart
plt.broken_barh(
[(low, low_width), (base, high_width)],
(y - 0.4, 0.8),
facecolors=['white', 'white'], # Try different colors if you like
edgecolors=['black', 'black'],
linewidth=1)
plt.subplots_adjust(left=0.32)
# Display the value as text. It should be positioned in the center of
# the 'high' bar, except if there isn't any room there, then it should be
# next to bar instead.
x = base + high_width / 2
if x <= base:
x = base + high_width
# plt.text(x, y, str(round(value - low, 1)) + 'mm', va='center', ha='center')
# Draw a vertical line down the middle
plt.axvline(base, color='black')
# Position the x-axis on the top, hide all the other spines (=axis lines)
axes = plt.gca() # (gca = get current axes)
axes.spines['left'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.spines['bottom'].set_visible(False)
axes.xaxis.set_ticks_position('top')
# Make the y-axis display the factors
plt.yticks(ys, factors)
print 'location: {}'.format(index)
plt.title('{} [mm]'.format(index.replace('_', ' ')),
y=1.05)
# Set the portion of the x- and y-axes to show
plt.xlim(min(-20, 1.2 * min(lows)), base + 1.1 * max(values))
plt.ylim(-1, len(factors))
# plt.show()
if show:
plt.show()
# if fig_path:
# plt.savefig('{}_tornado'.format(index), fig_path, ext='jpg', dpi=500, close=True, verbose=True)
plt.close()
if __name__ == '__main__':
root = os.path.join('F:\\', 'ETRM_Inputs')
sensitivity = os.path.join(root, 'sensitivity_analysis')
pickles = os.path.join(sensitivity, 'pickled')
figure_save_path = os.path.join(sensitivity, 'figures')
make_tornado_plot(pickles, FACTORS, fig_path=figure_save_path, show=True)
# ========================== EOF ==============================================
| apache-2.0 |
pratapvardhan/pandas | pandas/tests/indexes/multi/test_copy.py | 2 | 4111 | # -*- coding: utf-8 -*-
from copy import copy, deepcopy
import pandas.util.testing as tm
from pandas import (CategoricalIndex, IntervalIndex, MultiIndex, PeriodIndex,
RangeIndex, Series, compat)
def assert_multiindex_copied(copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(idx):
i_copy = idx.copy()
assert_multiindex_copied(i_copy, idx)
def test_shallow_copy(idx):
i_copy = idx._shallow_copy()
assert_multiindex_copied(i_copy, idx)
def test_view(idx):
i_view = idx.view()
assert_multiindex_copied(i_view, idx)
def test_copy_name(idx):
# gh-12309: Check that the "name" argument
# passed at initialization is honored.
    # TODO: Remove or refactor; MultiIndex is not tested here.
for name, index in compat.iteritems({'idx': idx}):
if isinstance(index, MultiIndex):
continue
first = index.__class__(index, copy=True, name='mario')
second = first.__class__(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
# Not using tm.assert_index_equal() since names differ.
assert index.equals(first)
assert first.name == 'mario'
assert second.name == 'mario'
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
if not isinstance(index, CategoricalIndex):
# See gh-13365
s3 = s1 * s2
assert s3.index.name == 'mario'
def test_ensure_copied_data(idx):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
    # TODO: REMOVE THIS TEST. MultiIndex is tested separately as noted below.
for name, index in compat.iteritems({'idx': idx}):
init_kwargs = {}
if isinstance(index, PeriodIndex):
# Needs "freq" specification:
init_kwargs['freq'] = index.freq
elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):
# RangeIndex cannot be initialized from data
# MultiIndex and CategoricalIndex are tested separately
continue
index_type = index.__class__
result = index_type(index.values, copy=True, **init_kwargs)
tm.assert_index_equal(index, result)
tm.assert_numpy_array_equal(index.values, result.values,
check_same='copy')
if isinstance(index, PeriodIndex):
# .values an object array of Period, thus copied
result = index_type(ordinal=index.asi8, copy=False,
**init_kwargs)
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='same')
elif isinstance(index, IntervalIndex):
# checked in test_interval.py
pass
else:
result = index_type(index.values, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.values, result.values,
check_same='same')
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='same')
def test_copy_and_deepcopy(indices):
if isinstance(indices, MultiIndex):
return
for func in (copy, deepcopy):
idx_copy = func(indices)
assert idx_copy is not indices
assert idx_copy.equals(indices)
new_copy = indices.copy(deep=True, name="banana")
assert new_copy.name == "banana"
| bsd-3-clause |
DSLituiev/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
wilsonkichoi/zipline | zipline/data/loader.py | 2 | 17292 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import OrderedDict
import logbook
import pandas as pd
from pandas_datareader.data import DataReader
import pytz
from six import iteritems
from six.moves.urllib_error import HTTPError
from .benchmarks import get_benchmark_returns
from . import treasuries, treasuries_can
from ..utils.paths import (
cache_root,
data_root,
)
from ..utils.deprecate import deprecated
from ..utils.tradingcalendar import (
trading_day as trading_day_nyse,
trading_days as trading_days_nyse,
)
logger = logbook.Logger('Loader')
# Mapping from index symbol to appropriate bond data
INDEX_MAPPING = {
'^GSPC':
(treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
'^GSPTSE':
(treasuries_can, 'treasury_curves_can.csv', 'bankofcanada.ca'),
'^FTSE': # use US treasuries until UK bonds implemented
(treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
}
ONE_HOUR = pd.Timedelta(hours=1)
def last_modified_time(path):
"""
Get the last modified time of path as a Timestamp.
"""
return pd.Timestamp(os.path.getmtime(path), unit='s', tz='UTC')
def get_data_filepath(name):
"""
Returns a handle to data file.
Creates containing directory, if needed.
"""
dr = data_root()
if not os.path.exists(dr):
os.makedirs(dr)
return os.path.join(dr, name)
def get_cache_filepath(name):
cr = cache_root()
if not os.path.exists(cr):
os.makedirs(cr)
return os.path.join(cr, name)
def get_benchmark_filename(symbol):
return "%s_benchmark.csv" % symbol
def has_data_for_dates(series_or_df, first_date, last_date):
"""
Does `series_or_df` have data on or before first_date and on or after
last_date?
"""
dts = series_or_df.index
if not isinstance(dts, pd.DatetimeIndex):
raise TypeError("Expected a DatetimeIndex, but got %s." % type(dts))
first, last = dts[[0, -1]]
return (first <= first_date) and (last >= last_date)
def load_market_data(trading_day=trading_day_nyse,
trading_days=trading_days_nyse,
bm_symbol='^GSPC'):
"""
Load benchmark returns and treasury yield curves for the given calendar and
benchmark symbol.
Benchmarks are downloaded as a Series from Yahoo Finance. Treasury curves
are US Treasury Bond rates and are downloaded from 'www.federalreserve.gov'
by default. For Canadian exchanges, a loader for Canadian bonds from the
Bank of Canada is also available.
Results downloaded from the internet are cached in
~/.zipline/data. Subsequent loads will attempt to read from the cached
files before falling back to redownload.
Parameters
----------
trading_day : pandas.CustomBusinessDay, optional
A trading_day used to determine the latest day for which we
expect to have data. Defaults to an NYSE trading day.
trading_days : pd.DatetimeIndex, optional
A calendar of trading days. Also used for determining what cached
dates we should expect to have cached. Defaults to the NYSE calendar.
bm_symbol : str, optional
Symbol for the benchmark index to load. Defaults to '^GSPC', the Yahoo
ticker for the S&P 500.
Returns
-------
(benchmark_returns, treasury_curves) : (pd.Series, pd.DataFrame)
Notes
-----
Both return values are DatetimeIndexed with values dated to midnight in UTC
of each stored date. The columns of `treasury_curves` are:
'1month', '3month', '6month',
'1year','2year','3year','5year','7year','10year','20year','30year'
"""
first_date = trading_days[0]
now = pd.Timestamp.utcnow()
# We expect to have benchmark and treasury data that's current up until
# **two** full trading days prior to the most recently completed trading
# day.
# Example:
# On Thu Oct 22 2015, the previous completed trading day is Wed Oct 21.
# However, data for Oct 21 doesn't become available until the early morning
# hours of Oct 22. This means that there are times on the 22nd at which we
# cannot reasonably expect to have data for the 21st available. To be
# conservative, we instead expect that at any time on the 22nd, we can
# download data for Tuesday the 20th, which is two full trading days prior
# to the date on which we're running a test.
# We'll attempt to download new data if the latest entry in our cache is
# before this date.
last_date = trading_days[trading_days.get_loc(now, method='ffill') - 2]
br = ensure_benchmark_data(
bm_symbol,
first_date,
last_date,
now,
# We need the trading_day to figure out the close prior to the first
# date so that we can compute returns for the first date.
trading_day,
)
tc = ensure_treasury_data(
bm_symbol,
first_date,
last_date,
now,
)
benchmark_returns = br[br.index.slice_indexer(first_date, last_date)]
treasury_curves = tc[tc.index.slice_indexer(first_date, last_date)]
return benchmark_returns, treasury_curves
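# Typical usage (sketch, relying only on the defaults defined in this module):
#
#     benchmark_returns, treasury_curves = load_market_data()
#
# or pass bm_symbol='^GSPTSE' to use the Canadian benchmark and the Bank of
# Canada treasury loader from INDEX_MAPPING (a different calendar can be
# supplied via the trading_day/trading_days arguments).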
def ensure_benchmark_data(symbol, first_date, last_date, now, trading_day):
"""
Ensure we have benchmark data for `symbol` from `first_date` to `last_date`
Parameters
----------
symbol : str
The symbol for the benchmark to load.
first_date : pd.Timestamp
First required date for the cache.
last_date : pd.Timestamp
Last required date for the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
trading_day : pd.CustomBusinessDay
A trading day delta. Used to find the day before first_date so we can
get the close of the day prior to first_date.
We attempt to download data unless we already have data stored at the data
cache for `symbol` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
path = get_data_filepath(get_benchmark_filename(symbol))
# If the path does not exist, it means the first download has not happened
# yet, so don't try to read from 'path'.
if os.path.exists(path):
try:
data = pd.Series.from_csv(path).tz_localize('UTC')
if has_data_for_dates(data, first_date, last_date):
return data
# Don't re-download if we've successfully downloaded and written a
# file in the last hour.
last_download_time = last_modified_time(path)
if (now - last_download_time) <= ONE_HOUR:
logger.warn(
"Refusing to download new benchmark data because a "
"download succeeded at %s." % last_download_time
)
return data
except (OSError, IOError, ValueError) as e:
# These can all be raised by various versions of pandas on various
# classes of malformed input. Treat them all as cache misses.
logger.info(
"Loading data for {path} failed with error [{error}].".format(
path=path, error=e,
)
)
logger.info(
"Cache at {path} does not have data from {start} to {end}.\n"
"Downloading benchmark data for '{symbol}'.",
start=first_date,
end=last_date,
symbol=symbol,
path=path,
)
try:
data = get_benchmark_returns(
symbol,
first_date - trading_day,
last_date,
)
data.to_csv(path)
except (OSError, IOError, HTTPError):
logger.exception('failed to cache the new benchmark returns')
if not has_data_for_dates(data, first_date, last_date):
logger.warn("Still don't have expected data after redownload!")
return data
def ensure_treasury_data(bm_symbol, first_date, last_date, now):
"""
Ensure we have treasury data from treasury module associated with
`bm_symbol`.
Parameters
----------
bm_symbol : str
Benchmark symbol for which we're loading associated treasury curves.
first_date : pd.Timestamp
First date required to be in the cache.
last_date : pd.Timestamp
Last date required to be in the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
We attempt to download data unless we already have data stored in the cache
for `module_name` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
loader_module, filename, source = INDEX_MAPPING.get(
bm_symbol, INDEX_MAPPING['^GSPC']
)
first_date = max(first_date, loader_module.earliest_possible_date())
path = get_data_filepath(filename)
# If the path does not exist, it means the first download has not happened
# yet, so don't try to read from 'path'.
if os.path.exists(path):
try:
data = pd.DataFrame.from_csv(path).tz_localize('UTC')
if has_data_for_dates(data, first_date, last_date):
return data
# Don't re-download if we've successfully downloaded and written a
# file in the last hour.
last_download_time = last_modified_time(path)
if (now - last_download_time) <= ONE_HOUR:
logger.warn(
"Refusing to download new treasury data because a "
"download succeeded at %s." % last_download_time
)
return data
except (OSError, IOError, ValueError) as e:
# These can all be raised by various versions of pandas on various
# classes of malformed input. Treat them all as cache misses.
logger.info(
"Loading data for {path} failed with error [{error}].".format(
path=path, error=e,
)
)
try:
data = loader_module.get_treasury_data(first_date, last_date)
data.to_csv(path)
except (OSError, IOError, HTTPError):
logger.exception('failed to cache treasury data')
if not has_data_for_dates(data, first_date, last_date):
logger.warn("Still don't have expected data after redownload!")
return data
def _load_raw_yahoo_data(indexes=None, stocks=None, start=None, end=None):
"""Load closing prices from yahoo finance.
:Optional:
indexes : dict (Default: {'SPX': '^GSPC'})
Financial indexes to load.
stocks : list (Default: ['AAPL', 'GE', 'IBM', 'MSFT',
'XOM', 'AA', 'JNJ', 'PEP', 'KO'])
Stock closing prices to load.
start : datetime (Default: datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc))
Retrieve prices from start date on.
end : datetime (Default: datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc))
Retrieve prices until end date.
:Note:
This is based on code presented in a talk by Wes McKinney:
http://wesmckinney.com/files/20111017/notebook_output.pdf
"""
assert indexes is not None or stocks is not None, """
must specify stocks or indexes"""
if start is None:
start = pd.datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc)
if start is not None and end is not None:
assert start < end, "start date is later than end date."
data = OrderedDict()
if stocks is not None:
for stock in stocks:
logger.info('Loading stock: {}'.format(stock))
stock_pathsafe = stock.replace(os.path.sep, '--')
cache_filename = "{stock}-{start}-{end}.csv".format(
stock=stock_pathsafe,
start=start,
end=end).replace(':', '-')
cache_filepath = get_cache_filepath(cache_filename)
if os.path.exists(cache_filepath):
stkd = pd.DataFrame.from_csv(cache_filepath)
else:
stkd = DataReader(stock, 'yahoo', start, end).sort_index()
stkd.to_csv(cache_filepath)
data[stock] = stkd
if indexes is not None:
for name, ticker in iteritems(indexes):
logger.info('Loading index: {} ({})'.format(name, ticker))
stkd = DataReader(ticker, 'yahoo', start, end).sort_index()
data[name] = stkd
return data
def load_from_yahoo(indexes=None,
stocks=None,
start=None,
end=None,
adjusted=True):
"""
Loads price data from Yahoo into a dataframe for each of the indicated
assets. By default, 'price' is taken from Yahoo's 'Adjusted Close',
which removes the impact of splits and dividends. If the argument
'adjusted' is False, then the non-adjusted 'close' field is used instead.
:param indexes: Financial indexes to load.
:type indexes: dict
:param stocks: Stock closing prices to load.
:type stocks: list
:param start: Retrieve prices from start date on.
:type start: datetime
:param end: Retrieve prices until end date.
:type end: datetime
:param adjusted: Adjust the price for splits and dividends.
:type adjusted: bool
"""
data = _load_raw_yahoo_data(indexes, stocks, start, end)
if adjusted:
close_key = 'Adj Close'
else:
close_key = 'Close'
df = pd.DataFrame({key: d[close_key] for key, d in iteritems(data)})
df.index = df.index.tz_localize(pytz.utc)
return df
@deprecated(
'load_bars_from_yahoo is deprecated, please register a'
' yahoo_equities data bundle instead',
)
def load_bars_from_yahoo(indexes=None,
stocks=None,
start=None,
end=None,
adjusted=True):
"""
Loads data from Yahoo into a panel with the following
column names for each indicated security:
- open
- high
- low
- close
- volume
- price
Note that 'price' is Yahoo's 'Adjusted Close', which removes the
impact of splits and dividends. If the argument 'adjusted' is True, then
the open, high, low, and close values are adjusted as well.
:param indexes: Financial indexes to load.
:type indexes: dict
:param stocks: Stock closing prices to load.
:type stocks: list
:param start: Retrieve prices from start date on.
:type start: datetime
:param end: Retrieve prices until end date.
:type end: datetime
:param adjusted: Adjust open/high/low/close for splits and dividends.
The 'price' field is always adjusted.
:type adjusted: bool
"""
data = _load_raw_yahoo_data(indexes, stocks, start, end)
panel = pd.Panel(data)
# Rename columns
panel.minor_axis = ['open', 'high', 'low', 'close', 'volume', 'price']
panel.major_axis = panel.major_axis.tz_localize(pytz.utc)
# Adjust data
if adjusted:
adj_cols = ['open', 'high', 'low', 'close']
for ticker in panel.items:
ratio = (panel[ticker]['price'] / panel[ticker]['close'])
ratio_filtered = ratio.fillna(0).values
for col in adj_cols:
panel[ticker][col] *= ratio_filtered
return panel
def load_prices_from_csv(filepath, identifier_col, tz='UTC'):
data = pd.read_csv(filepath, index_col=identifier_col)
data.index = pd.DatetimeIndex(data.index, tz=tz)
data.sort_index(inplace=True)
return data
def load_prices_from_csv_folder(folderpath, identifier_col, tz='UTC'):
data = None
for file in os.listdir(folderpath):
if '.csv' not in file:
continue
raw = load_prices_from_csv(os.path.join(folderpath, file),
identifier_col, tz)
if data is None:
data = raw
else:
data = pd.concat([data, raw], axis=1)
return data
| apache-2.0 |
peterwilletts24/Python-Scripts | plot_scripts/EMBRACE/plot_from_pp.py | 1 | 5237 | """
Load pp, plot and save
"""
import os, sys
#import matplotlib
#matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
#from matplotlib import rc
#from matplotlib.font_manager import FontProperties
#from matplotlib import rcParams
#rc('font', family = 'serif', serif = 'cmr10')
#rc('text', usetex=True)
#rcParams['text.usetex']=True
#rcParams['text.latex.unicode']=True
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
model_name_convert_title = imp.load_source('util', '/home/pwille/python_scripts/modules/model_name_convert_title.py')
unrotate = imp.load_source('util', '/home/pwille/python_scripts/modules/unrotate_pole.py')
pp_file = '3234_mean'
degs_crop_top = 1.7
degs_crop_bottom = 2.5
min_contour = 0
max_contour = 180
tick_interval=20
#
# cmap= cm.s3pcpn_l
divisor=10 # for lat/lon rounding
def main():
#experiment_ids = ['djzns', 'djznq', 'djzny', 'djznw', 'dkhgu', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq','dkbhu' ]
experiment_ids = ['djzny' ]
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
pfile = '/projects/cascade/pwille/moose_retrievals/%s/%s/%s.pp' % (expmin1, experiment_id, pp_file)
#pc = iris(pfile)
pcube = iris.load_cube(pfile)
print pcube
#print pc
# Get min and max latitude/longitude and unrotate to get min/max corners to crop plot automatically - otherwise end with blank bits on the edges
lats = pcube.coord('grid_latitude').points
lons = pcube.coord('grid_longitude').points
cs = pcube.coord_system('CoordSystem')
if isinstance(cs, iris.coord_systems.RotatedGeogCS):
print 'Rotated CS %s' % cs
lon_low= np.min(lons)
lon_high = np.max(lons)
lat_low = np.min(lats)
lat_high = np.max(lats)
lon_corners, lat_corners = np.meshgrid((lon_low, lon_high), (lat_low, lat_high))
lon_corner_u,lat_corner_u = unrotate.unrotate_pole(lon_corners, lat_corners, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
lon_low = lon_corner_u[0,0]
lon_high = lon_corner_u[0,1]
lat_low = lat_corner_u[0,0]
lat_high = lat_corner_u[1,0]
else:
lon_low= np.min(lons)
lon_high = np.max(lons)
lat_low = np.min(lats)
lat_high = np.max(lats)
lon_low_tick=lon_low -(lon_low%divisor)
lon_high_tick=math.ceil(lon_high/divisor)*divisor
lat_low_tick=lat_low - (lat_low%divisor)
lat_high_tick=math.ceil(lat_high/divisor)*divisor
print lat_high_tick
print lat_low_tick
plt.figure(figsize=(8,8))
        cmap = plt.cm.RdBu_r
ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low+degs_crop_bottom,lat_high-degs_crop_top))
clevs = np.linspace(min_contour, max_contour,256)
cont = iplt.contourf(pcube, clevs, cmap=cmap, extend='both')
#plt.clabel(cont, fmt='%d')
#ax.stock_img()
ax.coastlines(resolution='110m', color='#262626')
gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = True
#gl.xlines = False
dx, dy = 10, 10
gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx))
gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 12, 'color':'#262626'}
#gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
gl.ylabel_style = {'size': 12, 'color':'#262626'}
cbar = plt.colorbar(cont, orientation='horizontal', pad=0.05, extend='both', format = '%d')
#cbar.set_label('')
cbar.set_label(pcube.units)
cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
cbar.set_ticklabels(['%d' % i for i in ticks])
main_title=pcube.standard_name.title().replace('_',' ')
model_info=re.sub('(.{75})', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
model_info = re.sub(r'[(\']', ' ', model_info)
model_info = re.sub(r'[\',)]', ' ', model_info)
print model_info
plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=12)
plt.show()
        # plot_diag is never defined in this script, so the figure-directory
        # creation is disabled along with the commented-out savefig below
        # if not os.path.exists('/home/pwille/figures/%s/%s' % (experiment_id, plot_diag)): os.makedirs('/home/pwille/figures/%s/%s' % (experiment_id, plot_diag))
#plt.savefig('/home/pwille/figures/%s/%s/%s%s_%s.png' % (experiment_id, plot_diag, pcube.standard_name.title(), experiment_id, plot_diag), format='png', bbox_inches='tight')
#plt.close()
if __name__ == '__main__':
main()
| mit |
BookChan/content | labs/lab2/cs109style.py | 38 | 1293 | from __future__ import print_function
from IPython.core.display import HTML
from matplotlib import rcParams
#colorbrewer2 Dark2 qualitative color table
dark2_colors = [(0.10588235294117647, 0.6196078431372549, 0.4666666666666667),
(0.8509803921568627, 0.37254901960784315, 0.00784313725490196),
(0.4588235294117647, 0.4392156862745098, 0.7019607843137254),
(0.9058823529411765, 0.1607843137254902, 0.5411764705882353),
(0.4, 0.6509803921568628, 0.11764705882352941),
(0.9019607843137255, 0.6705882352941176, 0.00784313725490196),
(0.6509803921568628, 0.4627450980392157, 0.11372549019607843),
(0.4, 0.4, 0.4)]
def customize_mpl():
"""Tweak matplotlib visual style"""
print("Setting custom matplotlib visual style")
rcParams['figure.figsize'] = (10, 6)
rcParams['figure.dpi'] = 150
rcParams['axes.color_cycle'] = dark2_colors
rcParams['lines.linewidth'] = 2
rcParams['axes.grid'] = True
rcParams['axes.facecolor'] = '#eeeeee'
rcParams['font.size'] = 14
rcParams['patch.edgecolor'] = 'none'
def customize_css():
print("Setting custom CSS for the IPython Notebook")
styles = open('custom.css', 'r').read()
return HTML(styles)
| mit |
jisantuc/MakinScoresEasy | scorer.py | 1 | 4560 | import ConfigParser
import argparse
import re
import pandas as pd
parser = argparse.ArgumentParser(
description=('Create weighted institutional rankings from RePEC'
' based on custom combinations of economic fields')
)
parser.add_argument('--codes', '-c', nargs='*', type=str, default=[],
help='Fields to search')
parser.add_argument('--weights', '-w', nargs='*', type=float, default=[],
help=('Weights to assign to scores in '
'each fields. Must match length '
'of --codes and --codes must be '
'specified'))
parser.add_argument('--outf', '-o', default=None,
help=('File to dump output. If '
'None, writes to stdout'))
parser.add_argument('--config',
help=('Location of config file. Ignores other '
'options if specified.'))
def table_for_code(code, weight=1):
"""
Parameters
==========
code: str
three-letter lowercase NEP field code
Returns
=======
Top rankings from code listed as a pandas DataFrame
"""
base_url = 'https://ideas.repec.org/top/top.%s.html'
scoresdf = pd.read_html(base_url % code.lower(),
encoding='utf-8')[0]
scoresdf.set_index('Institution', inplace=True)
scoresdf['weight'] = weight
scoresdf['weighted_score'] = scoresdf['weight'] * scoresdf['Score']
return scoresdf[
['Score', 'Authors', 'Rank', 'weight', 'weighted_score']
]
def weighted_scores(codes, weights=None):
"""
Weights institutions' scores in fields identified by codes according
to weights given in weights. If an institution isn't present in the
table for one of the codes present in codes, it is given the worst score
from that table.
If codes and weights are not the same length and weights is not None,
throws a ValueError.
Parameters
==========
codes: list of str
three-letter lowercase NEP field codes
weights: list of (positive) numerics
Importance attached to ranking in each code
Returns
=======
Table of weighted scores for each institution as pandas DataFrame
"""
# should be: join + suffixes? merge + suffixes? how does mult-join
# work again?
# alt: make single code score return a dict of {code: df} and use
# keys for suffixes
dfs = {code: table_for_code(code, weight) for code, weight in
zip(codes, weights)}
indices = []
indices = list(reduce(lambda x, y: x | y,
map(lambda x: set(x.index.tolist()), dfs.values())))
dfs = {code: dfs[code].reindex(indices).fillna(dfs[code]['Score'].max())
for code in dfs.keys()}
for val in dfs.values():
print val.columns
df = reduce(lambda x, y: x[['weighted_score']] + y[['weighted_score']],
dfs.values())
return df / sum(weights)
def read_config(config_file):
parser = ConfigParser.ConfigParser()
parser.read(config_file)
section = 'ScoringSettings'
return {
'codes': parser.get(section, 'codes').split(', '),
'weights': map(float, parser.get(section, 'weights').split(', ')),
'outf': parser.get(section, 'outf')
}
if __name__ == '__main__':
args = parser.parse_args()
if args.config:
opts = read_config(args.config)
codes = opts['codes']
weights = opts['weights']
outf = opts['outf']
else:
codes = args.codes if len(args.codes) > 0 else ['inst.all']
weights = args.weights if len(args.weights) > 0 else [1] * len(codes)
outf = args.outf if args.outf else 'stdout'
if len(weights) != len(codes):
raise ValueError(
('Number of weights passed (%d) must equal number of codes '
             '(%d)') % (len(weights), len(codes))
)
scores = weighted_scores(codes, weights)\
.sort_values('weighted_score').round(3)
if outf != 'stdout':
scores.to_csv(outf, encoding='utf-8', header=True)
else:
with pd.option_context('max_rows', len(scores)):
print scores
if len(scores) > pd.get_option('max_row'):
print ('***********************************\n'
'Result df is pretty long. Consider writing to file. Be '
'prepared to do some scrolling.\n'
'***********************************')
| mit |
cuemacro/finmarketpy | finmarketpy_examples/fx_forwards_indices_examples.py | 1 | 8658 | __author__ = 'saeedamen' # Saeed Amen
#
# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
Shows how to use finmarketpy to create total return indices for FX forwards with appropriate roll rules
"""
import pandas as pd
# For plotting
from chartpy import Chart, Style
# For loading market data
from findatapy.market import Market, MarketDataGenerator, MarketDataRequest
from findatapy.timeseries import Calculations
from findatapy.util.loggermanager import LoggerManager
logger = LoggerManager().getLogger(__name__)
chart = Chart(engine='plotly')
market = Market(market_data_generator=MarketDataGenerator())
calculations = Calculations()
# Choose run_example = 0 for everything
# run_example = 1 - creating USDTRY total return index rolling forwards and compare with BBG indices
# run_example = 2 - creating AUDJPY (via AUDUSD and JPYUSD) total return index rolling forwards & compare with BBG indices
run_example = 0
from finmarketpy.curve.fxforwardscurve import FXForwardsCurve
###### Create total return indices plot for USDBRL using forwards
# We shall be using USDBRL 1M forward contracts and rolling them 5 business days before month end
if run_example == 1 or run_example == 0:
cross = 'USDBRL'
# Download more tenors
fx_forwards_tenors = ['1W', '1M', '2M', '3M']
# Get USDBRL data for spot, forwards + depos
md_request = MarketDataRequest(start_date='02 Jan 2007', finish_date='01 Jun 2007',
data_source='bloomberg', cut='NYC', category='fx-forwards-market',
tickers=cross,
fx_forwards_tenor=fx_forwards_tenors,
base_depos_currencies=[cross[0:3], cross[3:6]],
cache_algo='cache_algo_return')
# In case any missing values fill down (particularly can get this for NDFs)
df_market = market.fetch_market(md_request=md_request).fillna(method='ffill')
fx_forwards_curve = FXForwardsCurve()
# Let's trade a 1M forward, and we roll 5 business days (based on both base + terms currency holidays)
# before month end
df_cuemacro_tot_1M = fx_forwards_curve.construct_total_return_index(cross, df_market,
fx_forwards_trading_tenor='1M',
roll_days_before=5,
roll_event='month-end',
roll_months=1,
fx_forwards_tenor_for_interpolation=fx_forwards_tenors,
output_calculation_fields=True)
df_cuemacro_tot_1M.columns = [x.replace('forward-tot', 'forward-tot-1M-cuemacro') for x in df_cuemacro_tot_1M.columns]
# Now do a 3M forward, and we roll 5 business days before end of quarter(based on both base + terms currency holidays)
# before month end
df_cuemacro_tot_3M = fx_forwards_curve.construct_total_return_index(cross, df_market,
fx_forwards_trading_tenor='3M',
roll_days_before=5,
roll_event='month-end',
roll_months=3,
fx_forwards_tenor_for_interpolation=fx_forwards_tenors,
output_calculation_fields=True)
df_cuemacro_tot_3M.columns = [x.replace('forward-tot', 'forward-tot-3M-cuemacro') for x in df_cuemacro_tot_3M.columns]
# Get spot data
md_request.abstract_curve = None
md_request.category = 'fx'
df_spot = market.fetch_market(md_request=md_request)
df_spot.columns = [x + '-spot' for x in df_spot.columns]
# Get Bloomberg calculated total return indices (for spot)
md_request.category = 'fx-tot'
df_bbg_tot = market.fetch_market(md_request)
df_bbg_tot.columns = [x + '-bbg' for x in df_bbg_tot.columns]
# Get Bloomberg calculated total return indices (for 1M forwards rolled)
md_request.category = 'fx-tot-forwards'
df_bbg_tot_forwards = market.fetch_market(md_request)
df_bbg_tot_forwards.columns = [x + '-bbg' for x in df_bbg_tot_forwards.columns]
# Combine into a single data frame and plot; we note that the Cuemacro constructed indices track the Bloomberg
# indices relatively well (both from spot and forwards). Also note the large difference with spot indices.
# Be CAREFUL to fill down before reindexing, because forwards indices are likely to have different publishing dates
df = calculations.join([pd.DataFrame(df_cuemacro_tot_1M[cross + '-forward-tot-1M-cuemacro.close']),
pd.DataFrame(df_cuemacro_tot_3M[cross + '-forward-tot-3M-cuemacro.close']),
df_bbg_tot, df_spot, df_bbg_tot_forwards], how='outer').fillna(method='ffill')
df = calculations.create_mult_index_from_prices(df)
chart.plot(df)
###### Create total return indices plot for AUDJPY using the underlying USD legs (ie. AUDUSD & JPYUSD)
if run_example == 2 or run_example == 0:
cross = 'AUDJPY'
# Download more tenors
fx_forwards_tenors = ['1W', '1M', '2M', '3M']
# Parameters for how to construct total return indices, and the rolling rule
# 1M forward contract, and roll it 5 working days before month end
# We'll be constructing our total return index from AUDUSD and JPYUSD
fx_forwards_curve = FXForwardsCurve(fx_forwards_trading_tenor='1M',
roll_days_before=5,
roll_event='month-end', construct_via_currency='USD',
fx_forwards_tenor_for_interpolation=fx_forwards_tenors,
roll_months=1,
output_calculation_fields=True)
# Get AUDJPY (AUDUSD and JPYUSD) data for spot, forwards + depos and also construct the total returns forward index
md_request = MarketDataRequest(start_date='02 Jan 2007', finish_date='01 Jun 2007',
data_source='bloomberg', cut='NYC', category='fx',
tickers=cross,
fx_forwards_tenor=fx_forwards_tenors,
base_depos_currencies=[cross[0:3], cross[3:6]],
cache_algo='cache_algo_return',
abstract_curve=fx_forwards_curve)
# In case of any missing values, fill down (this is particularly common for NDFs)
df_cuemacro_tot_1M = market.fetch_market(md_request=md_request).fillna(method='ffill')
fx_forwards_curve = FXForwardsCurve()
df_cuemacro_tot_1M.columns = [x.replace('forward-tot', 'forward-tot-1M-cuemacro') for x in df_cuemacro_tot_1M.columns]
# Get spot data
md_request.abstract_curve = None
md_request.category = 'fx'
df_spot = market.fetch_market(md_request=md_request)
df_spot.columns = [x + '-spot' for x in df_spot.columns]
# Get Bloomberg calculated total return indices (for spot)
md_request.category = 'fx-tot'
df_bbg_tot = market.fetch_market(md_request)
df_bbg_tot.columns = [x + '-bbg' for x in df_bbg_tot.columns]
# Get Bloomberg calculated total return indices (for 1M forwards rolled)
md_request.category = 'fx-tot-forwards'
df_bbg_tot_forwards = market.fetch_market(md_request)
df_bbg_tot_forwards.columns = [x + '-bbg' for x in df_bbg_tot_forwards.columns]
# Combine into a single data frame and plot; we note that the Cuemacro constructed indices track the Bloomberg
# indices relatively well (both from spot and forwards). Also note the large difference with spot indices.
# Be CAREFUL to fill down before reindexing, because forwards indices are likely to have different publishing dates
df = calculations.join([pd.DataFrame(df_cuemacro_tot_1M[cross + '-forward-tot-1M-cuemacro.close']),
df_bbg_tot, df_spot, df_bbg_tot_forwards], how='outer').fillna(method='ffill')
df = calculations.create_mult_index_from_prices(df)
chart.plot(df) | apache-2.0 |
binghongcha08/pyQMD | GWP/2D/1.0.1/resample/c.py | 28 | 1767 | ##!/usr/bin/python
import numpy as np
import pylab as plt
import seaborn as sns
sns.set_context('poster')
#with open("traj.dat") as f:
# data = f.read()
#
# data = data.split('\n')
#
# x = [row.split(' ')[0] for row in data]
# y = [row.split(' ')[1] for row in data]
#
# fig = plt.figure()
#
# ax1 = fig.add_subplot(111)
#
# ax1.set_title("Plot title...")
# ax1.set_xlabel('your x label..')
# ax1.set_ylabel('your y label...')
#
# ax1.plot(x,y, c='r', label='the data')
#
# leg = ax1.legend()
#fig = plt.figure()
f, (ax1, ax2) = plt.subplots(2, sharex=True)
#f.subplots_adjust(hspace=0.1)
#plt.subplot(211)
ax1.set_ylim(0,4)
data = np.genfromtxt(fname='q.dat')
#data = np.loadtxt('traj.dat')
for x in range(1,data.shape[1]):
ax1.plot(data[:,0],data[:,x], linewidth=1)
#plt.figure(1)
#plt.plot(x,y1,'-')
#plt.plot(x,y2,'g-')
#plt.xlabel('time')
ax1.set_ylabel('position [bohr]')
#plt.title('traj')
#plt.subplot(212)
data = np.genfromtxt(fname='c.dat')
#data = np.loadtxt('traj.dat')
for x in range(1,16):
ax2.plot(data[:,0],data[:,x], linewidth=1)
ax2.set_xlabel('time [a.u.]')
ax2.set_ylabel('$|c_i|$')
#plt.ylim(-0.2,5)
#plt.subplot(2,2,3)
#data = np.genfromtxt(fname='norm')
#plt.plot(data[:,0],data[:,1],'r-',linewidth=2)
#plt.ylim(0,2)
#plt.subplot(2,2,4)
#data = np.genfromtxt(fname='wf.dat')
#data1 = np.genfromtxt(fname='wf0.dat')
#data0 = np.genfromtxt('../spo_1d/t500')
#plt.plot(data[:,0],data[:,1],'r--',linewidth=2)
#plt.plot(data0[:,0],data0[:,1],'k-',linewidth=2)
#plt.plot(data1[:,0],data1[:,1],'k-.',linewidth=2)
#plt.title('t=100')
#plt.figure(1)
#plt.plot(x,y1,'-')
#plt.plot(x,y2,'g-')
#plt.xlim(0.8,2.1)
#plt.xlabel('x')
#plt.ylabel('$\psi^*\psi$')
plt.savefig('traj.pdf')
plt.show()
| gpl-3.0 |
aewhatley/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 284 | 3265 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
olafhauk/mne-python | mne/source_estimate.py | 2 | 127526 | # Authors: Alexandre Gramfort <[email protected]>
# Matti Hämäläinen <[email protected]>
# Martin Luessi <[email protected]>
# Mads Jensen <[email protected]>
#
# License: BSD (3-clause)
import contextlib
import copy
import os.path as op
from types import GeneratorType
import numpy as np
from scipy import linalg, sparse
from scipy.sparse import coo_matrix, block_diag as sparse_block_diag
from .baseline import rescale
from .cov import Covariance
from .evoked import _get_peak
from .filter import resample
from .io.constants import FIFF
from .surface import (read_surface, _get_ico_surface, mesh_edges,
_project_onto_surface)
from .source_space import (_ensure_src, _get_morph_src_reordering,
_ensure_src_subject, SourceSpaces, _get_src_nn,
_import_nibabel, _get_mri_info_data,
_get_atlas_values, _check_volume_labels,
read_freesurfer_lut)
from .transforms import _get_trans, apply_trans
from .utils import (get_subjects_dir, _check_subject, logger, verbose, _pl,
_time_mask, warn, copy_function_doc_to_method_doc,
fill_doc, _check_option, _validate_type, _check_src_normal,
_check_stc_units, _check_pandas_installed,
_check_pandas_index_arguments, _convert_times, _ensure_int,
_build_data_frame, _check_time_format, _check_path_like,
sizeof_fmt, object_size)
from .viz import (plot_source_estimates, plot_vector_source_estimates,
plot_volume_source_estimates)
from .io.base import TimeMixin
from .io.meas_info import Info
from .externals.h5io import read_hdf5, write_hdf5
def _read_stc(filename):
"""Aux Function."""
with open(filename, 'rb') as fid:
buf = fid.read()
stc = dict()
offset = 0
num_bytes = 4
# read tmin in ms
stc['tmin'] = float(np.frombuffer(buf, dtype=">f4", count=1,
offset=offset))
stc['tmin'] /= 1000.0
offset += num_bytes
# read sampling rate in ms
stc['tstep'] = float(np.frombuffer(buf, dtype=">f4", count=1,
offset=offset))
stc['tstep'] /= 1000.0
offset += num_bytes
# read number of vertices/sources
vertices_n = int(np.frombuffer(buf, dtype=">u4", count=1, offset=offset))
offset += num_bytes
# read the source vector
stc['vertices'] = np.frombuffer(buf, dtype=">u4", count=vertices_n,
offset=offset)
offset += num_bytes * vertices_n
# read the number of timepts
data_n = int(np.frombuffer(buf, dtype=">u4", count=1, offset=offset))
offset += num_bytes
if (vertices_n and # vertices_n can be 0 (empty stc)
((len(buf) // 4 - 4 - vertices_n) % (data_n * vertices_n)) != 0):
raise ValueError('incorrect stc file size')
# read the data matrix
stc['data'] = np.frombuffer(buf, dtype=">f4", count=vertices_n * data_n,
offset=offset)
stc['data'] = stc['data'].reshape([data_n, vertices_n]).T
return stc
def _write_stc(filename, tmin, tstep, vertices, data):
"""Write an STC file.
Parameters
----------
filename : string
The name of the STC file.
tmin : float
The first time point of the data in seconds.
tstep : float
Time between frames in seconds.
vertices : array of integers
Vertex indices (0 based).
data : 2D array
The data matrix (nvert * ntime).
"""
fid = open(filename, 'wb')
# write start time in ms
fid.write(np.array(1000 * tmin, dtype='>f4').tobytes())
# write sampling rate in ms
fid.write(np.array(1000 * tstep, dtype='>f4').tobytes())
# write number of vertices
fid.write(np.array(vertices.shape[0], dtype='>u4').tobytes())
# write the vertex indices
fid.write(np.array(vertices, dtype='>u4').tobytes())
# write the number of timepts
fid.write(np.array(data.shape[1], dtype='>u4').tobytes())
#
# write the data
#
fid.write(np.array(data.T, dtype='>f4').tobytes())
# close the file
fid.close()
def _read_3(fid):
"""Read 3 byte integer from file."""
data = np.fromfile(fid, dtype=np.uint8, count=3).astype(np.int32)
out = np.left_shift(data[0], 16) + np.left_shift(data[1], 8) + data[2]
return out
def _read_w(filename):
"""Read a w file.
w files contain activations or source reconstructions for a single time
point.
Parameters
----------
filename : string
The name of the w file.
Returns
-------
data: dict
The w structure. It has the following keys:
vertices vertex indices (0 based)
data The data matrix (nvert long)
"""
with open(filename, 'rb', buffering=0) as fid: # buffering=0 for np bug
# skip first 2 bytes
fid.read(2)
# read number of vertices/sources (3 byte integer)
vertices_n = int(_read_3(fid))
vertices = np.zeros((vertices_n), dtype=np.int32)
data = np.zeros((vertices_n), dtype=np.float32)
# read the vertices and data
for i in range(vertices_n):
vertices[i] = _read_3(fid)
data[i] = np.fromfile(fid, dtype='>f4', count=1)[0]
w = dict()
w['vertices'] = vertices
w['data'] = data
return w
def _write_3(fid, val):
"""Write 3 byte integer to file."""
f_bytes = np.zeros((3), dtype=np.uint8)
f_bytes[0] = (val >> 16) & 255
f_bytes[1] = (val >> 8) & 255
f_bytes[2] = val & 255
fid.write(f_bytes.tobytes())
def _write_w(filename, vertices, data):
"""Write a w file.
w files contain activations or source reconstructions for a single time
point.
Parameters
----------
filename: string
The name of the w file.
vertices: array of int
Vertex indices (0 based).
data: 1D array
The data array (nvert).
"""
assert (len(vertices) == len(data))
fid = open(filename, 'wb')
# write 2 zero bytes
fid.write(np.zeros((2), dtype=np.uint8).tobytes())
# write number of vertices/sources (3 byte integer)
vertices_n = len(vertices)
_write_3(fid, vertices_n)
# write the vertices and data
for i in range(vertices_n):
_write_3(fid, vertices[i])
# XXX: without float() endianness is wrong, not sure why
fid.write(np.array(float(data[i]), dtype='>f4').tobytes())
# close the file
fid.close()
def read_source_estimate(fname, subject=None):
"""Read a source estimate object.
Parameters
----------
fname : str
Path to (a) source-estimate file(s).
subject : str | None
Name of the subject the source estimate(s) is (are) from.
It is good practice to set this attribute to avoid combining
incompatible labels and SourceEstimates (e.g., ones from other
subjects). Note that due to file specification limitations, the
subject name isn't saved to or loaded from files written to disk.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate | VolSourceEstimate | MixedSourceEstimate
The source estimate object loaded from file.
Notes
-----
- for volume source estimates, ``fname`` should provide the path to a
single file named '*-vl.stc` or '*-vol.stc'
- for surface source estimates, ``fname`` should either provide the
path to the file corresponding to a single hemisphere ('*-lh.stc',
'*-rh.stc') or only specify the asterisk part in these patterns. In any
case, the function expects files for both hemisphere with names
following this pattern.
- for vector surface source estimates, only HDF5 files are supported.
- for mixed source estimates, only HDF5 files are supported.
- for single time point .w files, ``fname`` should follow the same
pattern as for surface estimates, except that files are named
'*-lh.w' and '*-rh.w'.
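Examples
--------
An illustrative sketch only; the file stem below is hypothetical, and both
hemisphere files ('...-lh.stc' and '...-rh.stc') are assumed to exist:
>>> stc = read_source_estimate('sample_audvis-meg')  # doctest: +SKIP
>>> stc.data.shape  # (n_dipoles, n_times)  # doctest: +SKIP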
""" # noqa: E501
fname_arg = fname
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
# make sure corresponding file(s) can be found
ftype = None
if op.exists(fname):
if fname.endswith('-vl.stc') or fname.endswith('-vol.stc') or \
fname.endswith('-vl.w') or fname.endswith('-vol.w'):
ftype = 'volume'
elif fname.endswith('.stc'):
ftype = 'surface'
if fname.endswith(('-lh.stc', '-rh.stc')):
fname = fname[:-7]
else:
err = ("Invalid .stc filename: %r; needs to end with "
"hemisphere tag ('...-lh.stc' or '...-rh.stc')"
% fname)
raise IOError(err)
elif fname.endswith('.w'):
ftype = 'w'
if fname.endswith(('-lh.w', '-rh.w')):
fname = fname[:-5]
else:
err = ("Invalid .w filename: %r; needs to end with "
"hemisphere tag ('...-lh.w' or '...-rh.w')"
% fname)
raise IOError(err)
elif fname.endswith('.h5'):
ftype = 'h5'
fname = fname[:-3]
else:
raise RuntimeError('Unknown extension for file %s' % fname_arg)
if ftype != 'volume':
stc_exist = [op.exists(f)
for f in [fname + '-rh.stc', fname + '-lh.stc']]
w_exist = [op.exists(f)
for f in [fname + '-rh.w', fname + '-lh.w']]
if all(stc_exist) and ftype != 'w':
ftype = 'surface'
elif all(w_exist):
ftype = 'w'
elif op.exists(fname + '.h5'):
ftype = 'h5'
elif op.exists(fname + '-stc.h5'):
ftype = 'h5'
fname += '-stc'
elif any(stc_exist) or any(w_exist):
raise IOError("Hemisphere missing for %r" % fname_arg)
else:
raise IOError("SourceEstimate File(s) not found for: %r"
% fname_arg)
# read the files
if ftype == 'volume': # volume source space
if fname.endswith('.stc'):
kwargs = _read_stc(fname)
elif fname.endswith('.w'):
kwargs = _read_w(fname)
kwargs['data'] = kwargs['data'][:, np.newaxis]
kwargs['tmin'] = 0.0
kwargs['tstep'] = 0.0
else:
raise IOError('Volume source estimate must end with .stc or .w')
kwargs['vertices'] = [kwargs['vertices']]
elif ftype == 'surface': # stc file with surface source spaces
lh = _read_stc(fname + '-lh.stc')
rh = _read_stc(fname + '-rh.stc')
assert lh['tmin'] == rh['tmin']
assert lh['tstep'] == rh['tstep']
kwargs = lh.copy()
kwargs['data'] = np.r_[lh['data'], rh['data']]
kwargs['vertices'] = [lh['vertices'], rh['vertices']]
elif ftype == 'w': # w file with surface source spaces
lh = _read_w(fname + '-lh.w')
rh = _read_w(fname + '-rh.w')
kwargs = lh.copy()
kwargs['data'] = np.atleast_2d(np.r_[lh['data'], rh['data']]).T
kwargs['vertices'] = [lh['vertices'], rh['vertices']]
# w files only have a single time point
kwargs['tmin'] = 0.0
kwargs['tstep'] = 1.0
ftype = 'surface'
elif ftype == 'h5':
kwargs = read_hdf5(fname + '.h5', title='mnepython')
ftype = kwargs.pop('src_type', 'surface')
if isinstance(kwargs['vertices'], np.ndarray):
kwargs['vertices'] = [kwargs['vertices']]
if ftype != 'volume':
# Make sure the vertices are ordered
vertices = kwargs['vertices']
if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):
sidx = [np.argsort(verts) for verts in vertices]
vertices = [verts[idx] for verts, idx in zip(vertices, sidx)]
data = kwargs['data'][np.r_[sidx[0], len(sidx[0]) + sidx[1]]]
kwargs['vertices'] = vertices
kwargs['data'] = data
if 'subject' not in kwargs:
kwargs['subject'] = subject
if subject is not None and subject != kwargs['subject']:
raise RuntimeError('provided subject name "%s" does not match '
'subject name from the file "%s'
% (subject, kwargs['subject']))
if ftype in ('volume', 'discrete'):
klass = VolVectorSourceEstimate
elif ftype == 'mixed':
klass = MixedVectorSourceEstimate
else:
assert ftype == 'surface'
klass = VectorSourceEstimate
if kwargs['data'].ndim < 3:
klass = klass._scalar_class
return klass(**kwargs)
def _get_src_type(src, vertices, warn_text=None):
src_type = None
if src is None:
if warn_text is None:
warn("src should not be None for a robust guess of stc type.")
else:
warn(warn_text)
if isinstance(vertices, list) and len(vertices) == 2:
src_type = 'surface'
elif isinstance(vertices, np.ndarray) or isinstance(vertices, list) \
and len(vertices) == 1:
src_type = 'volume'
elif isinstance(vertices, list) and len(vertices) > 2:
src_type = 'mixed'
else:
src_type = src.kind
assert src_type in ('surface', 'volume', 'mixed', 'discrete')
return src_type
def _make_stc(data, vertices, src_type=None, tmin=None, tstep=None,
subject=None, vector=False, source_nn=None, warn_text=None):
"""Generate a surface, vector-surface, volume or mixed source estimate."""
def guess_src_type():
return _get_src_type(src=None, vertices=vertices, warn_text=warn_text)
src_type = guess_src_type() if src_type is None else src_type
if vector and src_type == 'surface' and source_nn is None:
raise RuntimeError('No source vectors supplied.')
# infer Klass from src_type
if src_type == 'surface':
Klass = VectorSourceEstimate if vector else SourceEstimate
elif src_type in ('volume', 'discrete'):
Klass = VolVectorSourceEstimate if vector else VolSourceEstimate
elif src_type == 'mixed':
Klass = MixedVectorSourceEstimate if vector else MixedSourceEstimate
else:
raise ValueError('vertices has to be either a list with one or more '
'arrays or an array')
# Rotate back for vector source estimates
if vector:
n_vertices = sum(len(v) for v in vertices)
assert data.shape[0] in (n_vertices, n_vertices * 3)
if len(data) == n_vertices:
assert src_type == 'surface' # should only be possible for this
assert source_nn.shape == (n_vertices, 3)
data = data[:, np.newaxis] * source_nn[:, :, np.newaxis]
else:
data = data.reshape((-1, 3, data.shape[-1]))
assert source_nn.shape in ((n_vertices, 3, 3),
(n_vertices * 3, 3))
# This will be an identity transform for volumes, but let's keep
# the code simple and general and just do the matrix mult
data = np.matmul(
np.transpose(source_nn.reshape(n_vertices, 3, 3),
axes=[0, 2, 1]), data)
return Klass(
data=data, vertices=vertices, tmin=tmin, tstep=tstep, subject=subject
)
def _verify_source_estimate_compat(a, b):
"""Make sure two SourceEstimates are compatible for arith. operations."""
compat = False
if type(a) != type(b):
raise ValueError('Cannot combine %s and %s.' % (type(a), type(b)))
if len(a.vertices) == len(b.vertices):
if all(np.array_equal(av, vv)
for av, vv in zip(a.vertices, b.vertices)):
compat = True
if not compat:
raise ValueError('Cannot combine source estimates that do not have '
'the same vertices. Consider using stc.expand().')
if a.subject != b.subject:
raise ValueError('source estimates do not have the same subject '
'names, %r and %r' % (a.subject, b.subject))
class _BaseSourceEstimate(TimeMixin):
_data_ndim = 2
@verbose
def __init__(self, data, vertices, tmin, tstep,
subject=None, verbose=None): # noqa: D102
assert hasattr(self, '_data_ndim'), self.__class__.__name__
assert hasattr(self, '_src_type'), self.__class__.__name__
assert hasattr(self, '_src_count'), self.__class__.__name__
kernel, sens_data = None, None
if isinstance(data, tuple):
if len(data) != 2:
raise ValueError('If data is a tuple it has to be length 2')
kernel, sens_data = data
data = None
if kernel.shape[1] != sens_data.shape[0]:
raise ValueError('kernel (%s) and sens_data (%s) have invalid '
'dimensions'
% (kernel.shape, sens_data.shape))
if sens_data.ndim != 2:
raise ValueError('The sensor data must have 2 dimensions, got '
'%s' % (sens_data.ndim,))
_validate_type(vertices, list, 'vertices')
if self._src_count is not None:
if len(vertices) != self._src_count:
raise ValueError('vertices must be a list with %d entries, '
'got %s' % (self._src_count, len(vertices)))
vertices = [np.array(v, np.int64) for v in vertices] # makes copy
if any(np.any(np.diff(v) <= 0) for v in vertices):
raise ValueError('Vertices must be ordered in increasing order.')
n_src = sum([len(v) for v in vertices])
# safeguard the user against doing something silly
if data is not None:
if data.ndim not in (self._data_ndim, self._data_ndim - 1):
raise ValueError('Data (shape %s) must have %s dimensions for '
'%s' % (data.shape, self._data_ndim,
self.__class__.__name__))
if data.shape[0] != n_src:
raise ValueError(
f'Number of vertices ({n_src}) and stc.data.shape[0] '
f'({data.shape[0]}) must match')
if self._data_ndim == 3:
if data.shape[1] != 3:
raise ValueError(
'Data for VectorSourceEstimate must have '
'shape[1] == 3, got shape %s' % (data.shape,))
if data.ndim == self._data_ndim - 1: # allow upbroadcasting
data = data[..., np.newaxis]
self._data = data
self._tmin = tmin
self._tstep = tstep
self.vertices = vertices
self.verbose = verbose
self._kernel = kernel
self._sens_data = sens_data
self._kernel_removed = False
self._times = None
self._update_times()
self.subject = _check_subject(None, subject, False)
def __repr__(self): # noqa: D105
s = "%d vertices" % (sum(len(v) for v in self.vertices),)
if self.subject is not None:
s += ", subject : %s" % self.subject
s += ", tmin : %s (ms)" % (1e3 * self.tmin)
s += ", tmax : %s (ms)" % (1e3 * self.times[-1])
s += ", tstep : %s (ms)" % (1e3 * self.tstep)
s += ", data shape : %s" % (self.shape,)
sz = sum(object_size(x) for x in (self.vertices + [self.data]))
s += f", ~{sizeof_fmt(sz)}"
return "<%s | %s>" % (type(self).__name__, s)
@fill_doc
def get_peak(self, tmin=None, tmax=None, mode='abs',
vert_as_index=False, time_as_index=False):
"""Get location and latency of peak amplitude.
Parameters
----------
%(get_peak_parameters)s
Returns
-------
pos : int
The vertex exhibiting the maximum response, either ID or index.
latency : float
The latency in seconds.
"""
stc = self.magnitude() if self._data_ndim == 3 else self
if self._n_vertices == 0:
raise RuntimeError('Cannot find peaks with no vertices')
vert_idx, time_idx, _ = _get_peak(
stc.data, self.times, tmin, tmax, mode)
if not vert_as_index:
vert_idx = np.concatenate(self.vertices)[vert_idx]
if not time_as_index:
time_idx = self.times[time_idx]
return vert_idx, time_idx
@verbose
def extract_label_time_course(self, labels, src, mode='auto',
allow_empty=False, verbose=None):
"""Extract label time courses for lists of labels.
This function will extract one time course for each label. The way the
time courses are extracted depends on the mode parameter.
Parameters
----------
%(eltc_labels)s
%(eltc_src)s
%(eltc_mode)s
%(eltc_allow_empty)s
%(verbose_meth)s
Returns
-------
%(eltc_returns)s
See Also
--------
extract_label_time_course : Extract time courses for multiple STCs.
Notes
-----
%(eltc_mode_notes)s
"""
return extract_label_time_course(
self, labels, src, mode=mode, return_generator=False,
allow_empty=allow_empty, verbose=verbose)
@verbose
def apply_baseline(self, baseline=(None, 0), *, verbose=None):
"""Baseline correct source estimate data.
Parameters
----------
%(baseline_stc)s
Defaults to ``(None, 0)``, i.e. beginning of the data until
time point zero.
%(verbose_meth)s
Returns
-------
stc : instance of SourceEstimate
The baseline-corrected source estimate object.
Notes
-----
Baseline correction can be done multiple times.
"""
self.data = rescale(self.data, self.times, baseline, copy=False)
return self
@verbose
def save(self, fname, ftype='h5', verbose=None):
"""Save the full source estimate to an HDF5 file.
Parameters
----------
fname : str
The file name to write the source estimate to, should end in
'-stc.h5'.
ftype : str
File format to use. Currently, the only allowed values is "h5".
%(verbose_meth)s
"""
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
if ftype != 'h5':
raise ValueError('%s objects can only be written as HDF5 files.'
% (self.__class__.__name__,))
if not fname.endswith('.h5'):
fname += '-stc.h5'
write_hdf5(fname,
dict(vertices=self.vertices, data=self.data,
tmin=self.tmin, tstep=self.tstep, subject=self.subject,
src_type=self._src_type),
title='mnepython', overwrite=True)
@copy_function_doc_to_method_doc(plot_source_estimates)
def plot(self, subject=None, surface='inflated', hemi='lh',
colormap='auto', time_label='auto', smoothing_steps=10,
transparent=True, alpha=1.0, time_viewer='auto',
subjects_dir=None,
figure=None, views='auto', colorbar=True, clim='auto',
cortex="classic", size=800, background="black",
foreground=None, initial_time=None, time_unit='s',
backend='auto', spacing='oct6', title=None, show_traces='auto',
src=None, volume_options=1., view_layout='vertical',
add_data_kwargs=None, verbose=None):
brain = plot_source_estimates(
self, subject, surface=surface, hemi=hemi, colormap=colormap,
time_label=time_label, smoothing_steps=smoothing_steps,
transparent=transparent, alpha=alpha, time_viewer=time_viewer,
subjects_dir=subjects_dir, figure=figure, views=views,
colorbar=colorbar, clim=clim, cortex=cortex, size=size,
background=background, foreground=foreground,
initial_time=initial_time, time_unit=time_unit, backend=backend,
spacing=spacing, title=title, show_traces=show_traces,
src=src, volume_options=volume_options, view_layout=view_layout,
add_data_kwargs=add_data_kwargs, verbose=verbose)
return brain
@property
def sfreq(self):
"""Sample rate of the data."""
return 1. / self.tstep
@property
def _n_vertices(self):
return sum(len(v) for v in self.vertices)
def _remove_kernel_sens_data_(self):
"""Remove kernel and sensor space data and compute self._data."""
if self._kernel is not None or self._sens_data is not None:
self._kernel_removed = True
self._data = np.dot(self._kernel, self._sens_data)
self._kernel = None
self._sens_data = None
@fill_doc
def crop(self, tmin=None, tmax=None, include_tmax=True):
"""Restrict SourceEstimate to a time interval.
Parameters
----------
tmin : float | None
The first time point in seconds. If None the first present is used.
tmax : float | None
The last time point in seconds. If None the last present is used.
%(include_tmax)s
Returns
-------
stc : instance of SourceEstimate
The cropped source estimate.
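Examples
--------
Illustrative only, assuming ``stc`` is an existing source estimate whose
time range covers at least 0-0.3 s:
>>> stc.crop(tmin=0., tmax=0.3)  # doctest: +SKIP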
"""
mask = _time_mask(self.times, tmin, tmax, sfreq=self.sfreq,
include_tmax=include_tmax)
self.tmin = self.times[np.where(mask)[0][0]]
if self._kernel is not None and self._sens_data is not None:
self._sens_data = self._sens_data[..., mask]
else:
self.data = self.data[..., mask]
return self # return self for chaining methods
@verbose
def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=1,
verbose=None):
"""Resample data.
If appropriate, an anti-aliasing filter is applied before resampling.
See :ref:`resampling-and-decimating` for more information.
Parameters
----------
sfreq : float
New sample rate to use.
npad : int | str
Amount to pad the start and end of the data.
Can also be "auto" to use a padding that will result in
a power-of-two size (can be much faster).
window : str | tuple
Window to use in resampling. See :func:`scipy.signal.resample`.
%(n_jobs)s
%(verbose_meth)s
Returns
-------
stc : instance of SourceEstimate
The resampled source estimate.
Notes
-----
For some data, it may be more accurate to use npad=0 to reduce
artifacts. This is dataset dependent -- check your data!
Note that the sample rate of the original data is inferred from tstep.
"""
# resampling in sensor instead of source space gives a somewhat
# different result, so we don't allow it
self._remove_kernel_sens_data_()
o_sfreq = 1.0 / self.tstep
data = self.data
if data.dtype == np.float32:
data = data.astype(np.float64)
self.data = resample(data, sfreq, o_sfreq, npad, n_jobs=n_jobs)
# adjust indirectly affected variables
self.tstep = 1.0 / sfreq
return self
@property
def data(self):
"""Numpy array of source estimate data."""
if self._data is None:
# compute the solution the first time the data is accessed and
# remove the kernel and sensor data
self._remove_kernel_sens_data_()
return self._data
@data.setter
def data(self, value):
value = np.asarray(value)
if self._data is not None and value.ndim != self._data.ndim:
raise ValueError('Data array should have %d dimensions.' %
self._data.ndim)
n_verts = sum(len(v) for v in self.vertices)
if value.shape[0] != n_verts:
raise ValueError('The first dimension of the data array must '
'match the number of vertices (%d != %d)' %
(value.shape[0], n_verts))
self._data = value
self._update_times()
@property
def shape(self):
"""Shape of the data."""
if self._data is not None:
return self._data.shape
return (self._kernel.shape[0], self._sens_data.shape[1])
@property
def tmin(self):
"""The first timestamp."""
return self._tmin
@tmin.setter
def tmin(self, value):
self._tmin = float(value)
self._update_times()
@property
def tstep(self):
"""The change in time between two consecutive samples (1 / sfreq)."""
return self._tstep
@tstep.setter
def tstep(self, value):
if value <= 0:
raise ValueError('.tstep must be greater than 0.')
self._tstep = float(value)
self._update_times()
@property
def times(self):
"""A timestamp for each sample."""
return self._times
@times.setter
def times(self, value):
raise ValueError('You cannot write to the .times attribute directly. '
'This property automatically updates whenever '
'.tmin, .tstep or .data changes.')
def _update_times(self):
"""Update the times attribute after changing tmin, tmax, or tstep."""
self._times = self.tmin + (self.tstep * np.arange(self.shape[-1]))
self._times.flags.writeable = False
def __add__(self, a):
"""Add source estimates."""
stc = self.copy()
stc += a
return stc
def __iadd__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data += a.data
else:
self.data += a
return self
def mean(self):
"""Make a summary stc file with mean over time points.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The modified stc.
"""
out = self.sum()
out /= len(self.times)
return out
def sum(self):
"""Make a summary stc file with sum over time points.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The modified stc.
"""
data = self.data
tmax = self.tmin + self.tstep * data.shape[-1]
tmin = (self.tmin + tmax) / 2.
tstep = tmax - self.tmin
sum_stc = self.__class__(self.data.sum(axis=-1, keepdims=True),
vertices=self.vertices, tmin=tmin,
tstep=tstep, subject=self.subject)
return sum_stc
def __sub__(self, a):
"""Subtract source estimates."""
stc = self.copy()
stc -= a
return stc
def __isub__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data -= a.data
else:
self.data -= a
return self
def __truediv__(self, a): # noqa: D105
return self.__div__(a)
def __div__(self, a): # noqa: D105
"""Divide source estimates."""
stc = self.copy()
stc /= a
return stc
def __itruediv__(self, a): # noqa: D105
return self.__idiv__(a)
def __idiv__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data /= a.data
else:
self.data /= a
return self
def __mul__(self, a):
"""Multiply source estimates."""
stc = self.copy()
stc *= a
return stc
def __imul__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data *= a.data
else:
self.data *= a
return self
def __pow__(self, a): # noqa: D105
stc = self.copy()
stc **= a
return stc
def __ipow__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
self.data **= a
return self
def __radd__(self, a): # noqa: D105
return self + a
def __rsub__(self, a): # noqa: D105
return self - a
def __rmul__(self, a): # noqa: D105
return self * a
def __rdiv__(self, a): # noqa: D105
return self / a
def __neg__(self): # noqa: D105
"""Negate the source estimate."""
stc = self.copy()
stc._remove_kernel_sens_data_()
stc.data *= -1
return stc
def __pos__(self): # noqa: D105
return self
def __abs__(self):
"""Compute the absolute value of the data.
Returns
-------
stc : instance of _BaseSourceEstimate
A version of the source estimate, where the data attribute is set
to abs(self.data).
"""
stc = self.copy()
stc._remove_kernel_sens_data_()
stc._data = abs(stc._data)
return stc
def sqrt(self):
"""Take the square root.
Returns
-------
stc : instance of SourceEstimate
A copy of the SourceEstimate with sqrt(data).
"""
return self ** (0.5)
def copy(self):
"""Return copy of source estimate instance.
Returns
-------
stc : instance of SourceEstimate
A copy of the source estimate.
"""
return copy.deepcopy(self)
def bin(self, width, tstart=None, tstop=None, func=np.mean):
"""Return a source estimate object with data summarized over time bins.
Time bins of ``width`` seconds. This method is intended for
visualization only. No filter is applied to the data before binning,
making the method inappropriate as a tool for downsampling data.
Parameters
----------
width : scalar
Width of the individual bins in seconds.
tstart : scalar | None
Time point where the first bin starts. The default is the first
time point of the stc.
tstop : scalar | None
Last possible time point contained in a bin (if the last bin would
be shorter than width it is dropped). The default is the last time
point of the stc.
func : callable
Function that is applied to summarize the data. Needs to accept a
numpy.array as first input and an ``axis`` keyword argument.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The binned source estimate.
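Examples
--------
Illustrative only; averaging an assumed existing ``stc`` into
non-overlapping 50 ms bins:
>>> stc_binned = stc.bin(width=0.05)  # doctest: +SKIP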
"""
if tstart is None:
tstart = self.tmin
if tstop is None:
tstop = self.times[-1]
times = np.arange(tstart, tstop + self.tstep, width)
nt = len(times) - 1
data = np.empty(self.shape[:-1] + (nt,), dtype=self.data.dtype)
for i in range(nt):
idx = (self.times >= times[i]) & (self.times < times[i + 1])
data[..., i] = func(self.data[..., idx], axis=-1)
tmin = times[0] + width / 2.
stc = self.copy()
stc._data = data
stc.tmin = tmin
stc.tstep = width
return stc
def transform_data(self, func, idx=None, tmin_idx=None, tmax_idx=None):
"""Get data after a linear (time) transform has been applied.
The transform is applied to each source time course independently.
Parameters
----------
func : callable
The transform to be applied, including parameters (see, e.g.,
:func:`functools.partial`). The first parameter of the function is
the input data. The first return value is the transformed data,
remaining outputs are ignored. The first dimension of the
transformed data has to be the same as the first dimension of the
input data.
idx : array | None
Indices of source time courses for which to compute transform.
If None, all time courses are used.
tmin_idx : int | None
Index of first time point to include. If None, the index of the
first time point is used.
tmax_idx : int | None
Index of the first time point not to include. If None, time points
up to (and including) the last time point are included.
Returns
-------
data_t : ndarray
The transformed data.
Notes
-----
Applying transforms can be significantly faster if the
SourceEstimate object was created using "(kernel, sens_data)", for
the "data" parameter as the transform is applied in sensor space.
Inverse methods, e.g., "apply_inverse_epochs", or "apply_lcmv_epochs"
do this automatically (if possible).
"""
if idx is None:
# use all time courses by default
idx = slice(None, None)
if self._kernel is None and self._sens_data is None:
if self._kernel_removed:
warn('Performance can be improved by not accessing the data '
'attribute before calling this method.')
# transform source space data directly
data_t = func(self.data[idx, ..., tmin_idx:tmax_idx])
if isinstance(data_t, tuple):
# use only first return value
data_t = data_t[0]
else:
# apply transform in sensor space
sens_data_t = func(self._sens_data[:, tmin_idx:tmax_idx])
if isinstance(sens_data_t, tuple):
# use only first return value
sens_data_t = sens_data_t[0]
# apply inverse
data_shape = sens_data_t.shape
if len(data_shape) > 2:
# flatten the last dimensions
sens_data_t = sens_data_t.reshape(data_shape[0],
np.prod(data_shape[1:]))
data_t = np.dot(self._kernel[idx, :], sens_data_t)
# restore original shape if necessary
if len(data_shape) > 2:
data_t = data_t.reshape(data_t.shape[0], *data_shape[1:])
return data_t
def transform(self, func, idx=None, tmin=None, tmax=None, copy=False):
"""Apply linear transform.
The transform is applied to each source time course independently.
Parameters
----------
func : callable
The transform to be applied, including parameters (see, e.g.,
:func:`functools.partial`). The first parameter of the function is
the input data. The first two dimensions of the transformed data
should be (i) vertices and (ii) time. See Notes for details.
idx : array | None
Indices of source time courses for which to compute transform.
If None, all time courses are used.
tmin : float | int | None
First time point to include (ms). If None, self.tmin is used.
tmax : float | int | None
Last time point to include (ms). If None, self.tmax is used.
copy : bool
If True, return a new instance of SourceEstimate instead of
modifying the input inplace.
Returns
-------
stcs : SourceEstimate | VectorSourceEstimate | list
The transformed stc or, in the case of transforms which yield
N-dimensional output (where N > 2), a list of stcs. For a list,
copy must be True.
Notes
-----
Transforms which yield 3D
output (e.g. time-frequency transforms) are valid, so long as the
first two dimensions are vertices and time. In this case, the
copy parameter must be True and a list of
SourceEstimates, rather than a single instance of SourceEstimate,
will be returned, one for each index of the 3rd dimension of the
transformed data. In the case of transforms yielding 2D output
(e.g. filtering), the user has the option of modifying the input
inplace (copy = False) or returning a new instance of
SourceEstimate (copy = True) with the transformed data.
Applying transforms can be significantly faster if the
SourceEstimate object was created using "(kernel, sens_data)", for
the "data" parameter as the transform is applied in sensor space.
Inverse methods, e.g., "apply_inverse_epochs", or "apply_lcmv_epochs"
do this automatically (if possible).
"""
# min and max data indices to include
times = 1000. * self.times
t_idx = np.where(_time_mask(times, tmin, tmax, sfreq=self.sfreq))[0]
if tmin is None:
tmin_idx = None
else:
tmin_idx = t_idx[0]
if tmax is None:
tmax_idx = None
else:
# +1, because upper boundary needs to include the last sample
tmax_idx = t_idx[-1] + 1
data_t = self.transform_data(func, idx=idx, tmin_idx=tmin_idx,
tmax_idx=tmax_idx)
# account for change in n_vertices
if idx is not None:
idx_lh = idx[idx < len(self.lh_vertno)]
idx_rh = idx[idx >= len(self.lh_vertno)] - len(self.lh_vertno)
verts_lh = self.lh_vertno[idx_lh]
verts_rh = self.rh_vertno[idx_rh]
else:
verts_lh = self.lh_vertno
verts_rh = self.rh_vertno
verts = [verts_lh, verts_rh]
tmin_idx = 0 if tmin_idx is None else tmin_idx
tmin = self.times[tmin_idx]
if data_t.ndim > 2:
# return list of stcs if transformed data has dimensionality > 2
if copy:
stcs = [SourceEstimate(data_t[:, :, a], verts, tmin,
self.tstep, self.subject)
for a in range(data_t.shape[-1])]
else:
raise ValueError('copy must be True if transformed data has '
'more than 2 dimensions')
else:
# return new or overwritten stc
stcs = self if not copy else self.copy()
stcs.vertices = verts
stcs.data = data_t
stcs.tmin = tmin
return stcs
@fill_doc
def to_data_frame(self, index=None, scalings=None,
long_format=False, time_format='ms'):
"""Export data in tabular structure as a pandas DataFrame.
Vertices are converted to columns in the DataFrame. By default,
an additional column "time" is added, unless ``index='time'``
(in which case time values form the DataFrame's index).
Parameters
----------
%(df_index_evk)s
Defaults to ``None``.
%(df_scalings)s
%(df_longform_stc)s
%(df_time_format)s
.. versionadded:: 0.20
Returns
-------
%(df_return)s
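Examples
--------
Illustrative only, for an assumed existing ``stc``:
>>> df = stc.to_data_frame(long_format=True)  # doctest: +SKIP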
"""
# check pandas once here, instead of in each private utils function
pd = _check_pandas_installed() # noqa
# arg checking
valid_index_args = ['time', 'subject']
valid_time_formats = ['ms', 'timedelta']
index = _check_pandas_index_arguments(index, valid_index_args)
time_format = _check_time_format(time_format, valid_time_formats)
# get data
data = self.data.T
times = self.times
# prepare extra columns / multiindex
mindex = list()
default_index = ['time']
if self.subject is not None:
default_index = ['subject', 'time']
mindex.append(('subject', np.repeat(self.subject, data.shape[0])))
times = _convert_times(self, times, time_format)
mindex.append(('time', times))
# triage surface vs volume source estimates
col_names = list()
kinds = ['VOL'] * len(self.vertices)
if isinstance(self, (_BaseSurfaceSourceEstimate,
_BaseMixedSourceEstimate)):
kinds[:2] = ['LH', 'RH']
for ii, (kind, vertno) in enumerate(zip(kinds, self.vertices)):
col_names.extend(['{}_{}'.format(kind, vert) for vert in vertno])
# build DataFrame
df = _build_data_frame(self, data, None, long_format, mindex, index,
default_index=default_index,
col_names=col_names, col_kind='source')
return df
def _center_of_mass(vertices, values, hemi, surf, subject, subjects_dir,
restrict_vertices):
"""Find the center of mass on a surface."""
if (values == 0).all() or (values < 0).any():
raise ValueError('All values must be non-negative and at least one '
'must be non-zero, cannot compute COM')
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
surf = read_surface(op.join(subjects_dir, subject, 'surf',
hemi + '.' + surf))
if restrict_vertices is True:
restrict_vertices = vertices
elif restrict_vertices is False:
restrict_vertices = np.arange(surf[0].shape[0])
elif isinstance(restrict_vertices, SourceSpaces):
idx = 1 if restrict_vertices.kind == 'surface' and hemi == 'rh' else 0
restrict_vertices = restrict_vertices[idx]['vertno']
else:
restrict_vertices = np.array(restrict_vertices, int)
pos = surf[0][vertices, :].T
c_o_m = np.sum(pos * values, axis=1) / np.sum(values)
vertex = np.argmin(np.sqrt(np.mean((surf[0][restrict_vertices, :] -
c_o_m) ** 2, axis=1)))
vertex = restrict_vertices[vertex]
return vertex
@fill_doc
class _BaseSurfaceSourceEstimate(_BaseSourceEstimate):
"""Abstract base class for surface source estimates.
Parameters
----------
data : array
The data in source space.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
data : array
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
"""
_src_type = 'surface'
_src_count = 2
@property
def lh_data(self):
"""Left hemisphere data."""
return self.data[:len(self.lh_vertno)]
@property
def rh_data(self):
"""Right hemisphere data."""
return self.data[len(self.lh_vertno):]
@property
def lh_vertno(self):
"""Left hemisphere vertno."""
return self.vertices[0]
@property
def rh_vertno(self):
"""Right hemisphere vertno."""
return self.vertices[1]
def _hemilabel_stc(self, label):
if label.hemi == 'lh':
stc_vertices = self.vertices[0]
else:
stc_vertices = self.vertices[1]
# find index of the Label's vertices
idx = np.nonzero(np.in1d(stc_vertices, label.vertices))[0]
# find output vertices
vertices = stc_vertices[idx]
# find data
if label.hemi == 'rh':
values = self.data[idx + len(self.vertices[0])]
else:
values = self.data[idx]
return vertices, values
def in_label(self, label):
"""Get a source estimate object restricted to a label.
SourceEstimate contains the time course of
activation of all sources inside the label.
Parameters
----------
label : Label | BiHemiLabel
The label (as created for example by mne.read_label). If the label
does not match any sources in the SourceEstimate, a ValueError is
raised.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The source estimate restricted to the given label.
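Examples
--------
Illustrative only, assuming ``stc`` and a matching ``label`` (an
:class:`mne.Label`) already exist:
>>> stc_label = stc.in_label(label)  # doctest: +SKIP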
"""
# make sure label and stc are compatible
from .label import Label, BiHemiLabel
_validate_type(label, (Label, BiHemiLabel), 'label')
if label.subject is not None and self.subject is not None \
and label.subject != self.subject:
raise RuntimeError('label and stc must have same subject names, '
'currently "%s" and "%s"' % (label.subject,
self.subject))
if label.hemi == 'both':
lh_vert, lh_val = self._hemilabel_stc(label.lh)
rh_vert, rh_val = self._hemilabel_stc(label.rh)
vertices = [lh_vert, rh_vert]
values = np.vstack((lh_val, rh_val))
elif label.hemi == 'lh':
lh_vert, values = self._hemilabel_stc(label)
vertices = [lh_vert, np.array([], int)]
else:
assert label.hemi == 'rh'
rh_vert, values = self._hemilabel_stc(label)
vertices = [np.array([], int), rh_vert]
if sum([len(v) for v in vertices]) == 0:
raise ValueError('No vertices match the label in the stc file')
label_stc = self.__class__(values, vertices=vertices, tmin=self.tmin,
tstep=self.tstep, subject=self.subject)
return label_stc
def expand(self, vertices):
"""Expand SourceEstimate to include more vertices.
This will add rows to stc.data (zero-filled) and modify stc.vertices
to include all vertices in stc.vertices and the input vertices.
Parameters
----------
vertices : list of array
New vertices to add. Can also contain old values.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The modified stc (note: method operates inplace).
"""
if not isinstance(vertices, list):
raise TypeError('vertices must be a list')
if not len(self.vertices) == len(vertices):
raise ValueError('vertices must have the same length as '
'stc.vertices')
# can no longer use kernel and sensor data
self._remove_kernel_sens_data_()
inserters = list()
offsets = [0]
for vi, (v_old, v_new) in enumerate(zip(self.vertices, vertices)):
v_new = np.setdiff1d(v_new, v_old)
inds = np.searchsorted(v_old, v_new)
# newer numpy might overwrite inds after np.insert, copy here
inserters += [inds.copy()]
offsets += [len(v_old)]
self.vertices[vi] = np.insert(v_old, inds, v_new)
inds = [ii + offset for ii, offset in zip(inserters, offsets[:-1])]
inds = np.concatenate(inds)
new_data = np.zeros((len(inds),) + self.data.shape[1:])
self.data = np.insert(self.data, inds, new_data, axis=0)
return self
@verbose
def to_original_src(self, src_orig, subject_orig=None,
subjects_dir=None, verbose=None):
"""Get a source estimate from morphed source to the original subject.
Parameters
----------
src_orig : instance of SourceSpaces
The original source spaces that were morphed to the current
subject.
subject_orig : str | None
The original subject. For most source spaces this shouldn't need
to be provided, since it is stored in the source space itself.
%(subjects_dir)s
%(verbose_meth)s
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The transformed source estimate.
See Also
--------
morph_source_spaces
Notes
-----
.. versionadded:: 0.10.0
"""
if self.subject is None:
raise ValueError('stc.subject must be set')
src_orig = _ensure_src(src_orig, kind='surface')
subject_orig = _ensure_src_subject(src_orig, subject_orig)
data_idx, vertices = _get_morph_src_reordering(
self.vertices, src_orig, subject_orig, self.subject, subjects_dir)
return self.__class__(self._data[data_idx], vertices,
self.tmin, self.tstep, subject_orig)
@fill_doc
def get_peak(self, hemi=None, tmin=None, tmax=None, mode='abs',
vert_as_index=False, time_as_index=False):
"""Get location and latency of peak amplitude.
Parameters
----------
hemi : {'lh', 'rh', None}
The hemi to be considered. If None, the entire source space is
considered.
%(get_peak_parameters)s
Returns
-------
pos : int
The vertex exhibiting the maximum response, either ID or index.
latency : float | int
The time point of the maximum response, either latency in seconds
or index.
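Examples
--------
Illustrative only, for an assumed existing surface ``stc``:
>>> vertno, latency = stc.get_peak(hemi='lh')  # doctest: +SKIP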
"""
_check_option('hemi', hemi, ('lh', 'rh', None))
vertex_offset = 0
if hemi is not None:
if hemi == 'lh':
data = self.lh_data
vertices = [self.lh_vertno, []]
else:
vertex_offset = len(self.vertices[0])
data = self.rh_data
vertices = [[], self.rh_vertno]
meth = self.__class__(
data, vertices, self.tmin, self.tstep).get_peak
else:
meth = super().get_peak
out = meth(tmin=tmin, tmax=tmax, mode=mode,
vert_as_index=vert_as_index,
time_as_index=time_as_index)
if vertex_offset and vert_as_index:
out = (out[0] + vertex_offset, out[1])
return out
@fill_doc
class SourceEstimate(_BaseSurfaceSourceEstimate):
"""Container for surface source estimates.
Parameters
----------
data : array of shape (n_dipoles, n_times) | tuple, shape (2,)
The data in source space. When it is a single array, the
left hemisphere is stored in data[:len(vertices[0])] and the right
hemisphere is stored in data[-len(vertices[1]):].
When data is a tuple, it contains two arrays:
- "kernel" shape (n_vertices, n_sensors) and
- "sens_data" shape (n_sensors, n_times).
In this case, the source space data corresponds to
``np.dot(kernel, sens_data)``.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of array, shape (2,)
The indices of the dipoles in the left and right source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
VectorSourceEstimate : A container for vector source estimates.
VolSourceEstimate : A container for volume source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
"""
@verbose
def save(self, fname, ftype='stc', verbose=None):
"""Save the source estimates to a file.
Parameters
----------
fname : str
The stem of the file name. The file names used for surface source
spaces are obtained by adding "-lh.stc" and "-rh.stc" (or "-lh.w"
and "-rh.w") to the stem provided, for the left and the right
hemisphere, respectively.
ftype : str
File format to use. Allowed values are "stc" (default), "w",
and "h5". The "w" format only supports a single time point.
%(verbose_meth)s
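Examples
--------
Illustrative only; the file stem here is hypothetical:
>>> stc.save('sample_audvis-meg', ftype='stc')  # doctest: +SKIP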
"""
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
_check_option('ftype', ftype, ['stc', 'w', 'h5'])
lh_data = self.data[:len(self.lh_vertno)]
rh_data = self.data[-len(self.rh_vertno):]
if ftype == 'stc':
if np.iscomplexobj(self.data):
raise ValueError("Cannot save complex-valued STC data in "
"FIFF format; please set ftype='h5' to save "
"in HDF5 format instead, or cast the data to "
"real numbers before saving.")
logger.info('Writing STC to disk...')
_write_stc(fname + '-lh.stc', tmin=self.tmin, tstep=self.tstep,
vertices=self.lh_vertno, data=lh_data)
_write_stc(fname + '-rh.stc', tmin=self.tmin, tstep=self.tstep,
vertices=self.rh_vertno, data=rh_data)
elif ftype == 'w':
if self.shape[1] != 1:
raise ValueError('w files can only contain a single time '
'point')
logger.info('Writing STC to disk (w format)...')
_write_w(fname + '-lh.w', vertices=self.lh_vertno,
data=lh_data[:, 0])
_write_w(fname + '-rh.w', vertices=self.rh_vertno,
data=rh_data[:, 0])
elif ftype == 'h5':
super().save(fname)
logger.info('[done]')
@verbose
def estimate_snr(self, info, fwd, cov, verbose=None):
r"""Compute time-varying SNR in the source space.
This function should only be used with source estimates with units
nanoAmperes (i.e., MNE-like solutions, *not* dSPM or sLORETA).
.. warning:: This function currently only works properly for fixed
orientation.
Parameters
----------
info : instance Info
The measurement info.
fwd : instance of Forward
The forward solution used to create the source estimate.
cov : instance of Covariance
The noise covariance used to estimate the resting cortical
activations. Should be an evoked covariance, not empty room.
%(verbose)s
Returns
-------
snr_stc : instance of SourceEstimate
The source estimate with the SNR computed.
Notes
-----
We define the SNR in decibels for each source location at each
time point as:
.. math::
{\rm SNR} = 10\log_{10}\left[\frac{a^2}{N}\sum_k\frac{b_k^2}{s_k^2}\right]
where :math:`b_k` is the signal on sensor :math:`k` provided by the
forward model for a source with unit amplitude, :math:`a` is the
source amplitude, :math:`N` is the number of sensors, and
:math:`s_k^2` is the noise variance on sensor :math:`k`.
References
----------
.. [1] Goldenholz, D. M., Ahlfors, S. P., Hämäläinen, M. S., Sharon,
D., Ishitobi, M., Vaina, L. M., & Stufflebeam, S. M. (2009).
Mapping the Signal-To-Noise-Ratios of Cortical Sources in
Magnetoencephalography and Electroencephalography.
Human Brain Mapping, 30(4), 1077–1086. doi:10.1002/hbm.20571
"""
from .forward import convert_forward_solution, Forward
from .minimum_norm.inverse import _prepare_forward
_validate_type(fwd, Forward, 'fwd')
_validate_type(info, Info, 'info')
_validate_type(cov, Covariance, 'cov')
_check_stc_units(self)
if (self.data >= 0).all():
            warn('This STC appears to be from a free-orientation inverse; the '
                 'SNR estimate is currently only valid for fixed orientation')
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=False)
# G is gain matrix [ch x src], cov is noise covariance [ch x ch]
G, _, _, _, _, _, _, cov, _ = _prepare_forward(
fwd, info, cov, fixed=True, loose=0, rank=None, pca=False,
use_cps=True, exp=None, limit_depth_chs=False, combine_xyz='fro',
allow_fixed_depth=False, limit=None)
G = G['sol']['data']
n_channels = cov['dim'] # number of sensors/channels
b_k2 = (G * G).T
s_k2 = np.diag(cov['data'])
scaling = (1 / n_channels) * np.sum(b_k2 / s_k2, axis=1, keepdims=True)
snr_stc = self.copy()
snr_stc._data[:] = 10 * np.log10((self.data * self.data) * scaling)
return snr_stc
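    # Editor's usage sketch: for a fixed-orientation, MNE-type (nAm) solution
    # ``stc`` with matching ``info``, ``fwd`` and evoked ``cov`` objects, the
    # per-source SNR in dB could be inspected as:
    #
    #     snr_stc = stc.estimate_snr(info, fwd, cov)
    #     print(snr_stc.data.max(), snr_stc.data.mean())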
@fill_doc
def center_of_mass(self, subject=None, hemi=None, restrict_vertices=False,
subjects_dir=None, surf='sphere'):
"""Compute the center of mass of activity.
This function computes the spatial center of mass on the surface
as well as the temporal center of mass as in [1]_.
.. note:: All activity must occur in a single hemisphere, otherwise
an error is raised. The "mass" of each point in space for
computing the spatial center of mass is computed by summing
across time, and vice-versa for each point in time in
computing the temporal center of mass. This is useful for
quantifying spatio-temporal cluster locations, especially
when combined with :func:`mne.vertex_to_mni`.
Parameters
----------
subject : str | None
The subject the stc is defined for.
hemi : int, or None
Calculate the center of mass for the left (0) or right (1)
hemisphere. If None, one of the hemispheres must be all zeroes,
and the center of mass will be calculated for the other
hemisphere (useful for getting COM for clusters).
restrict_vertices : bool | array of int | instance of SourceSpaces
If True, returned vertex will be one from stc. Otherwise, it could
be any vertex from surf. If an array of int, the returned vertex
will come from that array. If instance of SourceSpaces (as of
0.13), the returned vertex will be from the given source space.
            For most accurate estimates, do not restrict vertices.
%(subjects_dir)s
surf : str
The surface to use for Euclidean distance center of mass
finding. The default here is "sphere", which finds the center
of mass on the spherical surface to help avoid potential issues
with cortical folding.
Returns
-------
vertex : int
Vertex of the spatial center of mass for the inferred hemisphere,
with each vertex weighted by the sum of the stc across time. For a
boolean stc, then, this would be weighted purely by the duration
each vertex was active.
hemi : int
Hemisphere the vertex was taken from.
t : float
Time of the temporal center of mass (weighted by the sum across
source vertices).
See Also
--------
mne.Label.center_of_mass
mne.vertex_to_mni
References
----------
.. [1] Larson and Lee, "The cortical dynamics underlying effective
switching of auditory spatial attention", NeuroImage 2012.
"""
if not isinstance(surf, str):
raise TypeError('surf must be a string, got %s' % (type(surf),))
subject = _check_subject(self.subject, subject)
if np.any(self.data < 0):
raise ValueError('Cannot compute COM with negative values')
values = np.sum(self.data, axis=1) # sum across time
vert_inds = [np.arange(len(self.vertices[0])),
np.arange(len(self.vertices[1])) + len(self.vertices[0])]
if hemi is None:
hemi = np.where(np.array([np.sum(values[vi])
for vi in vert_inds]))[0]
if not len(hemi) == 1:
raise ValueError('Could not infer hemisphere')
hemi = hemi[0]
_check_option('hemi', hemi, [0, 1])
vertices = self.vertices[hemi]
values = values[vert_inds[hemi]] # left or right
del vert_inds
vertex = _center_of_mass(
vertices, values, hemi=['lh', 'rh'][hemi], surf=surf,
subject=subject, subjects_dir=subjects_dir,
restrict_vertices=restrict_vertices)
# do time center of mass by using the values across space
masses = np.sum(self.data, axis=0).astype(float)
t_ind = np.sum(masses * np.arange(self.shape[1])) / np.sum(masses)
t = self.tmin + self.tstep * t_ind
return vertex, hemi, t
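# Editor's usage sketch: for a single-hemisphere, non-negative ``stc`` (e.g. a
# thresholded cluster summary), with a hypothetical 'sample' subject:
#
#     vertex, hemi, t = stc.center_of_mass(subject='sample',
#                                          subjects_dir=subjects_dir)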
class _BaseVectorSourceEstimate(_BaseSourceEstimate):
_data_ndim = 3
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None): # noqa: D102
assert hasattr(self, '_scalar_class')
super().__init__(data, vertices, tmin, tstep, subject, verbose)
def magnitude(self):
"""Compute magnitude of activity without directionality.
Returns
-------
stc : instance of SourceEstimate
The source estimate without directionality information.
"""
data_mag = np.linalg.norm(self.data, axis=1)
return self._scalar_class(
data_mag, self.vertices, self.tmin, self.tstep, self.subject,
self.verbose)
def _get_src_normals(self, src, use_cps):
normals = np.vstack([_get_src_nn(s, use_cps, v) for s, v in
zip(src, self.vertices)])
return normals
@fill_doc
def project(self, directions, src=None, use_cps=True):
"""Project the data for each vertex in a given direction.
Parameters
----------
directions : ndarray, shape (n_vertices, 3) | str
Can be:
- ``'normal'``
Project onto the source space normals.
- ``'pca'``
SVD will be used to project onto the direction of maximal
power for each source.
- :class:`~numpy.ndarray`, shape (n_vertices, 3)
Projection directions for each source.
src : instance of SourceSpaces | None
The source spaces corresponding to the source estimate.
Not used when ``directions`` is an array, optional when
``directions='pca'``.
%(use_cps)s
Should be the same value that was used when the forward model
was computed (typically True).
Returns
-------
stc : instance of SourceEstimate
The projected source estimate.
directions : ndarray, shape (n_vertices, 3)
The directions that were computed (or just used).
Notes
-----
When using SVD, there is a sign ambiguity for the direction of maximal
power. When ``src is None``, the direction is chosen that makes the
resulting time waveform sum positive (i.e., have positive amplitudes).
When ``src`` is provided, the directions are flipped in the direction
of the source normals, i.e., outward from cortex for surface source
spaces and in the +Z / superior direction for volume source spaces.
.. versionadded:: 0.21
"""
_validate_type(directions, (str, np.ndarray), 'directions')
_validate_type(src, (None, SourceSpaces), 'src')
if isinstance(directions, str):
_check_option('directions', directions, ('normal', 'pca'),
extra='when str')
if directions == 'normal':
if src is None:
raise ValueError(
'If directions="normal", src cannot be None')
_check_src_normal('normal', src)
directions = self._get_src_normals(src, use_cps)
else:
assert directions == 'pca'
x = self.data
if not np.isrealobj(self.data):
_check_option('stc.data.dtype', self.data.dtype,
(np.complex64, np.complex128))
dtype = \
np.float32 if x.dtype == np.complex64 else np.float64
x = x.view(dtype)
assert x.shape[-1] == 2 * self.data.shape[-1]
u, _, v = np.linalg.svd(x, full_matrices=False)
directions = u[:, :, 0]
# The sign is arbitrary, so let's flip it in the direction that
# makes the resulting time series the most positive:
if src is None:
signs = np.sum(v[:, 0].real, axis=1, keepdims=True)
else:
normals = self._get_src_normals(src, use_cps)
signs = np.sum(directions * normals, axis=1, keepdims=True)
assert signs.shape == (self.data.shape[0], 1)
signs = np.sign(signs)
signs[signs == 0] = 1.
directions *= signs
_check_option(
'directions.shape', directions.shape, [(self.data.shape[0], 3)])
data_norm = np.matmul(directions[:, np.newaxis], self.data)[:, 0]
stc = self._scalar_class(
data_norm, self.vertices, self.tmin, self.tstep, self.subject,
self.verbose)
return stc, directions
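    # Editor's usage sketch: for a VectorSourceEstimate ``vec_stc`` and its
    # source space ``src``, directionality can be removed or projected out:
    #
    #     stc_mag = vec_stc.magnitude()                      # vector norm
    #     stc_norm, _ = vec_stc.project('normal', src=src)   # along normals
    #     stc_pca, dirs = vec_stc.project('pca', src=src)    # max-power axis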
@copy_function_doc_to_method_doc(plot_vector_source_estimates)
def plot(self, subject=None, hemi='lh', colormap='hot', time_label='auto',
smoothing_steps=10, transparent=True, brain_alpha=0.4,
overlay_alpha=None, vector_alpha=1.0, scale_factor=None,
time_viewer='auto', subjects_dir=None, figure=None,
views='lateral',
colorbar=True, clim='auto', cortex='classic', size=800,
background='black', foreground=None, initial_time=None,
time_unit='s', show_traces='auto', src=None, volume_options=1.,
view_layout='vertical', add_data_kwargs=None,
verbose=None): # noqa: D102
return plot_vector_source_estimates(
self, subject=subject, hemi=hemi, colormap=colormap,
time_label=time_label, smoothing_steps=smoothing_steps,
transparent=transparent, brain_alpha=brain_alpha,
overlay_alpha=overlay_alpha, vector_alpha=vector_alpha,
scale_factor=scale_factor, time_viewer=time_viewer,
subjects_dir=subjects_dir, figure=figure, views=views,
colorbar=colorbar, clim=clim, cortex=cortex, size=size,
background=background, foreground=foreground,
initial_time=initial_time, time_unit=time_unit,
show_traces=show_traces, src=src, volume_options=volume_options,
view_layout=view_layout, add_data_kwargs=add_data_kwargs,
verbose=verbose)
class _BaseVolSourceEstimate(_BaseSourceEstimate):
_src_type = 'volume'
_src_count = None
@copy_function_doc_to_method_doc(plot_source_estimates)
def plot_3d(self, subject=None, surface='white', hemi='both',
colormap='auto', time_label='auto', smoothing_steps=10,
transparent=True, alpha=0.1, time_viewer='auto',
subjects_dir=None,
figure=None, views='axial', colorbar=True, clim='auto',
cortex="classic", size=800, background="black",
foreground=None, initial_time=None, time_unit='s',
backend='auto', spacing='oct6', title=None, show_traces='auto',
src=None, volume_options=1., view_layout='vertical',
add_data_kwargs=None, verbose=None):
return super().plot(
subject=subject, surface=surface, hemi=hemi, colormap=colormap,
time_label=time_label, smoothing_steps=smoothing_steps,
transparent=transparent, alpha=alpha, time_viewer=time_viewer,
subjects_dir=subjects_dir,
figure=figure, views=views, colorbar=colorbar, clim=clim,
cortex=cortex, size=size, background=background,
foreground=foreground, initial_time=initial_time,
time_unit=time_unit, backend=backend, spacing=spacing, title=title,
show_traces=show_traces, src=src, volume_options=volume_options,
view_layout=view_layout, add_data_kwargs=add_data_kwargs,
verbose=verbose)
@copy_function_doc_to_method_doc(plot_volume_source_estimates)
def plot(self, src, subject=None, subjects_dir=None, mode='stat_map',
bg_img='T1.mgz', colorbar=True, colormap='auto', clim='auto',
transparent='auto', show=True, initial_time=None,
initial_pos=None, verbose=None):
data = self.magnitude() if self._data_ndim == 3 else self
return plot_volume_source_estimates(
data, src=src, subject=subject, subjects_dir=subjects_dir,
mode=mode, bg_img=bg_img, colorbar=colorbar, colormap=colormap,
clim=clim, transparent=transparent, show=show,
initial_time=initial_time, initial_pos=initial_pos,
verbose=verbose)
# Override here to provide the volume-specific options
@verbose
def extract_label_time_course(self, labels, src, mode='auto',
allow_empty=False, *, trans=None,
mri_resolution=True, verbose=None):
"""Extract label time courses for lists of labels.
This function will extract one time course for each label. The way the
time courses are extracted depends on the mode parameter.
Parameters
----------
%(eltc_labels)s
%(eltc_src)s
%(eltc_mode)s
%(eltc_allow_empty)s
%(trans_deprecated)s
%(eltc_mri_resolution)s
%(verbose_meth)s
Returns
-------
%(eltc_returns)s
See Also
--------
extract_label_time_course : Extract time courses for multiple STCs.
Notes
-----
%(eltc_mode_notes)s
"""
return extract_label_time_course(
self, labels, src, mode=mode, return_generator=False,
allow_empty=allow_empty, trans=trans,
mri_resolution=mri_resolution, verbose=verbose)
@fill_doc
def in_label(self, label, mri, src, trans=None):
"""Get a source estimate object restricted to a label.
        The returned source estimate contains the time course of activation of
        all sources inside the label.
Parameters
----------
label : str | int
The label to use. Can be the name of a label if using a standard
FreeSurfer atlas, or an integer value to extract from the ``mri``.
mri : str
Path to the atlas to use.
src : instance of SourceSpaces
The volumetric source space. It must be a single, whole-brain
volume.
%(trans_deprecated)s
Returns
-------
stc : VolSourceEstimate | VolVectorSourceEstimate
The source estimate restricted to the given label.
Notes
-----
.. versionadded:: 0.21.0
"""
if len(self.vertices) != 1:
raise RuntimeError('This method can only be used with whole-brain '
'volume source spaces')
_validate_type(label, (str, 'int-like'), 'label')
if isinstance(label, str):
volume_label = [label]
else:
volume_label = {'Volume ID %s' % (label): _ensure_int(label)}
_dep_trans(trans)
label = _volume_labels(src, (mri, volume_label), mri_resolution=False)
assert len(label) == 1
label = label[0]
vertices = label.vertices
keep = np.in1d(self.vertices[0], label.vertices)
values, vertices = self.data[keep], [self.vertices[0][keep]]
label_stc = self.__class__(values, vertices=vertices, tmin=self.tmin,
tstep=self.tstep, subject=self.subject)
return label_stc
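    # Editor's usage sketch: assuming a whole-brain volume source space ``src``
    # built from a FreeSurfer ``aseg.mgz`` (path ``aseg_fname``), restricting
    # the estimate to a standard aseg label might look like:
    #
    #     stc_hippo = vol_stc.in_label('Left-Hippocampus', mri=aseg_fname,
    #                                  src=src)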
def save_as_volume(self, fname, src, dest='mri', mri_resolution=False,
format='nifti1'):
"""Save a volume source estimate in a NIfTI file.
Parameters
----------
fname : str
The name of the generated nifti file.
src : list
The list of source spaces (should all be of type volume).
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of
the original T1 image. If 'surf' the coordinate system
of the FreeSurfer surface is used (Surface RAS).
mri_resolution : bool
            If True, the image is saved in MRI resolution.
.. warning:: If you have many time points, the file produced can be
huge.
format : str
Either 'nifti1' (default) or 'nifti2'.
.. versionadded:: 0.17
Returns
-------
        img : instance of Nifti1Image
The image object.
Notes
-----
.. versionadded:: 0.9.0
"""
import nibabel as nib
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
img = self.as_volume(src, dest=dest, mri_resolution=mri_resolution,
format=format)
nib.save(img, fname)
def as_volume(self, src, dest='mri', mri_resolution=False,
format='nifti1'):
"""Export volume source estimate as a nifti object.
Parameters
----------
src : instance of SourceSpaces
The source spaces (should all be of type volume, or part of a
mixed source space).
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of
the original T1 image. If 'surf' the coordinate system
of the FreeSurfer surface is used (Surface RAS).
mri_resolution : bool
            If True, the image is saved in MRI resolution.
.. warning:: If you have many time points, the file produced can be
huge.
format : str
Either 'nifti1' (default) or 'nifti2'.
Returns
-------
img : instance of Nifti1Image
The image object.
Notes
-----
.. versionadded:: 0.9.0
"""
from .morph import _interpolate_data
data = self.magnitude() if self._data_ndim == 3 else self
return _interpolate_data(data, src, mri_resolution=mri_resolution,
mri_space=True, output=format)
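# Editor's usage sketch: exporting a volume estimate to NIfTI, e.g. for overlay
# in an external viewer (``vol_stc`` and ``src`` assumed to exist):
#
#     img = vol_stc.as_volume(src, mri_resolution=True)     # nibabel image
#     vol_stc.save_as_volume('stc_vol.nii.gz', src, mri_resolution=False)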
@fill_doc
class VolSourceEstimate(_BaseVolSourceEstimate):
"""Container for volume source estimates.
Parameters
----------
data : array of shape (n_dipoles, n_times) | tuple, shape (2,)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to ``np.dot(kernel, sens_data)``.
vertices : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VolVectorSourceEstimate : A container for volume vector source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.9.0
"""
@verbose
def save(self, fname, ftype='stc', verbose=None):
"""Save the source estimates to a file.
Parameters
----------
fname : str
The stem of the file name. The stem is extended with "-vl.stc"
or "-vl.w".
ftype : str
File format to use. Allowed values are "stc" (default), "w",
and "h5". The "w" format only supports a single time point.
%(verbose_meth)s
"""
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
_check_option('ftype', ftype, ['stc', 'w', 'h5'])
if ftype != 'h5' and len(self.vertices) != 1:
raise ValueError('Can only write to .stc or .w if a single volume '
'source space was used, use .h5 instead')
if ftype != 'h5' and self.data.dtype == 'complex':
raise ValueError('Can only write non-complex data to .stc or .w'
', use .h5 instead')
if ftype == 'stc':
logger.info('Writing STC to disk...')
if not (fname.endswith('-vl.stc') or fname.endswith('-vol.stc')):
fname += '-vl.stc'
_write_stc(fname, tmin=self.tmin, tstep=self.tstep,
vertices=self.vertices[0], data=self.data)
elif ftype == 'w':
logger.info('Writing STC to disk (w format)...')
if not (fname.endswith('-vl.w') or fname.endswith('-vol.w')):
fname += '-vl.w'
_write_w(fname, vertices=self.vertices[0], data=self.data)
elif ftype == 'h5':
super().save(fname, 'h5')
logger.info('[done]')
@fill_doc
class VolVectorSourceEstimate(_BaseVolSourceEstimate,
_BaseVectorSourceEstimate):
"""Container for volume source estimates.
Parameters
----------
data : array of shape (n_dipoles, 3, n_times)
The data in source space. Each dipole contains three vectors that
denote the dipole strength in X, Y and Z directions over time.
vertices : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VectorSourceEstimate : A container for vector source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.9.0
"""
_scalar_class = VolSourceEstimate
# defaults differ: hemi='both', views='axial'
@copy_function_doc_to_method_doc(plot_vector_source_estimates)
def plot_3d(self, subject=None, hemi='both', colormap='hot',
time_label='auto',
smoothing_steps=10, transparent=True, brain_alpha=0.4,
overlay_alpha=None, vector_alpha=1.0, scale_factor=None,
time_viewer='auto', subjects_dir=None, figure=None,
views='axial',
colorbar=True, clim='auto', cortex='classic', size=800,
background='black', foreground=None, initial_time=None,
time_unit='s', show_traces='auto', src=None,
volume_options=1., view_layout='vertical',
add_data_kwargs=None, verbose=None): # noqa: D102
return _BaseVectorSourceEstimate.plot(
self, subject=subject, hemi=hemi, colormap=colormap,
time_label=time_label, smoothing_steps=smoothing_steps,
transparent=transparent, brain_alpha=brain_alpha,
overlay_alpha=overlay_alpha, vector_alpha=vector_alpha,
scale_factor=scale_factor, time_viewer=time_viewer,
subjects_dir=subjects_dir, figure=figure, views=views,
colorbar=colorbar, clim=clim, cortex=cortex, size=size,
background=background, foreground=foreground,
initial_time=initial_time, time_unit=time_unit,
show_traces=show_traces, src=src, volume_options=volume_options,
view_layout=view_layout, add_data_kwargs=add_data_kwargs,
verbose=verbose)
@fill_doc
class VectorSourceEstimate(_BaseVectorSourceEstimate,
_BaseSurfaceSourceEstimate):
"""Container for vector surface source estimates.
For each vertex, the magnitude of the current is defined in the X, Y and Z
directions.
Parameters
----------
data : array of shape (n_dipoles, 3, n_times)
The data in source space. Each dipole contains three vectors that
denote the dipole strength in X, Y and Z directions over time.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
tmin : float
Time point of the first sample in data.
tstep : float
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VolSourceEstimate : A container for volume source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.15
"""
_scalar_class = SourceEstimate
###############################################################################
# Mixed source estimate (two cortical surfs plus other stuff)
class _BaseMixedSourceEstimate(_BaseSourceEstimate):
_src_type = 'mixed'
_src_count = None
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None): # noqa: D102
if not isinstance(vertices, list) or len(vertices) < 2:
raise ValueError('Vertices must be a list of numpy arrays with '
'one array per source space.')
super().__init__(data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject,
verbose=verbose)
@property
def _n_surf_vert(self):
return sum(len(v) for v in self.vertices[:2])
def surface(self):
"""Return the cortical surface source estimate.
Returns
-------
stc : instance of SourceEstimate or VectorSourceEstimate
The surface source estimate.
"""
if self._data_ndim == 3:
klass = VectorSourceEstimate
else:
klass = SourceEstimate
return klass(
self.data[:self._n_surf_vert], self.vertices[:2],
self.tmin, self.tstep, self.subject, self.verbose)
def volume(self):
"""Return the volume surface source estimate.
Returns
-------
stc : instance of VolSourceEstimate or VolVectorSourceEstimate
The volume source estimate.
"""
if self._data_ndim == 3:
klass = VolVectorSourceEstimate
else:
klass = VolSourceEstimate
return klass(
self.data[self._n_surf_vert:], self.vertices[2:],
self.tmin, self.tstep, self.subject, self.verbose)
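# Editor's usage sketch: a mixed estimate can be split into its cortical and
# volume parts (``mixed_stc`` assumed to exist):
#
#     stc_surf = mixed_stc.surface()   # SourceEstimate / VectorSourceEstimate
#     stc_vol = mixed_stc.volume()     # VolSourceEstimate / VolVectorSourceEstimate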
@fill_doc
class MixedSourceEstimate(_BaseMixedSourceEstimate):
"""Container for mixed surface and volume source estimates.
Parameters
----------
data : array of shape (n_dipoles, n_times) | tuple, shape (2,)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to ``np.dot(kernel, sens_data)``.
vertices : list of array
Vertex numbers corresponding to the data. The list contains arrays
with one array per source space.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of array
Vertex numbers corresponding to the data. The list contains arrays
with one array per source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VectorSourceEstimate : A container for vector source estimates.
VolSourceEstimate : A container for volume source estimates.
    VolVectorSourceEstimate : A container for volume vector source estimates.
Notes
-----
.. versionadded:: 0.9.0
"""
@fill_doc
class MixedVectorSourceEstimate(_BaseVectorSourceEstimate,
_BaseMixedSourceEstimate):
"""Container for volume source estimates.
Parameters
----------
data : array, shape (n_dipoles, 3, n_times)
The data in source space. Each dipole contains three vectors that
denote the dipole strength in X, Y and Z directions over time.
vertices : list of array, shape (n_src,)
Vertex numbers corresponding to the data.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array, shape (n_times,)
The time vector.
    vertices : list of array
        Vertex numbers corresponding to the data, with one array per
        source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.21.0
"""
_scalar_class = MixedSourceEstimate
###############################################################################
# Morphing
def _get_vol_mask(src):
"""Get the volume source space mask."""
assert len(src) == 1 # not a mixed source space
shape = src[0]['shape'][::-1]
mask = np.zeros(shape, bool)
mask.flat[src[0]['vertno']] = True
return mask
def _spatio_temporal_src_adjacency_vol(src, n_times):
from sklearn.feature_extraction import grid_to_graph
mask = _get_vol_mask(src)
edges = grid_to_graph(*mask.shape, mask=mask)
adjacency = _get_adjacency_from_edges(edges, n_times)
return adjacency
def _spatio_temporal_src_adjacency_surf(src, n_times):
if src[0]['use_tris'] is None:
# XXX It would be nice to support non oct source spaces too...
raise RuntimeError("The source space does not appear to be an ico "
"surface. adjacency cannot be extracted from"
" non-ico source spaces.")
used_verts = [np.unique(s['use_tris']) for s in src]
offs = np.cumsum([0] + [len(u_v) for u_v in used_verts])[:-1]
tris = np.concatenate([np.searchsorted(u_v, s['use_tris']) + off
for u_v, s, off in zip(used_verts, src, offs)])
adjacency = spatio_temporal_tris_adjacency(tris, n_times)
# deal with source space only using a subset of vertices
masks = [np.in1d(u, s['vertno']) for s, u in zip(src, used_verts)]
if sum(u.size for u in used_verts) != adjacency.shape[0] / n_times:
raise ValueError('Used vertices do not match adjacency shape')
if [np.sum(m) for m in masks] != [len(s['vertno']) for s in src]:
raise ValueError('Vertex mask does not match number of vertices')
masks = np.concatenate(masks)
missing = 100 * float(len(masks) - np.sum(masks)) / len(masks)
if missing:
warn('%0.1f%% of original source space vertices have been'
' omitted, tri-based adjacency will have holes.\n'
'Consider using distance-based adjacency or '
'morphing data to all source space vertices.' % missing)
masks = np.tile(masks, n_times)
masks = np.where(masks)[0]
adjacency = adjacency.tocsr()
adjacency = adjacency[masks]
adjacency = adjacency[:, masks]
# return to original format
adjacency = adjacency.tocoo()
return adjacency
@verbose
def spatio_temporal_src_adjacency(src, n_times, dist=None, verbose=None):
"""Compute adjacency for a source space activation over time.
Parameters
----------
src : instance of SourceSpaces
The source space. It can be a surface source space or a
volume source space.
n_times : int
Number of time instants.
dist : float, or None
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors. If None, immediate neighbors
are extracted from an ico surface.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
        source space, the first N nodes in the graph are the
        vertices at time 1, the nodes from N+1 to 2N are the vertices
        at time 2, etc.
"""
# XXX we should compute adjacency for each source space and then
# use scipy.sparse.block_diag to concatenate them
if src[0]['type'] == 'vol':
if dist is not None:
raise ValueError('dist must be None for a volume '
'source space. Got %s.' % dist)
adjacency = _spatio_temporal_src_adjacency_vol(src, n_times)
elif dist is not None:
# use distances computed and saved in the source space file
adjacency = spatio_temporal_dist_adjacency(src, n_times, dist)
else:
adjacency = _spatio_temporal_src_adjacency_surf(src, n_times)
return adjacency
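# Editor's usage sketch: building the spatio-temporal adjacency typically used
# for cluster-based permutation tests (``src`` and ``stc`` assumed to exist):
#
#     adjacency = spatio_temporal_src_adjacency(src, n_times=len(stc.times))
#     # COO sparse, shape (n_vertices * n_times, n_vertices * n_times)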
@verbose
def grade_to_tris(grade, verbose=None):
"""Get tris defined for a certain grade.
Parameters
----------
grade : int
Grade of an icosahedral mesh.
%(verbose)s
Returns
-------
tris : list
2-element list containing Nx3 arrays of tris, suitable for use in
spatio_temporal_tris_adjacency.
"""
a = _get_ico_tris(grade, None, False)
tris = np.concatenate((a, a + (np.max(a) + 1)))
return tris
@verbose
def spatio_temporal_tris_adjacency(tris, n_times, remap_vertices=False,
verbose=None):
"""Compute adjacency from triangles and time instants.
Parameters
----------
tris : array
N x 3 array defining triangles.
n_times : int
Number of time points.
remap_vertices : bool
Reassign vertex indices based on unique values. Useful
to process a subset of triangles. Defaults to False.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
        source space, the first N nodes in the graph are the
        vertices at time 1, the nodes from N+1 to 2N are the vertices
        at time 2, etc.
"""
if remap_vertices:
logger.info('Reassigning vertex indices.')
tris = np.searchsorted(np.unique(tris), tris)
edges = mesh_edges(tris)
edges = (edges + sparse.eye(edges.shape[0], format='csr')).tocoo()
return _get_adjacency_from_edges(edges, n_times)
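# Editor's usage sketch: the triangle-based variant, here fed by grade_to_tris
# for an ico-5 mesh and 20 time points:
#
#     tris = grade_to_tris(5)
#     adjacency = spatio_temporal_tris_adjacency(tris, n_times=20)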
@verbose
def spatio_temporal_dist_adjacency(src, n_times, dist, verbose=None):
"""Compute adjacency from distances in a source space and time instants.
Parameters
----------
src : instance of SourceSpaces
The source space must have distances between vertices computed, such
that src['dist'] exists and is useful. This can be obtained
with a call to :func:`mne.setup_source_space` with the
``add_dist=True`` option.
n_times : int
Number of time points.
dist : float
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
        source space, the first N nodes in the graph are the
        vertices at time 1, the nodes from N+1 to 2N are the vertices
        at time 2, etc.
"""
if src[0]['dist'] is None:
raise RuntimeError('src must have distances included, consider using '
'setup_source_space with add_dist=True')
blocks = [s['dist'][s['vertno'], :][:, s['vertno']] for s in src]
# Ensure we keep explicit zeros; deal with changes in SciPy
for block in blocks:
if isinstance(block, np.ndarray):
block[block == 0] = -np.inf
else:
            block.data[block.data == 0] = -1
edges = sparse_block_diag(blocks)
edges.data[:] = np.less_equal(edges.data, dist)
# clean it up and put it in coo format
edges = edges.tocsr()
edges.eliminate_zeros()
edges = edges.tocoo()
return _get_adjacency_from_edges(edges, n_times)
@verbose
def spatial_src_adjacency(src, dist=None, verbose=None):
"""Compute adjacency for a source space activation.
Parameters
----------
src : instance of SourceSpaces
The source space. It can be a surface source space or a
volume source space.
dist : float, or None
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors. If None, immediate neighbors
are extracted from an ico surface.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
"""
return spatio_temporal_src_adjacency(src, 1, dist)
@verbose
def spatial_tris_adjacency(tris, remap_vertices=False, verbose=None):
"""Compute adjacency from triangles.
Parameters
----------
tris : array
N x 3 array defining triangles.
remap_vertices : bool
Reassign vertex indices based on unique values. Useful
to process a subset of triangles. Defaults to False.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
"""
return spatio_temporal_tris_adjacency(tris, 1, remap_vertices)
@verbose
def spatial_dist_adjacency(src, dist, verbose=None):
"""Compute adjacency from distances in a source space.
Parameters
----------
src : instance of SourceSpaces
The source space must have distances between vertices computed, such
that src['dist'] exists and is useful. This can be obtained
with a call to :func:`mne.setup_source_space` with the
``add_dist=True`` option.
dist : float
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
"""
return spatio_temporal_dist_adjacency(src, 1, dist)
@verbose
def spatial_inter_hemi_adjacency(src, dist, verbose=None):
"""Get vertices on each hemisphere that are close to the other hemisphere.
Parameters
----------
src : instance of SourceSpaces
The source space. Must be surface type.
dist : float
Maximal Euclidean distance (in m) between vertices in one hemisphere
compared to the other to consider neighbors.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
        Typically this should be combined (additively) with another
existing intra-hemispheric adjacency matrix, e.g. computed
using geodesic distances.
"""
from scipy.spatial.distance import cdist
src = _ensure_src(src, kind='surface')
adj = cdist(src[0]['rr'][src[0]['vertno']],
src[1]['rr'][src[1]['vertno']])
adj = sparse.csr_matrix(adj <= dist, dtype=int)
empties = [sparse.csr_matrix((nv, nv), dtype=int) for nv in adj.shape]
adj = sparse.vstack([sparse.hstack([empties[0], adj]),
sparse.hstack([adj.T, empties[1]])])
return adj
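# Editor's usage sketch: the inter-hemispheric adjacency is meant to be added
# to an intra-hemispheric one, e.g. to let clusters bridge the midline:
#
#     adj_intra = spatial_src_adjacency(src)
#     adj_inter = spatial_inter_hemi_adjacency(src, dist=0.01)  # 10 mm
#     adjacency = (adj_intra + adj_inter).tocoo()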
@verbose
def _get_adjacency_from_edges(edges, n_times, verbose=None):
"""Given edges sparse matrix, create adjacency matrix."""
n_vertices = edges.shape[0]
logger.info("-- number of adjacent vertices : %d" % n_vertices)
nnz = edges.col.size
aux = n_vertices * np.tile(np.arange(n_times)[:, None], (1, nnz))
col = (edges.col[None, :] + aux).ravel()
row = (edges.row[None, :] + aux).ravel()
if n_times > 1: # add temporal edges
o = (n_vertices * np.arange(n_times - 1)[:, None] +
np.arange(n_vertices)[None, :]).ravel()
d = (n_vertices * np.arange(1, n_times)[:, None] +
np.arange(n_vertices)[None, :]).ravel()
row = np.concatenate((row, o, d))
col = np.concatenate((col, d, o))
data = np.ones(edges.data.size * n_times + 2 * n_vertices * (n_times - 1),
dtype=np.int64)
adjacency = coo_matrix((data, (row, col)),
shape=(n_times * n_vertices,) * 2)
return adjacency
@verbose
def _get_ico_tris(grade, verbose=None, return_surf=False):
"""Get triangles for ico surface."""
ico = _get_ico_surface(grade)
if not return_surf:
return ico['tris']
else:
return ico
def _pca_flip(flip, data):
U, s, V = linalg.svd(data, full_matrices=False)
# determine sign-flip
sign = np.sign(np.dot(U[:, 0], flip))
# use average power in label for scaling
scale = linalg.norm(s) / np.sqrt(len(data))
return sign * scale * V[0]
_label_funcs = {
'mean': lambda flip, data: np.mean(data, axis=0),
'mean_flip': lambda flip, data: np.mean(flip * data, axis=0),
'max': lambda flip, data: np.max(np.abs(data), axis=0),
'pca_flip': _pca_flip,
}
@contextlib.contextmanager
def _temporary_vertices(src, vertices):
orig_vertices = [s['vertno'] for s in src]
for s, v in zip(src, vertices):
s['vertno'] = v
try:
yield
finally:
for s, v in zip(src, orig_vertices):
s['vertno'] = v
def _prepare_label_extraction(stc, labels, src, mode, allow_empty, use_sparse):
"""Prepare indices and flips for extract_label_time_course."""
# If src is a mixed src space, the first 2 src spaces are surf type and
# the other ones are vol type. For mixed source space n_labels will be
# given by the number of ROIs of the cortical parcellation plus the number
# of vol src space.
# If stc=None (i.e. no activation time courses provided) and mode='mean',
# only computes vertex indices and label_flip will be list of None.
from .label import label_sign_flip, Label, BiHemiLabel
# if source estimate provided in stc, get vertices from source space and
# check that they are the same as in the stcs
if stc is not None:
vertno = stc.vertices
for s, v, hemi in zip(src, stc.vertices, ('left', 'right')):
n_missing = (~np.in1d(v, s['vertno'])).sum()
if n_missing:
raise ValueError('%d/%d %s hemisphere stc vertices missing '
'from the source space, likely mismatch'
% (n_missing, len(v), hemi))
else:
vertno = [s['vertno'] for s in src]
nvert = [len(vn) for vn in vertno]
# initialization
label_flip = list()
label_vertidx = list()
bad_labels = list()
for li, label in enumerate(labels):
if use_sparse:
assert isinstance(label, dict)
vertidx = label['csr']
# This can happen if some labels aren't present in the space
if vertidx.shape[0] == 0:
bad_labels.append(label['name'])
vertidx = None
# Efficiency shortcut: use linearity early to avoid redundant
# calculations
elif mode == 'mean':
vertidx = sparse.csr_matrix(vertidx.mean(axis=0))
label_vertidx.append(vertidx)
label_flip.append(None)
continue
# standard case
_validate_type(label, (Label, BiHemiLabel), 'labels[%d]' % (li,))
if label.hemi == 'both':
# handle BiHemiLabel
sub_labels = [label.lh, label.rh]
else:
sub_labels = [label]
this_vertidx = list()
for slabel in sub_labels:
if slabel.hemi == 'lh':
this_vertices = np.intersect1d(vertno[0], slabel.vertices)
vertidx = np.searchsorted(vertno[0], this_vertices)
elif slabel.hemi == 'rh':
this_vertices = np.intersect1d(vertno[1], slabel.vertices)
vertidx = nvert[0] + np.searchsorted(vertno[1], this_vertices)
else:
raise ValueError('label %s has invalid hemi' % label.name)
this_vertidx.append(vertidx)
# convert it to an array
this_vertidx = np.concatenate(this_vertidx)
this_flip = None
if len(this_vertidx) == 0:
bad_labels.append(label.name)
this_vertidx = None # to later check if label is empty
elif mode not in ('mean', 'max'): # mode-dependent initialization
# label_sign_flip uses two properties:
#
# - src[ii]['nn']
# - src[ii]['vertno']
#
# So if we override vertno with the stc vertices, it will pick
# the correct normals.
with _temporary_vertices(src, stc.vertices):
this_flip = label_sign_flip(label, src[:2])[:, None]
label_vertidx.append(this_vertidx)
label_flip.append(this_flip)
if len(bad_labels):
msg = ('source space does not contain any vertices for %d label%s:\n%s'
% (len(bad_labels), _pl(bad_labels), bad_labels))
if not allow_empty:
raise ValueError(msg)
else:
msg += '\nAssigning all-zero time series.'
if allow_empty == 'ignore':
logger.info(msg)
else:
warn(msg)
return label_vertidx, label_flip
def _vol_src_rr(src):
return apply_trans(
src[0]['src_mri_t'], np.array(
[d.ravel(order='F')
for d in np.meshgrid(
*(np.arange(s) for s in src[0]['shape']),
indexing='ij')],
float).T)
def _volume_labels(src, labels, mri_resolution):
# This will create Label objects that should do the right thing for our
# given volumetric source space when used with extract_label_time_course
from .label import Label
assert src.kind == 'volume'
extra = ' when using a volume source space'
_import_nibabel('use volume atlas labels')
_validate_type(labels, ('path-like', list, tuple), 'labels' + extra)
if _check_path_like(labels):
mri = labels
infer_labels = True
else:
if len(labels) != 2:
raise ValueError('labels, if list or tuple, must have length 2, '
'got %s' % (len(labels),))
mri, labels = labels
infer_labels = False
_validate_type(mri, 'path-like', 'labels[0]' + extra)
logger.info('Reading atlas %s' % (mri,))
vol_info = _get_mri_info_data(str(mri), data=True)
atlas_data = vol_info['data']
atlas_values = np.unique(atlas_data)
if atlas_values.dtype.kind == 'f': # MGZ will be 'i'
atlas_values = atlas_values[np.isfinite(atlas_values)]
if not (atlas_values == np.round(atlas_values)).all():
raise RuntimeError('Non-integer values present in atlas, cannot '
'labelize')
atlas_values = np.round(atlas_values).astype(np.int64)
if infer_labels:
labels = {
k: v for k, v in read_freesurfer_lut()[0].items()
if v in atlas_values}
labels = _check_volume_labels(labels, mri, name='labels[1]')
assert isinstance(labels, dict)
del atlas_values
vox_mri_t = vol_info['vox_mri_t']
want = src[0].get('vox_mri_t', None)
if want is None:
raise RuntimeError(
'Cannot use volumetric atlas if no mri was supplied during '
'source space creation')
vox_mri_t, want = vox_mri_t['trans'], want['trans']
if not np.allclose(vox_mri_t, want, atol=1e-6):
raise RuntimeError(
'atlas vox_mri_t does not match that used to create the source '
'space')
src_shape = tuple(src[0]['mri_' + k] for k in ('width', 'height', 'depth'))
atlas_shape = atlas_data.shape
if atlas_shape != src_shape:
raise RuntimeError('atlas shape %s does not match source space MRI '
'shape %s' % (atlas_shape, src_shape))
atlas_data = atlas_data.ravel(order='F')
if mri_resolution:
# Upsample then just index
out_labels = list()
nnz = 0
interp = src[0]['interpolator']
# should be guaranteed by size checks above and our src interp code
assert interp.shape[0] == np.prod(src_shape)
assert interp.shape == (atlas_data.size, len(src[0]['rr']))
interp = interp[:, src[0]['vertno']]
for k, v in labels.items():
mask = atlas_data == v
csr = interp[mask]
out_labels.append(dict(csr=csr, name=k))
nnz += csr.shape[0] > 0
else:
# Use nearest values
vertno = src[0]['vertno']
rr = _vol_src_rr(src)
del src
src_values = _get_atlas_values(vol_info, rr[vertno])
vertices = [vertno[src_values == val] for val in labels.values()]
out_labels = [Label(v, hemi='lh', name=val)
for v, val in zip(vertices, labels.keys())]
nnz = sum(len(v) != 0 for v in vertices)
logger.info('%d/%d atlas regions had at least one vertex '
'in the source space' % (nnz, len(out_labels)))
return out_labels
def _dep_trans(trans):
if trans is not None:
warn('trans is no longer needed and will be removed in 0.23, do not '
'pass it as an argument', DeprecationWarning)
def _gen_extract_label_time_course(stcs, labels, src, mode='mean',
allow_empty=False, trans=None,
mri_resolution=True, verbose=None):
# loop through source estimates and extract time series
_dep_trans(trans)
_validate_type(src, SourceSpaces)
_check_option('mode', mode, sorted(_label_funcs.keys()) + ['auto'])
kind = src.kind
if kind in ('surface', 'mixed'):
if not isinstance(labels, list):
labels = [labels]
use_sparse = False
else:
labels = _volume_labels(src, labels, mri_resolution)
use_sparse = bool(mri_resolution)
n_mode = len(labels) # how many processed with the given mode
n_mean = len(src[2:]) if kind == 'mixed' else 0
n_labels = n_mode + n_mean
vertno = func = None
for si, stc in enumerate(stcs):
_validate_type(stc, _BaseSourceEstimate, 'stcs[%d]' % (si,),
'source estimate')
if isinstance(stc, (_BaseVolSourceEstimate,
_BaseVectorSourceEstimate)):
_check_option(
'mode', mode, ('mean', 'max', 'auto'),
'when using a vector and/or volume source estimate')
mode = 'mean' if mode == 'auto' else mode
else:
mode = 'mean_flip' if mode == 'auto' else mode
if vertno is None:
vertno = copy.deepcopy(stc.vertices) # avoid keeping a ref
nvert = np.array([len(v) for v in vertno])
label_vertidx, src_flip = _prepare_label_extraction(
stc, labels, src, mode, allow_empty, use_sparse)
func = _label_funcs[mode]
# make sure the stc is compatible with the source space
if len(vertno) != len(stc.vertices):
raise ValueError('stc not compatible with source space')
for vn, svn in zip(vertno, stc.vertices):
if len(vn) != len(svn):
raise ValueError('stc not compatible with source space. '
'stc has %s time series but there are %s '
'vertices in source space. Ensure you used '
'src from the forward or inverse operator, '
'as forward computation can exclude vertices.'
% (len(svn), len(vn)))
if not np.array_equal(svn, vn):
raise ValueError('stc not compatible with source space')
logger.info('Extracting time courses for %d labels (mode: %s)'
% (n_labels, mode))
# do the extraction
label_tc = np.zeros((n_labels,) + stc.data.shape[1:],
dtype=stc.data.dtype)
for i, (vertidx, flip) in enumerate(zip(label_vertidx, src_flip)):
if vertidx is not None:
if isinstance(vertidx, sparse.csr_matrix):
assert mri_resolution
assert vertidx.shape[1] == stc.data.shape[0]
this_data = np.reshape(stc.data, (stc.data.shape[0], -1))
this_data = vertidx @ this_data
this_data.shape = \
(this_data.shape[0],) + stc.data.shape[1:]
else:
this_data = stc.data[vertidx]
label_tc[i] = func(flip, this_data)
# extract label time series for the vol src space (only mean supported)
offset = nvert[:-n_mean].sum() # effectively :2 or :0
for i, nv in enumerate(nvert[2:]):
if nv != 0:
v2 = offset + nv
label_tc[n_mode + i] = np.mean(stc.data[offset:v2], axis=0)
offset = v2
# this is a generator!
yield label_tc
@verbose
def extract_label_time_course(stcs, labels, src, mode='auto',
allow_empty=False, return_generator=False,
*, trans=None, mri_resolution=True,
verbose=None):
"""Extract label time course for lists of labels and source estimates.
This function will extract one time course for each label and source
estimate. The way the time courses are extracted depends on the mode
parameter (see Notes).
Parameters
----------
stcs : SourceEstimate | list (or generator) of SourceEstimate
The source estimates from which to extract the time course.
%(eltc_labels)s
%(eltc_src)s
%(eltc_mode)s
%(eltc_allow_empty)s
return_generator : bool
If True, a generator instead of a list is returned.
%(trans_deprecated)s
%(eltc_mri_resolution)s
%(verbose)s
Returns
-------
%(eltc_returns)s
Notes
-----
%(eltc_mode_notes)s
If encountering a ``ValueError`` due to mismatch between number of
source points in the subject source space and computed ``stc`` object set
``src`` argument to ``fwd['src']`` or ``inv['src']`` to ensure the source
space is the one actually used by the inverse to compute the source
time courses.
"""
# convert inputs to lists
if not isinstance(stcs, (list, tuple, GeneratorType)):
stcs = [stcs]
return_several = False
return_generator = False
else:
return_several = True
label_tc = _gen_extract_label_time_course(
stcs, labels, src, mode=mode, allow_empty=allow_empty,
trans=trans, mri_resolution=mri_resolution)
if not return_generator:
# do the extraction and return a list
label_tc = list(label_tc)
if not return_several:
            # input was a single SourceEstimate, return a single array
label_tc = label_tc[0]
return label_tc
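# Editor's usage sketch: extracting mean-flipped label time courses from a
# surface estimate, with an 'aparc' parcellation read for a hypothetical
# 'sample' subject:
#
#     labels = mne.read_labels_from_annot('sample', parc='aparc',
#                                         subjects_dir=subjects_dir)
#     label_tc = extract_label_time_course(stc, labels, src, mode='mean_flip')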
@verbose
def stc_near_sensors(evoked, trans, subject, distance=0.01, mode='sum',
project=True, subjects_dir=None, src=None, verbose=None):
"""Create a STC from ECoG and sEEG sensor data.
Parameters
----------
evoked : instance of Evoked
        The evoked data. Must contain ECoG or sEEG channels.
%(trans)s
subject : str
The subject name.
distance : float
Distance (m) defining the activation "ball" of the sensor.
mode : str
Can be "sum" to do a linear sum of weights, "nearest" to
use only the weight of the nearest sensor, or "zero" to use a
zero-order hold. See Notes.
project : bool
        If True, project the electrodes to the nearest ``'pial'`` surface
vertex before computing distances. Only used when doing a
surface projection.
%(subjects_dir)s
src : instance of SourceSpaces
The source space.
.. warning:: If a surface source space is used, make sure that
``surf='pial'`` was used during construction.
%(verbose)s
Returns
-------
stc : instance of SourceEstimate
The surface source estimate. If src is None, a surface source
estimate will be produced, and the number of vertices will equal
the number of pial-surface vertices that were close enough to
        the sensors to take on a non-zero value. If src is not None,
a surface, volume, or mixed source estimate will be produced
(depending on the kind of source space passed) and the
        vertices will match those of src (i.e., there may be many
        all-zero values in stc.data).
Notes
-----
For surface projections, this function projects the ECoG sensors to
the pial surface (if ``project``), then the activation at each pial
surface vertex is given by the mode:
- ``'sum'``
Activation is the sum across each sensor weighted by the fractional
``distance`` from each sensor. A sensor with zero distance gets weight
1 and a sensor at ``distance`` meters away (or larger) gets weight 0.
If ``distance`` is less than the distance between any two electrodes,
        this will be the same as ``'single'``.
    - ``'single'``
        Same as ``'sum'`` except that only the nearest electrode is used,
        rather than summing across electrodes within the ``distance`` radius.
        Same as ``'nearest'`` for vertices with distance zero to the projected
        sensor.
- ``'nearest'``
The value is given by the value of the nearest sensor, up to a
``distance`` (beyond which it is zero).
If creating a Volume STC, ``src`` must be passed in, and this
function will project sEEG sensors to nearby surrounding vertices.
Then the activation at each volume vertex is given by the mode
in the same way as ECoG surface projections.
.. versionadded:: 0.22
"""
from scipy.spatial.distance import cdist, pdist
from .evoked import Evoked
_validate_type(evoked, Evoked, 'evoked')
_validate_type(mode, str, 'mode')
_validate_type(src, (None, SourceSpaces), 'src')
_check_option('mode', mode, ('sum', 'single', 'nearest'))
# create a copy of Evoked using ecog and seeg
evoked = evoked.copy().pick_types(ecog=True, seeg=True)
# get channel positions that will be used to pinpoint where
# in the Source space we will use the evoked data
pos = evoked._get_channel_positions()
# remove nan channels
nan_inds = np.where(np.isnan(pos).any(axis=1))[0]
nan_chs = [evoked.ch_names[idx] for idx in nan_inds]
evoked.drop_channels(nan_chs)
pos = [pos[idx] for idx in range(len(pos)) if idx not in nan_inds]
# coord_frame transformation from native mne "head" to MRI coord_frame
trans, _ = _get_trans(trans, 'head', 'mri', allow_none=True)
# convert head positions -> coord_frame MRI
pos = apply_trans(trans, pos)
subject = _check_subject(None, subject, False)
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if src is None: # fake a full surface one
rrs = [read_surface(op.join(subjects_dir, subject,
'surf', f'{hemi}.pial'))[0]
for hemi in ('lh', 'rh')]
src = SourceSpaces([
dict(rr=rr / 1000., vertno=np.arange(len(rr)), type='surf',
coord_frame=FIFF.FIFFV_COORD_MRI)
for rr in rrs])
del rrs
keep_all = False
else:
keep_all = True
# ensure it's a usable one
klass = dict(
surface=SourceEstimate,
volume=VolSourceEstimate,
mixed=MixedSourceEstimate,
)
_check_option('src.kind', src.kind, sorted(klass.keys()))
klass = klass[src.kind]
rrs = np.concatenate([s['rr'][s['vertno']] for s in src])
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
rrs = apply_trans(trans, rrs)
# projection will only occur with surfaces
logger.info(
f'Projecting data from {len(pos)} sensor{_pl(pos)} onto {len(rrs)} '
f'{src.kind} vertices: {mode} mode')
if project and src.kind == 'surface':
logger.info(' Projecting electrodes onto surface')
pos = _project_onto_surface(pos, dict(rr=rrs), project_rrs=True,
method='nearest')[2]
min_dist = pdist(pos).min() * 1000
logger.info(
f' Minimum {"projected " if project else ""}intra-sensor distance: '
f'{min_dist:0.1f} mm')
# compute pairwise distance between source space points and sensors
dists = cdist(rrs, pos)
assert dists.shape == (len(rrs), len(pos))
# only consider vertices within our "epsilon-ball"
# characterized by distance kwarg
vertices = np.where((dists <= distance).any(-1))[0]
logger.info(f' {len(vertices)} / {len(rrs)} non-zero vertices')
w = np.maximum(1. - dists[vertices] / distance, 0)
# now we triage based on mode
if mode in ('single', 'nearest'):
range_ = np.arange(w.shape[0])
idx = np.argmax(w, axis=1)
vals = w[range_, idx] if mode == 'single' else 1.
w.fill(0)
w[range_, idx] = vals
missing = np.where(~np.any(w, axis=0))[0]
if len(missing):
warn(f'Channel{_pl(missing)} missing in STC: '
f'{", ".join(evoked.ch_names[mi] for mi in missing)}')
nz_data = w @ evoked.data
if not keep_all:
assert src.kind == 'surface'
data = nz_data
offset = len(src[0]['vertno'])
vertices = [vertices[vertices < offset],
vertices[vertices >= offset] - offset]
else:
data = np.zeros(
(sum(len(s['vertno']) for s in src), len(evoked.times)),
dtype=nz_data.dtype)
data[vertices] = nz_data
vertices = [s['vertno'].copy() for s in src]
return klass(data, vertices, evoked.times[0], 1. / evoked.info['sfreq'],
subject=subject, verbose=verbose)
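# Editor's usage sketch: mapping sEEG/ECoG evoked data onto the pial surface of
# a hypothetical 'sample' subject (``evoked`` and ``trans`` assumed to exist):
#
#     stc = stc_near_sensors(evoked, trans, 'sample', distance=0.01,
#                            mode='sum', subjects_dir=subjects_dir)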
| bsd-3-clause |
dgwakeman/mne-python | mne/tests/test_epochs.py | 2 | 55177 | # Author: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
from copy import deepcopy
from nose.tools import (assert_true, assert_equal, assert_raises,
assert_not_equal)
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose)
import numpy as np
import copy as cp
import warnings
from scipy import fftpack
import matplotlib
from mne import (io, Epochs, read_events, pick_events, read_epochs,
equalize_channels, pick_types, pick_channels, read_evokeds,
write_evokeds)
from mne.epochs import (
bootstrap, equalize_epoch_counts, combine_event_ids, add_channels_epochs,
EpochsArray, concatenate_epochs, _BaseEpochs)
from mne.utils import (_TempDir, requires_pandas, slow_test,
clean_warning_registry, run_tests_if_main,
requires_scipy_version)
from mne.io.meas_info import create_info
from mne.io.proj import _has_eeg_average_ref_proj
from mne.event import merge_events
from mne.io.constants import FIFF
from mne.externals.six.moves import zip
from mne.externals.six.moves import cPickle as pickle
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')
event_id, tmin, tmax = 1, -0.2, 0.5
event_id_2 = 2
def _get_data():
raw = io.Raw(raw_fname, add_eeg_ref=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
ecg=True, eog=True, include=['STI 014'],
exclude='bads')
return raw, events, picks
reject = dict(grad=1000e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
flat = dict(grad=1e-15, mag=1e-15)
clean_warning_registry() # really clean warning stack
def test_base_epochs():
"""Test base epochs class
"""
raw = _get_data()[0]
epochs = _BaseEpochs(raw.info, event_id, tmin, tmax)
assert_raises(NotImplementedError, epochs.get_data)
assert_raises(NotImplementedError, epochs.__next__)
assert_equal(epochs.__iter__()._current, 0)
@requires_scipy_version('0.14')
def test_savgol_filter():
"""Test savgol filtering
"""
h_freq = 10.
raw, events = _get_data()[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax)
assert_raises(RuntimeError, epochs.savgol_filter, 10.)
epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
freqs = fftpack.fftfreq(len(epochs.times), 1. / epochs.info['sfreq'])
data = np.abs(fftpack.fft(epochs.get_data()))
match_mask = np.logical_and(freqs >= 0, freqs <= h_freq / 2.)
mismatch_mask = np.logical_and(freqs >= h_freq * 2, freqs < 50.)
epochs.savgol_filter(h_freq)
data_filt = np.abs(fftpack.fft(epochs.get_data()))
# decent in pass-band
assert_allclose(np.mean(data[:, :, match_mask], 0),
np.mean(data_filt[:, :, match_mask], 0),
rtol=1e-4, atol=1e-2)
# suppression in stop-band
assert_true(np.mean(data[:, :, mismatch_mask]) >
np.mean(data_filt[:, :, mismatch_mask]) * 5)
def test_epochs_hash():
"""Test epoch hashing
"""
raw, events = _get_data()[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax)
assert_raises(RuntimeError, epochs.__hash__)
epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
assert_equal(hash(epochs), hash(epochs))
epochs_2 = Epochs(raw, events, event_id, tmin, tmax, preload=True)
assert_equal(hash(epochs), hash(epochs_2))
# do NOT use assert_equal here, failing output is terrible
assert_true(pickle.dumps(epochs) == pickle.dumps(epochs_2))
epochs_2._data[0, 0, 0] -= 1
assert_not_equal(hash(epochs), hash(epochs_2))
def test_event_ordering():
"""Test event order"""
raw, events = _get_data()[:2]
events2 = events.copy()
np.random.shuffle(events2)
for ii, eve in enumerate([events, events2]):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
Epochs(raw, eve, event_id, tmin, tmax,
baseline=(None, 0), reject=reject, flat=flat)
assert_equal(len(w), ii)
if ii > 0:
assert_true('chronologically' in '%s' % w[-1].message)
def test_epochs_bad_baseline():
"""Test Epochs initialization with bad baseline parameters
"""
raw, events = _get_data()[:2]
assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (-0.2, 0))
assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (0, 0.4))
def test_epoch_combine_ids():
"""Test combining event ids in epochs compared to events
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3,
'd': 4, 'e': 5, 'f': 32},
tmin, tmax, picks=picks, preload=False)
events_new = merge_events(events, [1, 2], 12)
epochs_new = combine_event_ids(epochs, ['a', 'b'], {'ab': 12})
assert_equal(epochs_new['ab'].name, 'ab')
assert_array_equal(events_new, epochs_new.events)
# should probably add test + functionality for non-replacement XXX
def test_epoch_multi_ids():
"""Test epoch selection via multiple/partial keys
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a/b/a': 1, 'a/b/b': 2, 'a/c': 3,
'b/d': 4, 'a_b': 5},
tmin, tmax, picks=picks, preload=False)
epochs_regular = epochs[['a', 'b']]
epochs_multi = epochs[['a/b/a', 'a/b/b']]
assert_array_equal(epochs_regular.events, epochs_multi.events)
def test_read_epochs_bad_events():
"""Test epochs when events are at the beginning or the end of the file
"""
raw, events, picks = _get_data()
# Event at the beginning
epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks, baseline=(None, 0))
with warnings.catch_warnings(record=True):
evoked = epochs.average()
epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks, baseline=(None, 0))
assert_true(repr(epochs)) # test repr
epochs.drop_bad_epochs()
assert_true(repr(epochs))
with warnings.catch_warnings(record=True):
evoked = epochs.average()
# Event at the end
epochs = Epochs(raw, np.array([[raw.last_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks, baseline=(None, 0))
with warnings.catch_warnings(record=True):
evoked = epochs.average()
assert evoked
warnings.resetwarnings()
@slow_test
def test_read_write_epochs():
"""Test epochs from raw files with IO as fif file
"""
raw, events, picks = _get_data()
tempdir = _TempDir()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
evoked = epochs.average()
data = epochs.get_data()
# Bad tmin/tmax parameters
assert_raises(ValueError, Epochs, raw, events, event_id, tmax, tmin,
baseline=None)
epochs_no_id = Epochs(raw, pick_events(events, include=event_id),
None, tmin, tmax, picks=picks,
baseline=(None, 0))
assert_array_equal(data, epochs_no_id.get_data())
eog_picks = pick_types(raw.info, meg=False, eeg=False, stim=False,
eog=True, exclude='bads')
eog_ch_names = [raw.ch_names[k] for k in eog_picks]
epochs.drop_channels(eog_ch_names)
assert_true(len(epochs.info['chs']) == len(epochs.ch_names) ==
epochs.get_data().shape[1])
data_no_eog = epochs.get_data()
assert_true(data.shape[1] == (data_no_eog.shape[1] + len(eog_picks)))
# test decim kwarg
with warnings.catch_warnings(record=True) as w:
# decim with lowpass
warnings.simplefilter('always')
epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), decim=4)
assert_equal(len(w), 1)
# decim without lowpass
lowpass = raw.info['lowpass']
raw.info['lowpass'] = None
epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), decim=4)
assert_equal(len(w), 2)
raw.info['lowpass'] = lowpass
data_dec = epochs_dec.get_data()
assert_array_equal(data[:, :, epochs_dec._decim_idx], data_dec)
evoked_dec = epochs_dec.average()
assert_array_equal(evoked.data[:, epochs_dec._decim_idx], evoked_dec.data)
n = evoked.data.shape[1]
n_dec = evoked_dec.data.shape[1]
n_dec_min = n // 4
assert_true(n_dec_min <= n_dec <= n_dec_min + 1)
assert_true(evoked_dec.info['sfreq'] == evoked.info['sfreq'] / 4)
# test IO
epochs.save(op.join(tempdir, 'test-epo.fif'))
epochs_read = read_epochs(op.join(tempdir, 'test-epo.fif'))
assert_array_almost_equal(epochs_read.get_data(), epochs.get_data())
assert_array_equal(epochs_read.times, epochs.times)
assert_array_almost_equal(epochs_read.average().data, evoked.data)
assert_equal(epochs_read.proj, epochs.proj)
bmin, bmax = epochs.baseline
if bmin is None:
bmin = epochs.times[0]
if bmax is None:
bmax = epochs.times[-1]
baseline = (bmin, bmax)
assert_array_almost_equal(epochs_read.baseline, baseline)
assert_array_almost_equal(epochs_read.tmin, epochs.tmin, 2)
assert_array_almost_equal(epochs_read.tmax, epochs.tmax, 2)
assert_equal(epochs_read.event_id, epochs.event_id)
epochs.event_id.pop('1')
epochs.event_id.update({'a:a': 1}) # test allow for ':' in key
epochs.save(op.join(tempdir, 'foo-epo.fif'))
epochs_read2 = read_epochs(op.join(tempdir, 'foo-epo.fif'))
assert_equal(epochs_read2.event_id, epochs.event_id)
# add reject here so some of the epochs get dropped
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
epochs.save(op.join(tempdir, 'test-epo.fif'))
# ensure bad events are not saved
epochs_read3 = read_epochs(op.join(tempdir, 'test-epo.fif'))
assert_array_equal(epochs_read3.events, epochs.events)
data = epochs.get_data()
assert_true(epochs_read3.events.shape[0] == data.shape[0])
# test copying loaded one (raw property)
epochs_read4 = epochs_read3.copy()
assert_array_almost_equal(epochs_read4.get_data(), data)
# test equalizing loaded one (drop_log property)
epochs_read4.equalize_event_counts(epochs.event_id)
epochs.drop_epochs([1, 2], reason='can we recover orig ID?')
epochs.save(op.join(tempdir, 'test-epo.fif'))
epochs_read5 = read_epochs(op.join(tempdir, 'test-epo.fif'))
assert_array_equal(epochs_read5.selection, epochs.selection)
assert_array_equal(epochs_read5.drop_log, epochs.drop_log)
# Test that one can drop channels on read file
epochs_read5.drop_channels(epochs_read5.ch_names[:1])
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs_badname = op.join(tempdir, 'test-bad-name.fif.gz')
epochs.save(epochs_badname)
read_epochs(epochs_badname)
assert_true(len(w) == 2)
def test_epochs_proj():
"""Test handling projection (apply proj in Raw or in Epochs)
"""
raw, events, picks = _get_data()
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
this_picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
eog=True, exclude=exclude)
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True)
assert_true(all(p['active'] is True for p in epochs.info['projs']))
evoked = epochs.average()
assert_true(all(p['active'] is True for p in evoked.info['projs']))
data = epochs.get_data()
raw_proj = io.Raw(raw_fname, proj=True)
epochs_no_proj = Epochs(raw_proj, events[:4], event_id, tmin, tmax,
picks=this_picks, baseline=(None, 0), proj=False)
data_no_proj = epochs_no_proj.get_data()
assert_true(all(p['active'] is True for p in epochs_no_proj.info['projs']))
evoked_no_proj = epochs_no_proj.average()
assert_true(all(p['active'] is True for p in evoked_no_proj.info['projs']))
assert_true(epochs_no_proj.proj is True) # as projs are active from Raw
assert_array_almost_equal(data, data_no_proj, decimal=8)
# make sure we can exclude avg ref
this_picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
eog=True, exclude=exclude)
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True, add_eeg_ref=True)
assert_true(_has_eeg_average_ref_proj(epochs.info['projs']))
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True, add_eeg_ref=False)
assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
# make sure we don't add avg ref when a custom ref has been applied
raw.info['custom_ref_applied'] = True
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True)
assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
def test_evoked_arithmetic():
"""Test arithmetic of evoked data
"""
raw, events, picks = _get_data()
epochs1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked1 = epochs1.average()
epochs2 = Epochs(raw, events[4:8], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked2 = epochs2.average()
epochs = Epochs(raw, events[:8], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked = epochs.average()
evoked_sum = evoked1 + evoked2
assert_array_equal(evoked.data, evoked_sum.data)
assert_array_equal(evoked.times, evoked_sum.times)
assert_true(evoked_sum.nave == (evoked1.nave + evoked2.nave))
evoked_diff = evoked1 - evoked1
assert_array_equal(np.zeros_like(evoked.data), evoked_diff.data)
def test_evoked_io_from_epochs():
"""Test IO of evoked data made from epochs
"""
tempdir = _TempDir()
raw, events, picks = _get_data()
# offset our tmin so we don't get exactly a zero value when decimating
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, tmin + 0.011, tmax,
picks=picks, baseline=(None, 0), decim=5)
assert_true(len(w) == 1)
evoked = epochs.average()
evoked.save(op.join(tempdir, 'evoked-ave.fif'))
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4,
atol=1 / evoked.info['sfreq'])
# now let's do one with negative time
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, 0.1, tmax,
picks=picks, baseline=(0.1, 0.2), decim=5)
evoked = epochs.average()
evoked.save(op.join(tempdir, 'evoked-ave.fif'))
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
# should be equivalent to a cropped original
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, -0.2, tmax,
picks=picks, baseline=(0.1, 0.2), decim=5)
evoked = epochs.average()
evoked.crop(0.099, None)
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
def test_evoked_standard_error():
"""Test calculation and read/write of standard error
"""
raw, events, picks = _get_data()
tempdir = _TempDir()
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked = [epochs.average(), epochs.standard_error()]
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), evoked)
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), [0, 1])
evoked3 = [read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown'),
read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown',
kind='standard_error')]
for evoked_new in [evoked2, evoked3]:
assert_true(evoked_new[0]._aspect_kind ==
FIFF.FIFFV_ASPECT_AVERAGE)
assert_true(evoked_new[0].kind == 'average')
assert_true(evoked_new[1]._aspect_kind ==
FIFF.FIFFV_ASPECT_STD_ERR)
assert_true(evoked_new[1].kind == 'standard_error')
for ave, ave2 in zip(evoked, evoked_new):
assert_array_almost_equal(ave.data, ave2.data)
assert_array_almost_equal(ave.times, ave2.times)
assert_equal(ave.nave, ave2.nave)
assert_equal(ave._aspect_kind, ave2._aspect_kind)
assert_equal(ave.kind, ave2.kind)
assert_equal(ave.last, ave2.last)
assert_equal(ave.first, ave2.first)
def test_reject_epochs():
"""Test of epochs rejection
"""
raw, events, picks = _get_data()
events1 = events[events[:, 2] == event_id]
epochs = Epochs(raw, events1,
event_id, tmin, tmax, baseline=(None, 0),
reject=reject, flat=flat)
assert_raises(RuntimeError, len, epochs)
n_events = len(epochs.events)
data = epochs.get_data()
n_clean_epochs = len(data)
# Should match
# mne_process_raw --raw test_raw.fif --projoff \
# --saveavetag -ave --ave test.ave --filteroff
assert_true(n_events > n_clean_epochs)
assert_true(n_clean_epochs == 3)
assert_true(epochs.drop_log == [[], [], [], ['MEG 2443'], ['MEG 2443'],
['MEG 2443'], ['MEG 2443']])
# Ensure epochs are not dropped based on a bad channel
raw_2 = raw.copy()
raw_2.info['bads'] = ['MEG 2443']
reject_crazy = dict(grad=1000e-15, mag=4e-15, eeg=80e-9, eog=150e-9)
epochs = Epochs(raw_2, events1, event_id, tmin, tmax, baseline=(None, 0),
reject=reject_crazy, flat=flat)
epochs.drop_bad_epochs()
assert_true(all('MEG 2442' in e for e in epochs.drop_log))
assert_true(all('MEG 2443' not in e for e in epochs.drop_log))
# Invalid reject_tmin/reject_tmax/detrend
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=1., reject_tmax=0)
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=tmin - 1, reject_tmax=1.)
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=0., reject_tmax=tmax + 1)
epochs = Epochs(raw, events1, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, flat=flat,
reject_tmin=0., reject_tmax=.1)
data = epochs.get_data()
n_clean_epochs = len(data)
assert_true(n_clean_epochs == 7)
assert_true(len(epochs) == 7)
assert_true(epochs.times[epochs._reject_time][0] >= 0.)
assert_true(epochs.times[epochs._reject_time][-1] <= 0.1)
# Invalid data for _is_good_epoch function
epochs = Epochs(raw, events1, event_id, tmin, tmax, reject=None, flat=None)
assert_equal(epochs._is_good_epoch(None), (False, ['NO_DATA']))
assert_equal(epochs._is_good_epoch(np.zeros((1, 1))),
(False, ['TOO_SHORT']))
data = epochs[0].get_data()[0]
assert_equal(epochs._is_good_epoch(data), (True, None))
def test_preload_epochs():
"""Test preload of epochs
"""
raw, events, picks = _get_data()
epochs_preload = Epochs(raw, events[:16], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=True,
reject=reject, flat=flat)
data_preload = epochs_preload.get_data()
epochs = Epochs(raw, events[:16], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
data = epochs.get_data()
assert_array_equal(data_preload, data)
assert_array_almost_equal(epochs_preload.average().data,
epochs.average().data, 18)
def test_indexing_slicing():
"""Test of indexing and slicing operations
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:20], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
data_normal = epochs.get_data()
n_good_events = data_normal.shape[0]
# indices for slicing
start_index = 1
end_index = n_good_events - 1
assert((end_index - start_index) > 0)
for preload in [True, False]:
epochs2 = Epochs(raw, events[:20], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=preload,
reject=reject, flat=flat)
if not preload:
epochs2.drop_bad_epochs()
# using slicing
epochs2_sliced = epochs2[start_index:end_index]
data_epochs2_sliced = epochs2_sliced.get_data()
assert_array_equal(data_epochs2_sliced,
data_normal[start_index:end_index])
# using indexing
pos = 0
for idx in range(start_index, end_index):
data = epochs2_sliced[pos].get_data()
assert_array_equal(data[0], data_normal[idx])
pos += 1
# using indexing with an int
data = epochs2[data_epochs2_sliced.shape[0]].get_data()
assert_array_equal(data, data_normal[[idx]])
# using indexing with an array
idx = np.random.randint(0, data_epochs2_sliced.shape[0], 10)
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
# using indexing with a list of indices
idx = [0]
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
idx = [0, 1]
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
def test_comparison_with_c():
"""Test of average obtained vs C code
"""
raw, events = _get_data()[:2]
c_evoked = read_evokeds(evoked_nf_name, condition=0)
epochs = Epochs(raw, events, event_id, tmin, tmax,
baseline=None, preload=True,
reject=None, flat=None)
evoked = epochs.average()
sel = pick_channels(c_evoked.ch_names, evoked.ch_names)
evoked_data = evoked.data
c_evoked_data = c_evoked.data[sel]
assert_true(evoked.nave == c_evoked.nave)
assert_array_almost_equal(evoked_data, c_evoked_data, 10)
assert_array_almost_equal(evoked.times, c_evoked.times, 12)
def test_crop():
"""Test of crop of epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
assert_raises(RuntimeError, epochs.crop, None, 0.2) # not preloaded
data_normal = epochs.get_data()
epochs2 = Epochs(raw, events[:5], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=True,
reject=reject, flat=flat)
with warnings.catch_warnings(record=True) as w:
epochs2.crop(-20, 200)
assert_true(len(w) == 2)
# indices for slicing
tmin_window = tmin + 0.1
tmax_window = tmax - 0.1
tmask = (epochs.times >= tmin_window) & (epochs.times <= tmax_window)
assert_true(tmin_window > tmin)
assert_true(tmax_window < tmax)
epochs3 = epochs2.crop(tmin_window, tmax_window, copy=True)
data3 = epochs3.get_data()
epochs2.crop(tmin_window, tmax_window)
data2 = epochs2.get_data()
assert_array_equal(data2, data_normal[:, :, tmask])
assert_array_equal(data3, data_normal[:, :, tmask])
def test_resample():
"""Test of resample of epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
assert_raises(RuntimeError, epochs.resample, 100)
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=reject, flat=flat)
data_normal = cp.deepcopy(epochs.get_data())
times_normal = cp.deepcopy(epochs.times)
sfreq_normal = epochs.info['sfreq']
# upsample by 2
epochs.resample(sfreq_normal * 2, npad=0)
data_up = cp.deepcopy(epochs.get_data())
times_up = cp.deepcopy(epochs.times)
sfreq_up = epochs.info['sfreq']
    # downsample by 2, which should match
epochs.resample(sfreq_normal, npad=0)
data_new = cp.deepcopy(epochs.get_data())
times_new = cp.deepcopy(epochs.times)
sfreq_new = epochs.info['sfreq']
assert_true(data_up.shape[2] == 2 * data_normal.shape[2])
assert_true(sfreq_up == 2 * sfreq_normal)
assert_true(sfreq_new == sfreq_normal)
assert_true(len(times_up) == 2 * len(times_normal))
assert_array_almost_equal(times_new, times_normal, 10)
assert_true(data_up.shape[2] == 2 * data_normal.shape[2])
assert_array_almost_equal(data_new, data_normal, 5)
# use parallel
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=reject, flat=flat)
epochs.resample(sfreq_normal * 2, n_jobs=2, npad=0)
assert_true(np.allclose(data_up, epochs._data, rtol=1e-8, atol=1e-16))
def test_detrend():
"""Test detrending of epochs
"""
raw, events, picks = _get_data()
# test first-order
epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, detrend=1)
epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, detrend=None)
data_picks = pick_types(epochs_1.info, meg=True, eeg=True,
exclude='bads')
evoked_1 = epochs_1.average()
evoked_2 = epochs_2.average()
evoked_2.detrend(1)
# Due to roundoff these won't be exactly equal, but they should be close
assert_true(np.allclose(evoked_1.data, evoked_2.data,
rtol=1e-8, atol=1e-20))
# test zeroth-order case
for preload in [True, False]:
epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, None), preload=preload)
epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, preload=preload, detrend=0)
a = epochs_1.get_data()
b = epochs_2.get_data()
# All data channels should be almost equal
assert_true(np.allclose(a[:, data_picks, :], b[:, data_picks, :],
rtol=1e-16, atol=1e-20))
# There are non-M/EEG channels that should not be equal:
assert_true(not np.allclose(a, b))
assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
detrend=2)
def test_bootstrap():
"""Test of bootstrapping of epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=reject, flat=flat)
epochs2 = bootstrap(epochs, random_state=0)
assert_true(len(epochs2.events) == len(epochs.events))
assert_true(epochs._data.shape == epochs2._data.shape)
def test_epochs_copy():
"""Test copy epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=reject, flat=flat)
copied = epochs.copy()
assert_array_equal(epochs._data, copied._data)
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
copied = epochs.copy()
data = epochs.get_data()
copied_data = copied.get_data()
assert_array_equal(data, copied_data)
def test_iter_evoked():
"""Test the iterator for epochs -> evoked
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
for ii, ev in enumerate(epochs.iter_evoked()):
x = ev.data
y = epochs.get_data()[ii, :, :]
assert_array_equal(x, y)
def test_subtract_evoked():
"""Test subtraction of Evoked from Epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
    # make sure subtraction fails if data channels are missing
assert_raises(ValueError, epochs.subtract_evoked,
epochs.average(picks[:5]))
    # do the subtraction using the default argument
epochs.subtract_evoked()
# apply SSP now
epochs.apply_proj()
# use preloading and SSP from the start
epochs2 = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True, proj=True)
evoked = epochs2.average()
epochs2.subtract_evoked(evoked)
# this gives the same result
assert_allclose(epochs.get_data(), epochs2.get_data())
# if we compute the evoked response after subtracting it we get zero
zero_evoked = epochs.average()
data = zero_evoked.data
assert_array_almost_equal(data, np.zeros_like(data), decimal=20)
def test_epoch_eq():
"""Test epoch count equalization and condition combining
"""
raw, events, picks = _get_data()
# equalizing epochs objects
epochs_1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
epochs_2 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
epochs_1.drop_bad_epochs() # make sure drops are logged
assert_true(len([l for l in epochs_1.drop_log if not l]) ==
len(epochs_1.events))
drop_log1 = epochs_1.drop_log = [[] for _ in range(len(epochs_1.events))]
drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
epochs_1.drop_log]
assert_true(drop_log1 == drop_log2)
assert_true(len([l for l in epochs_1.drop_log if not l]) ==
len(epochs_1.events))
assert_true(epochs_1.events.shape[0] != epochs_2.events.shape[0])
equalize_epoch_counts([epochs_1, epochs_2], method='mintime')
assert_true(epochs_1.events.shape[0] == epochs_2.events.shape[0])
epochs_3 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
epochs_4 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
equalize_epoch_counts([epochs_3, epochs_4], method='truncate')
assert_true(epochs_1.events.shape[0] == epochs_3.events.shape[0])
assert_true(epochs_3.events.shape[0] == epochs_4.events.shape[0])
# equalizing conditions
epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
tmin, tmax, picks=picks, reject=reject)
epochs.drop_bad_epochs() # make sure drops are logged
assert_true(len([l for l in epochs.drop_log if not l]) ==
len(epochs.events))
drop_log1 = deepcopy(epochs.drop_log)
old_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
epochs.equalize_event_counts(['a', 'b'], copy=False)
# undo the eq logging
drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
epochs.drop_log]
assert_true(drop_log1 == drop_log2)
assert_true(len([l for l in epochs.drop_log if not l]) ==
len(epochs.events))
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(new_shapes[0] == new_shapes[1])
assert_true(new_shapes[2] == new_shapes[2])
assert_true(new_shapes[3] == new_shapes[3])
# now with two conditions collapsed
old_shapes = new_shapes
epochs.equalize_event_counts([['a', 'b'], 'c'], copy=False)
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2])
assert_true(new_shapes[3] == old_shapes[3])
assert_raises(KeyError, epochs.equalize_event_counts, [1, 'a'])
# now let's combine conditions
old_shapes = new_shapes
epochs = epochs.equalize_event_counts([['a', 'b'], ['c', 'd']])[0]
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(old_shapes[0] + old_shapes[1] == new_shapes[0] + new_shapes[1])
assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2] + new_shapes[3])
assert_raises(ValueError, combine_event_ids, epochs, ['a', 'b'],
{'ab': 1})
combine_event_ids(epochs, ['a', 'b'], {'ab': 12}, copy=False)
caught = 0
for key in ['a', 'b']:
try:
epochs[key]
except KeyError:
caught += 1
assert_raises(Exception, caught == 2)
assert_true(not np.any(epochs.events[:, 2] == 1))
assert_true(not np.any(epochs.events[:, 2] == 2))
epochs = combine_event_ids(epochs, ['c', 'd'], {'cd': 34})
assert_true(np.all(np.logical_or(epochs.events[:, 2] == 12,
epochs.events[:, 2] == 34)))
assert_true(epochs['ab'].events.shape[0] == old_shapes[0] + old_shapes[1])
assert_true(epochs['ab'].events.shape[0] == epochs['cd'].events.shape[0])
def test_access_by_name():
"""Test accessing epochs by event name and on_missing for rare events
"""
tempdir = _TempDir()
raw, events, picks = _get_data()
# Test various invalid inputs
assert_raises(ValueError, Epochs, raw, events, {1: 42, 2: 42}, tmin,
tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},
tmin, tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},
tmin, tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, 'foo', tmin, tmax,
picks=picks)
assert_raises(ValueError, Epochs, raw, events, ['foo'], tmin, tmax,
picks=picks)
# Test accessing non-existent events (assumes 12345678 does not exist)
event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
assert_raises(ValueError, Epochs, raw, events, event_id_illegal,
tmin, tmax)
# Test on_missing
assert_raises(ValueError, Epochs, raw, events, 1, tmin, tmax,
on_missing='foo')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='warning')
nw = len(w)
assert_true(1 <= nw <= 2)
Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='ignore')
assert_equal(len(w), nw)
# Test constructing epochs with a list of ints as events
epochs = Epochs(raw, events, [1, 2], tmin, tmax, picks=picks)
for k, v in epochs.event_id.items():
assert_equal(int(k), v)
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
assert_raises(KeyError, epochs.__getitem__, 'bar')
data = epochs['a'].get_data()
event_a = events[events[:, 2] == 1]
assert_true(len(data) == len(event_a))
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks,
preload=True)
assert_raises(KeyError, epochs.__getitem__, 'bar')
epochs.save(op.join(tempdir, 'test-epo.fif'))
epochs2 = read_epochs(op.join(tempdir, 'test-epo.fif'))
for ep in [epochs, epochs2]:
data = ep['a'].get_data()
event_a = events[events[:, 2] == 1]
assert_true(len(data) == len(event_a))
assert_array_equal(epochs2['a'].events, epochs['a'].events)
epochs3 = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
tmin, tmax, picks=picks, preload=True)
assert_equal(list(sorted(epochs3[('a', 'b')].event_id.values())),
[1, 2])
epochs4 = epochs['a']
epochs5 = epochs3['a']
assert_array_equal(epochs4.events, epochs5.events)
# 20 is our tolerance because epochs are written out as floats
assert_array_almost_equal(epochs4.get_data(), epochs5.get_data(), 20)
epochs6 = epochs3[['a', 'b']]
assert_true(all(np.logical_or(epochs6.events[:, 2] == 1,
epochs6.events[:, 2] == 2)))
assert_array_equal(epochs.events, epochs6.events)
assert_array_almost_equal(epochs.get_data(), epochs6.get_data(), 20)
# Make sure we preserve names
assert_equal(epochs['a'].name, 'a')
assert_equal(epochs[['a', 'b']]['a'].name, 'a')
@requires_pandas
def test_to_data_frame():
"""Test epochs Pandas exporter"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
assert_raises(ValueError, epochs.to_data_frame, index=['foo', 'bar'])
assert_raises(ValueError, epochs.to_data_frame, index='qux')
assert_raises(ValueError, epochs.to_data_frame, np.arange(400))
df = epochs.to_data_frame(index=['condition', 'epoch', 'time'],
picks=list(range(epochs.info['nchan'])))
# Default index and picks
df2 = epochs.to_data_frame()
assert_equal(df.index.names, df2.index.names)
assert_array_equal(df.columns.values, epochs.ch_names)
data = np.hstack(epochs.get_data())
assert_true((df.columns == epochs.ch_names).all())
assert_array_equal(df.values[:, 0], data[0] * 1e13)
assert_array_equal(df.values[:, 2], data[2] * 1e15)
for ind in ['time', ['condition', 'time'], ['condition', 'time', 'epoch']]:
df = epochs.to_data_frame(index=ind)
        assert_true(df.index.names ==
                    (ind if isinstance(ind, list) else [ind]))
        # test that non-indexed data are present as categorical variables
df.reset_index().columns[:3] == ['condition', 'epoch', 'time']
def test_epochs_proj_mixin():
"""Test SSP proj methods from ProjMixin class
"""
raw, events, picks = _get_data()
for proj in [True, False]:
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj=proj)
assert_true(all(p['active'] == proj for p in epochs.info['projs']))
# test adding / deleting proj
if proj:
epochs.get_data()
assert_true(all(p['active'] == proj for p in epochs.info['projs']))
assert_raises(ValueError, epochs.add_proj, epochs.info['projs'][0],
{'remove_existing': True})
assert_raises(ValueError, epochs.add_proj, 'spam')
assert_raises(ValueError, epochs.del_proj, 0)
else:
projs = deepcopy(epochs.info['projs'])
n_proj = len(epochs.info['projs'])
epochs.del_proj(0)
assert_true(len(epochs.info['projs']) == n_proj - 1)
epochs.add_proj(projs, remove_existing=False)
assert_true(len(epochs.info['projs']) == 2 * n_proj - 1)
epochs.add_proj(projs, remove_existing=True)
assert_true(len(epochs.info['projs']) == n_proj)
# catch no-gos.
# wrong proj argument
assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), proj='crazy')
# delayed without reject params
assert_raises(RuntimeError, Epochs, raw, events[:4], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), proj='delayed', reject=None)
for preload in [True, False]:
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj='delayed', preload=preload,
add_eeg_ref=True, verbose=True, reject=reject)
epochs2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj=True, preload=preload,
add_eeg_ref=True, reject=reject)
assert_allclose(epochs.copy().apply_proj().get_data()[0],
epochs2.get_data()[0])
# make sure data output is constant across repeated calls
# e.g. drop bads
assert_array_equal(epochs.get_data(), epochs.get_data())
assert_array_equal(epochs2.get_data(), epochs2.get_data())
# test epochs.next calls
data = epochs.get_data().copy()
data2 = np.array([e for e in epochs])
assert_array_equal(data, data2)
# cross application from processing stream 1 to 2
epochs.apply_proj()
assert_array_equal(epochs._projector, epochs2._projector)
assert_allclose(epochs._data, epochs2.get_data())
# test mixin against manual application
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, proj=False, add_eeg_ref=True)
data = epochs.get_data().copy()
epochs.apply_proj()
assert_allclose(np.dot(epochs._projector, data[0]), epochs._data[0])
def test_drop_epochs():
"""Test dropping of epochs.
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
events1 = events[events[:, 2] == event_id]
# Bound checks
assert_raises(IndexError, epochs.drop_epochs, [len(epochs.events)])
assert_raises(IndexError, epochs.drop_epochs, [-1])
assert_raises(ValueError, epochs.drop_epochs, [[1, 2], [3, 4]])
# Test selection attribute
assert_array_equal(epochs.selection,
np.where(events[:, 2] == event_id)[0])
assert_equal(len(epochs.drop_log), len(events))
assert_true(all(epochs.drop_log[k] == ['IGNORED']
for k in set(range(len(events))) - set(epochs.selection)))
selection = epochs.selection.copy()
n_events = len(epochs.events)
epochs.drop_epochs([2, 4], reason='d')
assert_equal(epochs.drop_log_stats(), 2. / n_events * 100)
assert_equal(len(epochs.drop_log), len(events))
assert_equal([epochs.drop_log[k]
for k in selection[[2, 4]]], [['d'], ['d']])
assert_array_equal(events[epochs.selection], events1[[0, 1, 3, 5, 6]])
assert_array_equal(events[epochs[3:].selection], events1[[5, 6]])
assert_array_equal(events[epochs['1'].selection], events1[[0, 1, 3, 5, 6]])
def test_drop_epochs_mult():
"""Test that subselecting epochs or making less epochs is equivalent"""
raw, events, picks = _get_data()
for preload in [True, False]:
epochs1 = Epochs(raw, events, {'a': 1, 'b': 2},
tmin, tmax, picks=picks, reject=reject,
preload=preload)['a']
epochs2 = Epochs(raw, events, {'a': 1},
tmin, tmax, picks=picks, reject=reject,
preload=preload)
if preload:
# In the preload case you cannot know the bads if already ignored
assert_equal(len(epochs1.drop_log), len(epochs2.drop_log))
for d1, d2 in zip(epochs1.drop_log, epochs2.drop_log):
if d1 == ['IGNORED']:
assert_true(d2 == ['IGNORED'])
if d1 != ['IGNORED'] and d1 != []:
assert_true((d2 == d1) or (d2 == ['IGNORED']))
if d1 == []:
assert_true(d2 == [])
assert_array_equal(epochs1.events, epochs2.events)
assert_array_equal(epochs1.selection, epochs2.selection)
else:
            # In the non-preload case it should be exactly the same
assert_equal(epochs1.drop_log, epochs2.drop_log)
assert_array_equal(epochs1.events, epochs2.events)
assert_array_equal(epochs1.selection, epochs2.selection)
def test_contains():
"""Test membership API"""
raw, events = _get_data()[:2]
tests = [(('mag', False), ('grad', 'eeg')),
(('grad', False), ('mag', 'eeg')),
((False, True), ('grad', 'mag'))]
for (meg, eeg), others in tests:
picks_contains = pick_types(raw.info, meg=meg, eeg=eeg)
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax,
picks=picks_contains, reject=None,
preload=False)
test = 'eeg' if eeg is True else meg
assert_true(test in epochs)
assert_true(not any(o in epochs for o in others))
assert_raises(ValueError, epochs.__contains__, 'foo')
assert_raises(ValueError, epochs.__contains__, 1)
def test_drop_channels_mixin():
"""Test channels-dropping functionality
"""
raw, events = _get_data()[:2]
# here without picks to get additional coverage
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=None,
baseline=(None, 0), preload=True)
drop_ch = epochs.ch_names[:3]
ch_names = epochs.ch_names[3:]
ch_names_orig = epochs.ch_names
dummy = epochs.drop_channels(drop_ch, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, epochs.ch_names)
assert_equal(len(ch_names_orig), epochs.get_data().shape[1])
epochs.drop_channels(drop_ch)
assert_equal(ch_names, epochs.ch_names)
assert_equal(len(ch_names), epochs.get_data().shape[1])
def test_pick_channels_mixin():
"""Test channel-picking functionality
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
ch_names = epochs.ch_names[:3]
epochs.preload = False
assert_raises(RuntimeError, epochs.drop_channels, ['foo'])
epochs.preload = True
ch_names_orig = epochs.ch_names
dummy = epochs.pick_channels(ch_names, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, epochs.ch_names)
assert_equal(len(ch_names_orig), epochs.get_data().shape[1])
epochs.pick_channels(ch_names)
assert_equal(ch_names, epochs.ch_names)
assert_equal(len(ch_names), epochs.get_data().shape[1])
# Invalid picks
assert_raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,
picks=[])
def test_equalize_channels():
"""Test equalization of channels
"""
raw, events, picks = _get_data()
epochs1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj=False, preload=True)
epochs2 = epochs1.copy()
ch_names = epochs1.ch_names[2:]
epochs1.drop_channels(epochs1.ch_names[:1])
epochs2.drop_channels(epochs2.ch_names[1:2])
my_comparison = [epochs1, epochs2]
equalize_channels(my_comparison)
for e in my_comparison:
assert_equal(ch_names, e.ch_names)
def test_illegal_event_id():
"""Test handling of invalid events ids"""
raw, events, picks = _get_data()
event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
assert_raises(ValueError, Epochs, raw, events, event_id_illegal, tmin,
tmax, picks=picks, baseline=(None, 0), proj=False)
def test_add_channels_epochs():
"""Test adding channels"""
raw, events, picks = _get_data()
def make_epochs(picks):
return Epochs(raw, events, event_id, tmin, tmax, baseline=(None, 0),
reject=None, preload=True, proj=False, picks=picks)
picks = pick_types(raw.info, meg=True, eeg=True, exclude='bads')
picks_meg = pick_types(raw.info, meg=True, eeg=False, exclude='bads')
picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
epochs = make_epochs(picks=picks)
epochs_meg = make_epochs(picks=picks_meg)
epochs_eeg = make_epochs(picks=picks_eeg)
epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
assert_equal(len(epochs.info['projs']), len(epochs2.info['projs']))
assert_equal(len(epochs.info.keys()), len(epochs2.info.keys()))
data1 = epochs.get_data()
data2 = epochs2.get_data()
data3 = np.concatenate([e.get_data() for e in
[epochs_meg, epochs_eeg]], axis=1)
assert_array_equal(data1.shape, data2.shape)
# XXX unrelated bug? this crashes when proj == True
assert_array_equal(data1, data3)
assert_array_equal(data1, data2)
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['meas_date'] += 10
add_channels_epochs([epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs2.info['filename'] = epochs2.info['filename'].upper()
epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.events[3, 2] -= 1
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
assert_raises(ValueError, add_channels_epochs,
[epochs_meg, epochs_eeg[:2]])
epochs_meg.info['chs'].pop(0)
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['sfreq'] = None
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['sfreq'] += 10
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['ch_names'][1] = epochs_meg2.info['ch_names'][0]
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['dev_head_t']['to'] += 1
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['dev_head_t']['to'] += 1
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
    epochs_meg2.info['experimenter'] = 'foo'
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.preload = False
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.tmin += 0.4
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.tmin += 0.5
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.baseline = None
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.event_id['b'] = 2
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
def test_array_epochs():
"""Test creating epochs from array
"""
tempdir = _TempDir()
# creating
rng = np.random.RandomState(42)
data = rng.random_sample((10, 20, 300))
sfreq = 1e3
ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
types = ['eeg'] * 20
info = create_info(ch_names, sfreq, types)
events = np.c_[np.arange(1, 600, 60),
np.zeros(10),
[1, 2] * 5]
event_id = {'a': 1, 'b': 2}
epochs = EpochsArray(data, info, events=events, event_id=event_id,
tmin=-.2)
# saving
temp_fname = op.join(tempdir, 'test-epo.fif')
epochs.save(temp_fname)
epochs2 = read_epochs(temp_fname)
data2 = epochs2.get_data()
assert_allclose(data, data2)
assert_allclose(epochs.times, epochs2.times)
assert_equal(epochs.event_id, epochs2.event_id)
assert_array_equal(epochs.events, epochs2.events)
# plotting
epochs[0].plot()
# indexing
assert_array_equal(np.unique(epochs['a'].events[:, 2]), np.array([1]))
assert_equal(len(epochs[:2]), 2)
data[0, 5, 150] = 3000
data[1, :, :] = 0
data[2, 5, 210] = 3000
data[3, 5, 260] = 0
epochs = EpochsArray(data, info, events=events, event_id=event_id,
tmin=0, reject=dict(eeg=1000), flat=dict(eeg=1e-1),
reject_tmin=0.1, reject_tmax=0.2)
assert_equal(len(epochs), len(events) - 2)
assert_equal(epochs.drop_log[0], ['EEG 006'])
assert_equal(len(events), len(epochs.selection))
# baseline
data = np.ones((10, 20, 300))
epochs = EpochsArray(data, info, events=events, event_id=event_id,
tmin=-.2, baseline=(None, 0))
ep_data = epochs.get_data()
assert_array_equal(np.zeros_like(ep_data), ep_data)
def test_concatenate_epochs():
"""test concatenate epochs"""
raw, events, picks = _get_data()
epochs = Epochs(
raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
picks=picks)
epochs2 = epochs.copy()
epochs_list = [epochs, epochs2]
epochs_conc = concatenate_epochs(epochs_list)
assert_array_equal(
epochs_conc.events[:, 0], np.unique(epochs_conc.events[:, 0]))
expected_shape = list(epochs.get_data().shape)
expected_shape[0] *= 2
expected_shape = tuple(expected_shape)
assert_equal(epochs_conc.get_data().shape, expected_shape)
assert_equal(epochs_conc.drop_log, epochs.drop_log * 2)
epochs2 = epochs.copy()
epochs2._data = epochs2.get_data()
epochs2.preload = True
assert_raises(
ValueError, concatenate_epochs,
[epochs, epochs2.drop_channels(epochs2.ch_names[:1], copy=True)])
epochs2.times = np.delete(epochs2.times, 1)
assert_raises(
ValueError,
concatenate_epochs, [epochs, epochs2])
assert_equal(epochs_conc.raw, None)
run_tests_if_main()
| bsd-3-clause |
kcavagnolo/astroML | book_figures/appendix/fig_broadcast_visual.py | 3 | 8624 | """
Broadcast Visualization
-----------------------
Figure A.1
A visualization of NumPy array broadcasting. Note that the extra memory
indicated by the dotted boxes is never allocated, but it can be convenient
to think about the operations as if it is.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
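# If LaTeX is not installed on the system, the same call can simply be made
# with usetex disabled instead, e.g.:
#
#     setup_text_plots(fontsize=8, usetex=False)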
#------------------------------------------------------------
# Draw a figure and axis with no boundary
fig = plt.figure(figsize=(5, 3.75), facecolor='w')
ax = plt.axes([0, 0, 1, 1], xticks=[], yticks=[], frameon=False)
def draw_cube(ax, xy, size, depth=0.4,
edges=None, label=None, label_kwargs=None, **kwargs):
"""draw and label a cube. edges is a list of numbers between
1 and 12, specifying which of the 12 cube edges to draw"""
if edges is None:
edges = range(1, 13)
x, y = xy
if 1 in edges:
ax.plot([x, x + size],
[y + size, y + size], **kwargs)
if 2 in edges:
ax.plot([x + size, x + size],
[y, y + size], **kwargs)
if 3 in edges:
ax.plot([x, x + size],
[y, y], **kwargs)
if 4 in edges:
ax.plot([x, x],
[y, y + size], **kwargs)
if 5 in edges:
ax.plot([x, x + depth],
[y + size, y + depth + size], **kwargs)
if 6 in edges:
ax.plot([x + size, x + size + depth],
[y + size, y + depth + size], **kwargs)
if 7 in edges:
ax.plot([x + size, x + size + depth],
[y, y + depth], **kwargs)
if 8 in edges:
ax.plot([x, x + depth],
[y, y + depth], **kwargs)
if 9 in edges:
ax.plot([x + depth, x + depth + size],
[y + depth + size, y + depth + size], **kwargs)
if 10 in edges:
ax.plot([x + depth + size, x + depth + size],
[y + depth, y + depth + size], **kwargs)
if 11 in edges:
ax.plot([x + depth, x + depth + size],
[y + depth, y + depth], **kwargs)
if 12 in edges:
ax.plot([x + depth, x + depth],
[y + depth, y + depth + size], **kwargs)
if label:
if label_kwargs is None:
label_kwargs = {}
ax.text(x + 0.5 * size, y + 0.5 * size, label,
ha='center', va='center', **label_kwargs)
solid = dict(c='black', ls='-', lw=1,
label_kwargs=dict(color='k'))
dotted = dict(c='black', ls=':', lw=0.5,
label_kwargs=dict(color='gray'))
depth = 0.3
#------------------------------------------------------------
# Draw top operation: vector plus scalar
draw_cube(ax, (1, 10), 1, depth, [1, 2, 3, 4, 5, 6, 9], '0', **solid)
draw_cube(ax, (2, 10), 1, depth, [1, 2, 3, 6, 9], '1', **solid)
draw_cube(ax, (3, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10], '2', **solid)
draw_cube(ax, (6, 10), 1, depth, [1, 2, 3, 4, 5, 6, 7, 9, 10], '5', **solid)
draw_cube(ax, (7, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10, 11], '5', **dotted)
draw_cube(ax, (8, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10, 11], '5', **dotted)
draw_cube(ax, (12, 10), 1, depth, [1, 2, 3, 4, 5, 6, 9], '5', **solid)
draw_cube(ax, (13, 10), 1, depth, [1, 2, 3, 6, 9], '6', **solid)
draw_cube(ax, (14, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10], '7', **solid)
ax.text(5, 10.5, '+', size=12, ha='center', va='center')
ax.text(10.5, 10.5, '=', size=12, ha='center', va='center')
ax.text(1, 11.5, r'${\tt np.arange(3) + 5}$',
size=12, ha='left', va='bottom')
#------------------------------------------------------------
# Draw middle operation: matrix plus vector
# first block
draw_cube(ax, (1, 7.5), 1, depth, [1, 2, 3, 4, 5, 6, 9], '1', **solid)
draw_cube(ax, (2, 7.5), 1, depth, [1, 2, 3, 6, 9], '1', **solid)
draw_cube(ax, (3, 7.5), 1, depth, [1, 2, 3, 6, 7, 9, 10], '1', **solid)
draw_cube(ax, (1, 6.5), 1, depth, [2, 3, 4], '1', **solid)
draw_cube(ax, (2, 6.5), 1, depth, [2, 3], '1', **solid)
draw_cube(ax, (3, 6.5), 1, depth, [2, 3, 7, 10], '1', **solid)
draw_cube(ax, (1, 5.5), 1, depth, [2, 3, 4], '1', **solid)
draw_cube(ax, (2, 5.5), 1, depth, [2, 3], '1', **solid)
draw_cube(ax, (3, 5.5), 1, depth, [2, 3, 7, 10], '1', **solid)
# second block
draw_cube(ax, (6, 7.5), 1, depth, [1, 2, 3, 4, 5, 6, 9], '0', **solid)
draw_cube(ax, (7, 7.5), 1, depth, [1, 2, 3, 6, 9], '1', **solid)
draw_cube(ax, (8, 7.5), 1, depth, [1, 2, 3, 6, 7, 9, 10], '2', **solid)
draw_cube(ax, (6, 6.5), 1, depth, range(2, 13), '0', **dotted)
draw_cube(ax, (7, 6.5), 1, depth, [2, 3, 6, 7, 9, 10, 11], '1', **dotted)
draw_cube(ax, (8, 6.5), 1, depth, [2, 3, 6, 7, 9, 10, 11], '2', **dotted)
draw_cube(ax, (6, 5.5), 1, depth, [2, 3, 4, 7, 8, 10, 11, 12], '0', **dotted)
draw_cube(ax, (7, 5.5), 1, depth, [2, 3, 7, 10, 11], '1', **dotted)
draw_cube(ax, (8, 5.5), 1, depth, [2, 3, 7, 10, 11], '2', **dotted)
# third block
draw_cube(ax, (12, 7.5), 1, depth, [1, 2, 3, 4, 5, 6, 9], '1', **solid)
draw_cube(ax, (13, 7.5), 1, depth, [1, 2, 3, 6, 9], '2', **solid)
draw_cube(ax, (14, 7.5), 1, depth, [1, 2, 3, 6, 7, 9, 10], '3', **solid)
draw_cube(ax, (12, 6.5), 1, depth, [2, 3, 4], '1', **solid)
draw_cube(ax, (13, 6.5), 1, depth, [2, 3], '2', **solid)
draw_cube(ax, (14, 6.5), 1, depth, [2, 3, 7, 10], '3', **solid)
draw_cube(ax, (12, 5.5), 1, depth, [2, 3, 4], '1', **solid)
draw_cube(ax, (13, 5.5), 1, depth, [2, 3], '2', **solid)
draw_cube(ax, (14, 5.5), 1, depth, [2, 3, 7, 10], '3', **solid)
ax.text(5, 7.0, '+', size=12, ha='center', va='center')
ax.text(10.5, 7.0, '=', size=12, ha='center', va='center')
ax.text(1, 9.0, r'${\tt np.ones((3,\, 3)) + np.arange(3)}$',
size=12, ha='left', va='bottom')
#------------------------------------------------------------
# Draw bottom operation: vector plus vector, double broadcast
# first block
draw_cube(ax, (1, 3), 1, depth, [1, 2, 3, 4, 5, 6, 7, 9, 10], '0', **solid)
draw_cube(ax, (1, 2), 1, depth, [2, 3, 4, 7, 10], '1', **solid)
draw_cube(ax, (1, 1), 1, depth, [2, 3, 4, 7, 10], '2', **solid)
draw_cube(ax, (2, 3), 1, depth, [1, 2, 3, 6, 7, 9, 10, 11], '0', **dotted)
draw_cube(ax, (2, 2), 1, depth, [2, 3, 7, 10, 11], '1', **dotted)
draw_cube(ax, (2, 1), 1, depth, [2, 3, 7, 10, 11], '2', **dotted)
draw_cube(ax, (3, 3), 1, depth, [1, 2, 3, 6, 7, 9, 10, 11], '0', **dotted)
draw_cube(ax, (3, 2), 1, depth, [2, 3, 7, 10, 11], '1', **dotted)
draw_cube(ax, (3, 1), 1, depth, [2, 3, 7, 10, 11], '2', **dotted)
# second block
draw_cube(ax, (6, 3), 1, depth, [1, 2, 3, 4, 5, 6, 9], '0', **solid)
draw_cube(ax, (7, 3), 1, depth, [1, 2, 3, 6, 9], '1', **solid)
draw_cube(ax, (8, 3), 1, depth, [1, 2, 3, 6, 7, 9, 10], '2', **solid)
draw_cube(ax, (6, 2), 1, depth, range(2, 13), '0', **dotted)
draw_cube(ax, (7, 2), 1, depth, [2, 3, 6, 7, 9, 10, 11], '1', **dotted)
draw_cube(ax, (8, 2), 1, depth, [2, 3, 6, 7, 9, 10, 11], '2', **dotted)
draw_cube(ax, (6, 1), 1, depth, [2, 3, 4, 7, 8, 10, 11, 12], '0', **dotted)
draw_cube(ax, (7, 1), 1, depth, [2, 3, 7, 10, 11], '1', **dotted)
draw_cube(ax, (8, 1), 1, depth, [2, 3, 7, 10, 11], '2', **dotted)
# third block
draw_cube(ax, (12, 3), 1, depth, [1, 2, 3, 4, 5, 6, 9], '0', **solid)
draw_cube(ax, (13, 3), 1, depth, [1, 2, 3, 6, 9], '1', **solid)
draw_cube(ax, (14, 3), 1, depth, [1, 2, 3, 6, 7, 9, 10], '2', **solid)
draw_cube(ax, (12, 2), 1, depth, [2, 3, 4], '1', **solid)
draw_cube(ax, (13, 2), 1, depth, [2, 3], '2', **solid)
draw_cube(ax, (14, 2), 1, depth, [2, 3, 7, 10], '3', **solid)
draw_cube(ax, (12, 1), 1, depth, [2, 3, 4], '2', **solid)
draw_cube(ax, (13, 1), 1, depth, [2, 3], '3', **solid)
draw_cube(ax, (14, 1), 1, depth, [2, 3, 7, 10], '4', **solid)
ax.text(5, 2.5, '+', size=12, ha='center', va='center')
ax.text(10.5, 2.5, '=', size=12, ha='center', va='center')
ax.text(1, 4.5, r'${\tt np.arange(3).reshape((3,\, 1)) + np.arange(3)}$',
ha='left', size=12, va='bottom')
ax.set_xlim(0, 16)
ax.set_ylim(0.5, 12.5)
plt.show()
| bsd-2-clause |
GJL/flink | flink-python/pyflink/table/serializers.py | 13 | 3089 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import io
from pyflink.serializers import Serializer
from pyflink.table.utils import arrow_to_pandas, pandas_to_arrow
class ArrowSerializer(Serializer):
"""
Serializes pandas.Series into Arrow streaming format data.
"""
def __init__(self, schema, row_type, timezone):
super(ArrowSerializer, self).__init__()
self._schema = schema
self._field_types = row_type.field_types()
self._timezone = timezone
def __repr__(self):
return "ArrowSerializer"
def dump_to_stream(self, iterator, stream):
writer = None
try:
for cols in iterator:
batch = pandas_to_arrow(self._schema, self._timezone, self._field_types, cols)
if writer is None:
import pyarrow as pa
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
finally:
if writer is not None:
writer.close()
def load_from_stream(self, stream):
import pyarrow as pa
reader = pa.ipc.open_stream(stream)
for batch in reader:
yield arrow_to_pandas(self._timezone, self._field_types, [batch])
def load_from_iterator(self, itor):
class IteratorIO(io.RawIOBase):
def __init__(self, itor):
super(IteratorIO, self).__init__()
self.itor = itor
self.leftover = None
def readable(self):
return True
def readinto(self, b):
output_buffer_len = len(b)
input = self.leftover or (self.itor.next() if self.itor.hasNext() else None)
if input is None:
return 0
output, self.leftover = input[:output_buffer_len], input[output_buffer_len:]
b[:len(output)] = output
return len(output)
import pyarrow as pa
reader = pa.ipc.open_stream(
io.BufferedReader(IteratorIO(itor), buffer_size=io.DEFAULT_BUFFER_SIZE))
for batch in reader:
yield batch
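# Illustrative sketch only (not part of the PyFlink API): the streaming round
# trip performed by ArrowSerializer can be reproduced with plain pyarrow. The
# single-column batch below is made-up example data, and pandas is used only
# to mimic the pandas.Series input handled by this serializer.
#
#     import pandas as pd
#     import pyarrow as pa
#
#     batch = pa.RecordBatch.from_arrays(
#         [pa.array(pd.Series([1, 2, 3]))], names=['f0'])
#
#     sink = pa.BufferOutputStream()
#     writer = pa.RecordBatchStreamWriter(sink, batch.schema)  # cf. dump_to_stream
#     writer.write_batch(batch)
#     writer.close()
#
#     reader = pa.ipc.open_stream(sink.getvalue())             # cf. load_from_stream
#     restored = [b.to_pandas() for b in reader]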
| apache-2.0 |
XiaowenLin/cs598rk | scripts/spark_data_frame.py | 1 | 1187 | %history
from pyspark.sql.types import *
from pyspark.sql import Row
rdd = sc.textFile('data/realEstate.csv')
rdd = rdd.map(lambda line: line.split(","))
header = rdd.first()
rdd = rdd.filter(lambda line:line != header)
df = rdd.map(lambda line: Row(street = line[0], city = line[1], zip=line[2], beds=line[4], baths=line[5], sqft=line[6], price=line[9])).toDF()
import pandas
df.toPandas()
favorite_zip = df[df.zip == 95815]
favorite_zip.show(5)
favorite_zip.show()
df.select('city','beds').show(10)
df.groupBy("beds").count().show()
df.describe(['baths', 'beds','price','sqft']).show()
df.describe(['baths', 'beds','price','sqft']).show()
# regression with mllib
import pyspark.mllib
import pyspark.mllib.regression
from pyspark.mllib.regression import LabeledPoint
from pyspark.sql.functions import *
df = df.select('price','baths','beds','sqft')
df = df[df.baths > 0]
df = df[df.beds > 0]
df = df[df.sqft > 0]
df.describe(['baths','beds','price','sqft']).show()
temp = df.map(lambda line:LabeledPoint(line[0],[line[1:]]))
from pyspark.mllib.util import MLUtils
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.feature import StandardScaler
%history -f spark_data_frame.py
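# A possible continuation (not part of the recorded history): fit an MLlib
# model on the LabeledPoint RDD built above. The class and parameter choices
# here are assumptions in the Spark 1.x RDD-based MLlib style.
#
#     from pyspark.mllib.regression import LinearRegressionWithSGD
#     model = LinearRegressionWithSGD.train(temp, iterations=100, step=1e-8)
#     preds = temp.map(lambda lp: (lp.label, model.predict(lp.features)))
#     mse = preds.map(lambda lv: (lv[0] - lv[1]) ** 2).mean()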
| mit |
zfrenchee/pandas | asv_bench/benchmarks/ctors.py | 4 | 1873 | import numpy as np
import pandas.util.testing as tm
from pandas import Series, Index, DatetimeIndex, Timestamp, MultiIndex
from .pandas_vb_common import setup # noqa
class SeriesConstructors(object):
goal_time = 0.2
param_names = ["data_fmt", "with_index"]
params = [[lambda x: x,
list,
lambda arr: list(arr.astype(str)),
lambda arr: dict(zip(range(len(arr)), arr)),
lambda arr: [(i, -i) for i in arr],
lambda arr: [[i, -i] for i in arr],
lambda arr: ([(i, -i) for i in arr][:-1] + [None]),
lambda arr: ([[i, -i] for i in arr][:-1] + [None])],
[False, True]]
def setup(self, data_fmt, with_index):
N = 10**4
arr = np.random.randn(N)
self.data = data_fmt(arr)
self.index = np.arange(N) if with_index else None
def time_series_constructor(self, data_fmt, with_index):
Series(self.data, index=self.index)
class SeriesDtypesConstructors(object):
goal_time = 0.2
def setup(self):
N = 10**4
self.arr = np.random.randn(N, N)
self.arr_str = np.array(['foo', 'bar', 'baz'], dtype=object)
self.s = Series([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')] * N * 10)
def time_index_from_array_string(self):
Index(self.arr_str)
def time_index_from_array_floats(self):
Index(self.arr)
def time_dtindex_from_series(self):
DatetimeIndex(self.s)
def time_dtindex_from_index_with_series(self):
Index(self.s)
class MultiIndexConstructor(object):
goal_time = 0.2
def setup(self):
N = 10**4
self.iterables = [tm.makeStringIndex(N), range(20)]
def time_multiindex_from_iterables(self):
MultiIndex.from_product(self.iterables)
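

# Illustrative sketch (not part of the asv suite): these benchmark classes are
# normally driven by asv, which calls setup() with one combination of `params`
# and then times the corresponding method.  The guarded block below mimics that
# call pattern by hand (it assumes the module is imported so that the relative
# import above resolves, e.g. via ``python -m``).
if __name__ == '__main__':
    bench = SeriesConstructors()
    bench.setup(list, True)                   # data_fmt=list, with_index=True
    bench.time_series_constructor(list, True)

    mi_bench = MultiIndexConstructor()
    mi_bench.setup()
    mi_bench.time_multiindex_from_iterables()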
| bsd-3-clause |
AlexanderFabisch/scikit-learn | benchmarks/bench_mnist.py | 44 | 6801 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogeneous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
------------------------------------------------------------
MLP_adam 53.46s 0.11s 0.0224
Nystroem-SVM 112.97s 0.92s 0.0228
MultilayerPerceptron 24.33s 0.14s 0.0287
ExtraTrees 42.99s 0.57s 0.0294
RandomForest 42.70s 0.49s 0.0318
SampledRBF-SVM 135.81s 0.56s 0.0486
LinearRegression-SAG 16.67s 0.06s 0.0824
CART 20.69s 0.02s 0.1219
dummy 0.00s 0.01s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM': make_pipeline(
Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM': make_pipeline(
RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'LinearRegression-SAG': LogisticRegression(solver='sag', tol=1e-1, C=1e4),
'MultilayerPerceptron': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
algorithm='sgd', learning_rate_init=0.2, momentum=0.9, verbose=1,
tol=1e-4, random_state=1),
'MLP-adam': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
algorithm='adam', learning_rate_init=0.001, verbose=1,
tol=1e-4, random_state=1)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
brodoll/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/hpsModelFrame.py | 22 | 2075 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris, resample
import math
from scipy.fftpack import fft, ifft, fftshift
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
import harmonicModel as HM
(fs, x) = UF.wavread('../../../sounds/flute-A4.wav')
pos = .8*fs
M = 601
hM1 = int(math.floor((M+1)/2))
hM2 = int(math.floor(M/2))
w = np.hamming(M)
N = 1024
t = -100
nH = 40
minf0 = 420
maxf0 = 460
f0et = 5
minSineDur = .1
harmDevSlope = 0.01
Ns = 512
H = Ns/4
stocf = .2
x1 = x[pos-hM1:pos+hM2]
x2 = x[pos-Ns/2-1:pos+Ns/2-1]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
ipfreq = fs*iploc/N
f0 = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0)
hfreqp = []
hfreq, hmag, hphase = HM.harmonicDetection(ipfreq, ipmag, ipphase, f0, nH, hfreqp, fs, harmDevSlope)
Yh = UF.genSpecSines(hfreq, hmag, hphase, Ns, fs)
mYh = 20 * np.log10(abs(Yh[:Ns/2]))
bh=blackmanharris(Ns)
X2 = fft(fftshift(x2*bh/sum(bh)))
Xr = X2-Yh
mXr = 20 * np.log10(abs(Xr[:Ns/2]))
mYst = resample(np.maximum(-200, mXr), mXr.size*stocf) # decimate the mag spectrum
maxplotfreq = 8000.0
plt.figure(1, figsize=(9, 7))
plt.subplot(2,1,1)
binFreq = (fs/2.0)*np.arange(mX.size)/(mX.size)
plt.plot(binFreq,mX,'r', lw=1.5)
plt.axis([0,maxplotfreq,-100,max(mX)+2])
plt.plot(hfreq, hmag, marker='x', color='b', linestyle='', lw=2, markeredgewidth=1.5)
plt.title('mX + harmonics')
plt.subplot(2,1,2)
binFreq = (fs/2.0)*np.arange(mXr.size)/(mXr.size)
plt.plot(binFreq,mYh,'r', lw=.6, label='mYh')
plt.plot(binFreq,mXr,'r', lw=1.0, label='mXr')
binFreq = (fs/2.0)*np.arange(mYst.size)/(mYst.size)
plt.plot(binFreq,mYst,'r', lw=1.5, label='mYst')
plt.axis([0,maxplotfreq,-100,max(mYh)+2])
plt.legend(prop={'size':15})
plt.title('mYh + mXr + mYst')
plt.tight_layout()
plt.savefig('hpsModelFrame.png')
plt.show()
| agpl-3.0 |
jeremygilly/measurement-arduino-python | arduino_listener.py | 1 | 3233 | """ Full credit to Mahesh Venkitachalam at electronut.in whose initial code made this possible.
I've just edited a few things to make it useful for this case of datalogging.
"""
import sys, serial, argparse
import numpy as np
from time import sleep
from collections import deque
import itertools
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import datetime
output = "none"
count = 0
# plot class
class AnalogPlot:
# constr
def __init__(self, strPort, maxLen):
# open serial port
self.ser = serial.Serial(strPort, 9600)
self.ax = deque([0.0]*maxLen)
self.ay = deque([0.0]*maxLen)
self.maxLen = maxLen
global output_file
output_file.write('yyyy-mm-dd hh:mm:ss' + ' ' + 'output' + '\n')
# add to buffer
def addToBuf(self, buf, val):
if len(buf) < self.maxLen:
buf.append(val)
# print ('adding')
else:
buf.pop()
buf.appendleft(val)
# print ('appending right')
# add data
def add(self, data):
assert(len(data) == 1)
self.addToBuf(self.ax, data[0])
global output
output = str(data[0])
# self.outputFile(self, data[0])
# self.addToBuf(self.ay, data[1])
# update plot
def update(self, frameNum, a0, a1):
try:
line = self.ser.readline()
print(line)
data = [float(val) for val in line.split()]
# print data
if(len(data) == 1):
self.add(data)
print(data)
a0.set_data(range(self.maxLen), self.ax)
now = str(datetime.datetime.now())
global output_file
output_file.write(now + ', ' + output + '\n')
except KeyboardInterrupt:
print('exiting')
return a0
# clean up
def close(self):
# close serial
self.ser.flush()
self.ser.close()
# main() function
def main():
now = datetime.datetime.now()
formatted_date = now.strftime("%Y %m %d - %I %M %S")
    print(formatted_date)
write_to_file_path = str(formatted_date) + ".txt"
global output_file
output_file = open(write_to_file_path, 'w+')
# create parser
parser = argparse.ArgumentParser(description="LDR serial")
# add expected arguments
parser.add_argument('--port', dest='port', required=True)
# parse args
args = parser.parse_args()
#strPort = '/dev/tty.usbserial-A7006Yqh'
strPort = args.port
print('reading from serial port %s...' % strPort)
# plot parameters
analogPlot = AnalogPlot(strPort, 100)
print('plotting data...')
# set up animation
fig = plt.figure()
ax = plt.axes(xlim=(0, 300), ylim=(0, 400))
a0, = ax.plot([], [])
a1, = ax.plot([], [])
plt.ylabel('Response (mV)')
plt.xlabel('Samples')
anim = animation.FuncAnimation(fig, analogPlot.update,
fargs=(a0, a1),
interval=5)
# show plot
plt.show()
# clean up
analogPlot.close()
print('exiting.')
# call main
if __name__ == '__main__':
main()
| mit |
phdowling/scikit-learn | sklearn/decomposition/base.py | 313 | 5647 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Kyle Kastner <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
    def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
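

# Illustrative sketch (not part of scikit-learn): get_covariance() and
# get_precision() above should be numerical inverses of each other.  PCA
# derives from _BasePCA, so this can be checked on random data as follows.
if __name__ == '__main__':
    import numpy as np
    from scipy import linalg
    from sklearn.decomposition import PCA

    rng = np.random.RandomState(0)
    X_demo = rng.randn(100, 5)
    pca = PCA(n_components=3).fit(X_demo)
    cov = pca.get_covariance()
    prec = pca.get_precision()
    print(np.allclose(prec, linalg.inv(cov)))  # should print True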
| bsd-3-clause |
robince/pyentropy_gcodexport | docs/source/conf.py | 4 | 6664 | # -*- coding: utf-8 -*-
#
# pyEntropy documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 13 04:02:39 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('../sphinxext'))
# get devel version first
sys.path.insert(0,os.path.abspath('../../'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'ipython_console_highlighting',
'matplotlib.sphinxext.plot_directive', 'numpydoc' ]
# numpydoc settings
numpydoc_show_class_members = True
#autoclass_content = "both"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyEntropy'
copyright = u'2009, Robin Ince'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5.0'
# The full version, including alpha/beta/rc tags.
release = '0.5.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "%s v%s"%(project,release)
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyEntropydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pyEntropy.tex', u'pyEntropy Documentation',
u'Robin Ince', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| gpl-2.0 |
abulak/TDA-Cause-Effect-Pairs | outliers-plotter.py | 1 | 2206 | import numpy as np
import numpy.ma as ma
import os
import sys
import matplotlib
from matplotlib.backends.backend_pdf import PdfPages
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class PairOutlierPlotter:
"""
Outlier Plotter; requires outliers model and assumes outliers_$model and
orig_points are in the working directory
"""
def __init__(self, model):
self.name = os.getcwd()[-8:]
self.suffix = str(model)
outliers_path = os.path.join(os.getcwd(), 'outliers_' + self.suffix)
self.outliers = np.loadtxt(outliers_path, dtype=np.int)
self.points = np.loadtxt(os.path.join(os.getcwd(), 'std_points'))
def plot_outlier(self, i):
masked_points = ma.masked_array(self.points)
if i < 0:
plt.title(self.name)
plt.scatter(self.points[:, 0], self.points[:, 1], color='black',
alpha=0.7, s=15)
else:
outs = self.outliers[:i+1]
removed_points = self.points[self.outliers[:i + 1]]
masked_points[outs] = ma.masked
to_plot = masked_points.compressed().reshape(
self.points.shape[0] - i - 1, 2)
plt.title(self.name + ", outlier: " + str(i+1))
plt.scatter(to_plot[:, 0], to_plot[:, 1],
color='black', alpha=0.7, s=15)
plt.scatter(removed_points[:, 0], removed_points[:, 1],
color='red', alpha=0.7, s=15)
def save_plots_pdf(self):
print("Generating outlier plots of ", self.name, "for model:",
self.suffix)
pdf_file = os.path.join(os.getcwd(), 'outliers_' + self.suffix + '.pdf')
with PdfPages(pdf_file) as pdf:
for i in range(-1, self.outliers.shape[0]):
plt.figure(figsize=(12, 12))
self.plot_outlier(i)
pdf.savefig()
plt.close()
print("Done:", self.name, self.suffix)
def workflow(model):
p = PairOutlierPlotter(model)
p.save_plots_pdf()
if __name__ == "__main__":
if len(sys.argv) == 2:
workflow(sys.argv[1])
else:
print("Usage: outliers-plotter.py $MODEL")
| gpl-2.0 |
jlegendary/scikit-learn | sklearn/utils/tests/test_extmath.py | 130 | 16270 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _batch_mean_variance_update
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with approximate effective rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limit impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
## ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
## ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
## ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
## min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = _batch_mean_variance_update(
X2, old_means, old_variances, old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _batch_mean_variance_update(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
| bsd-3-clause |
asurve/incubator-systemml | src/main/python/systemml/converters.py | 8 | 12296 | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
__all__ = [ 'getNumCols', 'convertToMatrixBlock', 'convert_caffemodel', 'convert_lmdb_to_jpeg', 'convertToNumPyArr', 'convertToPandasDF', 'SUPPORTED_TYPES' , 'convertToLabeledDF', 'convertImageToNumPyArr', 'getDatasetMean']
import numpy as np
import pandas as pd
import os
import math
from pyspark.context import SparkContext
from scipy.sparse import coo_matrix, spmatrix, csr_matrix
from .classloader import *
SUPPORTED_TYPES = (np.ndarray, pd.DataFrame, spmatrix)
DATASET_MEAN = {'VGG_ILSVRC_19_2014':[103.939, 116.779, 123.68]}
def getNumCols(numPyArr):
if numPyArr.ndim == 1:
return 1
else:
return numPyArr.shape[1]
def get_pretty_str(key, value):
return '\t"' + key + '": ' + str(value) + ',\n'
def save_tensor_csv(tensor, file_path, shouldTranspose):
    w = tensor.reshape(tensor.shape[0], -1)
if shouldTranspose:
w = w.T
np.savetxt(file_path, w, delimiter=',')
with open(file_path + '.mtd', 'w') as file:
file.write('{\n\t"data_type": "matrix",\n\t"value_type": "double",\n')
file.write(get_pretty_str('rows', w.shape[0]))
file.write(get_pretty_str('cols', w.shape[1]))
file.write(get_pretty_str('nnz', np.count_nonzero(w)))
file.write('\t"format": "csv",\n\t"description": {\n\t\t"author": "SystemML"\n\t}\n}\n')
def convert_caffemodel(sc, deploy_file, caffemodel_file, output_dir, format="binary", is_caffe_installed=False):
"""
Saves the weights and bias in the caffemodel file to output_dir in the specified format.
This method does not requires caffe to be installed.
Parameters
----------
sc: SparkContext
SparkContext
deploy_file: string
Path to the input network file
caffemodel_file: string
Path to the input caffemodel file
output_dir: string
Path to the output directory
format: string
Format of the weights and bias (can be binary, csv or text)
is_caffe_installed: bool
True if caffe is installed
"""
if is_caffe_installed:
if format != 'csv':
raise ValueError('The format ' + str(format) + ' is not supported when caffe is installed. Hint: Please specify format=csv')
import caffe
net = caffe.Net(deploy_file, caffemodel_file, caffe.TEST)
for layerName in net.params.keys():
num_parameters = len(net.params[layerName])
if num_parameters == 0:
continue
elif num_parameters == 2:
# Weights and Biases
layerType = net.layers[list(net._layer_names).index(layerName)].type
shouldTranspose = True if layerType == 'InnerProduct' else False
save_tensor_csv(net.params[layerName][0].data, os.path.join(output_dir, layerName + '_weight.mtx'), shouldTranspose)
save_tensor_csv(net.params[layerName][1].data, os.path.join(output_dir, layerName + '_bias.mtx'), shouldTranspose)
elif num_parameters == 1:
# Only Weight
layerType = net.layers[list(net._layer_names).index(layerName)].type
shouldTranspose = True if layerType == 'InnerProduct' else False
save_tensor_csv(net.params[layerName][0].data, os.path.join(output_dir, layerName + '_weight.mtx'), shouldTranspose)
else:
raise ValueError('Unsupported number of parameters:' + str(num_parameters))
else:
createJavaObject(sc, 'dummy')
utilObj = sc._jvm.org.apache.sysml.api.dl.Utils()
utilObj.saveCaffeModelFile(sc._jsc, deploy_file, caffemodel_file, output_dir, format)
def convert_lmdb_to_jpeg(lmdb_img_file, output_dir):
"""
Saves the images in the lmdb file as jpeg in the output_dir. This method requires caffe to be installed along with lmdb and cv2 package.
To install cv2 package, do `pip install opencv-python`.
Parameters
----------
lmdb_img_file: string
Path to the input lmdb file
output_dir: string
Output directory for images (local filesystem)
"""
import lmdb, caffe, cv2
    lmdb_cursor = lmdb.open(lmdb_img_file, readonly=True).begin().cursor()
datum = caffe.proto.caffe_pb2.Datum()
i = 1
for _, value in lmdb_cursor:
datum.ParseFromString(value)
data = caffe.io.datum_to_array(datum)
output_file_path = os.path.join(output_dir, 'file_' + str(i) + '.jpg')
image = np.transpose(data, (1,2,0)) # CxHxW to HxWxC in cv2
cv2.imwrite(output_file_path, image)
i = i + 1
def convertToLabeledDF(sparkSession, X, y=None):
from pyspark.ml.feature import VectorAssembler
if y is not None:
pd1 = pd.DataFrame(X)
pd2 = pd.DataFrame(y, columns=['label'])
pdf = pd.concat([pd1, pd2], axis=1)
inputColumns = ['C' + str(i) for i in pd1.columns]
outputColumns = inputColumns + ['label']
else:
pdf = pd.DataFrame(X)
inputColumns = ['C' + str(i) for i in pdf.columns]
outputColumns = inputColumns
assembler = VectorAssembler(inputCols=inputColumns, outputCol='features')
out = assembler.transform(sparkSession.createDataFrame(pdf, outputColumns))
if y is not None:
return out.select('features', 'label')
else:
return out.select('features')
def _convertSPMatrixToMB(sc, src):
src = coo_matrix(src, dtype=np.float64)
numRows = src.shape[0]
numCols = src.shape[1]
data = src.data
row = src.row.astype(np.int32)
col = src.col.astype(np.int32)
nnz = len(src.col)
buf1 = bytearray(data.tostring())
buf2 = bytearray(row.tostring())
buf3 = bytearray(col.tostring())
createJavaObject(sc, 'dummy')
return sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertSciPyCOOToMB(buf1, buf2, buf3, numRows, numCols, nnz)
def _convertDenseMatrixToMB(sc, src):
numCols = getNumCols(src)
numRows = src.shape[0]
arr = src.ravel().astype(np.float64)
buf = bytearray(arr.tostring())
createJavaObject(sc, 'dummy')
return sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertPy4JArrayToMB(buf, numRows, numCols)
def _copyRowBlock(i, sc, ret, src, numRowsPerBlock, rlen, clen):
rowIndex = int(i / numRowsPerBlock)
tmp = src[i:min(i+numRowsPerBlock, rlen),]
mb = _convertSPMatrixToMB(sc, tmp) if isinstance(src, spmatrix) else _convertDenseMatrixToMB(sc, tmp)
sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.copyRowBlocks(mb, rowIndex, ret, numRowsPerBlock, rlen, clen)
return i
def convertToMatrixBlock(sc, src, maxSizeBlockInMB=8):
if not isinstance(sc, SparkContext):
raise TypeError('sc needs to be of type SparkContext')
isSparse = True if isinstance(src, spmatrix) else False
src = np.asarray(src, dtype=np.float64) if not isSparse else src
if len(src.shape) != 2:
src_type = str(type(src).__name__)
raise TypeError('Expected 2-dimensional ' + src_type + ', instead passed ' + str(len(src.shape)) + '-dimensional ' + src_type)
# Ignoring sparsity for computing numRowsPerBlock for now
numRowsPerBlock = int(math.ceil((maxSizeBlockInMB*1000000) / (src.shape[1]*8)))
multiBlockTransfer = False if numRowsPerBlock >= src.shape[0] else True
if not multiBlockTransfer:
return _convertSPMatrixToMB(sc, src) if isSparse else _convertDenseMatrixToMB(sc, src)
else:
# Since coo_matrix does not have range indexing
src = csr_matrix(src) if isSparse else src
rlen = int(src.shape[0])
clen = int(src.shape[1])
ret = sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.allocateDenseOrSparse(rlen, clen, isSparse)
[ _copyRowBlock(i, sc, ret, src, numRowsPerBlock, rlen, clen) for i in range(0, src.shape[0], numRowsPerBlock) ]
sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.postProcessAfterCopying(ret)
return ret
def convertToNumPyArr(sc, mb):
if isinstance(sc, SparkContext):
numRows = mb.getNumRows()
numCols = mb.getNumColumns()
createJavaObject(sc, 'dummy')
buf = sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertMBtoPy4JDenseArr(mb)
return np.frombuffer(buf, count=numRows*numCols, dtype=np.float64).reshape((numRows, numCols))
else:
raise TypeError('sc needs to be of type SparkContext') # TODO: We can generalize this by creating py4j gateway ourselves
# Returns the mean of a model if defined otherwise None
def getDatasetMean(dataset_name):
"""
Parameters
----------
dataset_name: Name of the dataset used to train model. This name is artificial name based on dataset used to train the model.
Returns
-------
mean: Mean value of model if its defined in the list DATASET_MEAN else None.
"""
try:
mean = DATASET_MEAN[dataset_name.upper()]
except:
mean = None
return mean
# Example usage: convertImageToNumPyArr(im, img_shape=(3, 224, 224), add_rotated_images=True, add_mirrored_images=True)
# The above call returns a numpy array of shape (6, 150528) in NCHW format
def convertImageToNumPyArr(im, img_shape=None, add_rotated_images=False, add_mirrored_images=False,
color_mode = 'RGB', mean=None):
## Input Parameters
# color_mode: In case of VGG models which expect image data in BGR format instead of RGB for other most models,
# color_mode parameter is used to process image data in BGR format.
# mean: mean value is used to subtract from input data from every pixel value. By default value is None, so mean value not subtracted.
if img_shape is not None:
num_channels = img_shape[0]
size = (img_shape[1], img_shape[2])
else:
num_channels = 1 if im.mode == 'L' else 3
size = None
if num_channels != 1 and num_channels != 3:
raise ValueError('Expected the number of channels to be either 1 or 3')
from PIL import Image
if size is not None:
im = im.resize(size, Image.LANCZOS)
expected_mode = 'L' if num_channels == 1 else 'RGB'
    if expected_mode != im.mode:
im = im.convert(expected_mode)
def _im2NumPy(im):
if expected_mode == 'L':
return np.asarray(im.getdata()).reshape((1, -1))
else:
im = (np.array(im).astype(np.float))
# (H,W,C) -> (C,H,W)
im = im.transpose(2, 0, 1)
# RGB -> BGR
if color_mode == 'BGR':
                im = im[::-1, :, :]  # channels are the first axis after the transpose above
# Subtract Mean
if mean is not None:
for c in range(3):
                    im[c, :, :] = im[c, :, :] - mean[c]
# (C,H,W) --> (1, C*H*W)
return im.reshape((1, -1))
ret = _im2NumPy(im)
if add_rotated_images:
ret = np.vstack((ret, _im2NumPy(im.rotate(90)), _im2NumPy(im.rotate(180)), _im2NumPy(im.rotate(270)) ))
if add_mirrored_images:
ret = np.vstack((ret, _im2NumPy(im.transpose(Image.FLIP_LEFT_RIGHT)), _im2NumPy(im.transpose(Image.FLIP_TOP_BOTTOM))))
return ret
def convertToPandasDF(X):
if not isinstance(X, pd.DataFrame):
return pd.DataFrame(X, columns=['C' + str(i) for i in range(getNumCols(X))])
return X
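

# Illustrative sketch (not part of the module): following the usage note above
# convertImageToNumPyArr, an image file can be converted into a row matrix in
# NCHW ordering.  'dog.jpg' is a placeholder path, not a file shipped here.
if __name__ == '__main__':
    from PIL import Image
    demo_im = Image.open('dog.jpg')
    demo_arr = convertImageToNumPyArr(demo_im, img_shape=(3, 224, 224),
                                      add_rotated_images=True,
                                      add_mirrored_images=True)
    print(demo_arr.shape)  # (6, 150528) for a 3x224x224 input with both augmentations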
| apache-2.0 |
edhuckle/statsmodels | statsmodels/sandbox/infotheo.py | 33 | 16417 | """
Information Theoretic and Entropy Measures
References
----------
Golan, As. 2008. "Information and Entropy Econometrics -- A Review and
Synthesis." Foundations And Trends in Econometrics 2(1-2), 1-145.
Golan, A., Judge, G., and Miller, D. 1996. Maximum Entropy Econometrics.
Wiley & Sons, Chichester.
"""
#For MillerMadow correction
#Miller, G. 1955. Note on the bias of information estimates. Info. Theory
# Psychol. Prob. Methods II-B:95-100.
#For ChaoShen method
#Chao, A., and T.-J. Shen. 2003. Nonparametric estimation of Shannon's index of diversity when
#there are unseen species in sample. Environ. Ecol. Stat. 10:429-443.
#Good, I. J. 1953. The population frequencies of species and the estimation of population parameters.
#Biometrika 40:237-264.
#Horvitz, D.G., and D. J. Thompson. 1952. A generalization of sampling without replacement from a finute universe. J. Am. Stat. Assoc. 47:663-685.
#For NSB method
#Nemenman, I., F. Shafee, and W. Bialek. 2002. Entropy and inference, revisited. In: Dietterich, T.,
#S. Becker, Z. Gharamani, eds. Advances in Neural Information Processing Systems 14: 471-478.
#Cambridge (Massachusetts): MIT Press.
#For shrinkage method
#Dougherty, J., Kohavi, R., and Sahami, M. (1995). Supervised and unsupervised discretization of
#continuous features. In International Conference on Machine Learning.
#Yang, Y. and Webb, G. I. (2003). Discretization for naive-bayes learning: managing discretization
#bias and variance. Technical Report 2003/131 School of Computer Science and Software Engineer-
#ing, Monash University.
from statsmodels.compat.python import range, lzip, lmap
from scipy import stats
import numpy as np
from matplotlib import pyplot as plt
from scipy.misc import logsumexp as sp_logsumexp
#TODO: change these to use maxentutils so that over/underflow is handled
#with the logsumexp.
def logsumexp(a, axis=None):
"""
Compute the log of the sum of exponentials log(e^{a_1}+...e^{a_n}) of a
Avoids numerical overflow.
Parameters
----------
a : array-like
The vector to exponentiate and sum
axis : int, optional
The axis along which to apply the operation. Defaults is None.
Returns
-------
sum(log(exp(a)))
Notes
-----
This function was taken from the mailing list
http://mail.scipy.org/pipermail/scipy-user/2009-October/022931.html
    This should be superseded by the ufunc when it is finished.
"""
if axis is None:
# Use the scipy.maxentropy version.
return sp_logsumexp(a)
a = np.asarray(a)
shp = list(a.shape)
shp[axis] = 1
a_max = a.max(axis=axis)
s = np.log(np.exp(a - a_max.reshape(shp)).sum(axis=axis))
lse = a_max + s
return lse
def _isproperdist(X):
"""
Checks to see if `X` is a proper probability distribution
"""
X = np.asarray(X)
if not np.allclose(np.sum(X), 1) or not np.all(X>=0) or not np.all(X<=1):
return False
else:
return True
def discretize(X, method="ef", nbins=None):
"""
Discretize `X`
Parameters
----------
bins : int, optional
Number of bins. Default is floor(sqrt(N))
method : string
"ef" is equal-frequency binning
"ew" is equal-width binning
Examples
--------
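    A minimal, hypothetical illustration (not from the original docstring):
    equal-frequency binning assigns bin labels 1..nbins with roughly equal
    counts per bin.

    >>> import numpy as np
    >>> x = np.random.RandomState(0).randn(100)
    >>> d = discretize(x, method="ef", nbins=4)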
"""
nobs = len(X)
    if nbins is None:
nbins = np.floor(np.sqrt(nobs))
if method == "ef":
discrete = np.ceil(nbins * stats.rankdata(X)/nobs)
if method == "ew":
width = np.max(X) - np.min(X)
width = np.floor(width/nbins)
svec, ivec = stats.fastsort(X)
discrete = np.zeros(nobs)
binnum = 1
base = svec[0]
discrete[ivec[0]] = binnum
for i in range(1,nobs):
if svec[i] < base + width:
discrete[ivec[i]] = binnum
else:
base = svec[i]
binnum += 1
discrete[ivec[i]] = binnum
return discrete
#TODO: looks okay but needs more robust tests for corner cases
def logbasechange(a,b):
"""
There is a one-to-one transformation of the entropy value from
a log base b to a log base a :
H_{b}(X)=log_{b}(a)[H_{a}(X)]
Returns
-------
log_{b}(a)
"""
return np.log(b)/np.log(a)
def natstobits(X):
"""
Converts from nats to bits
"""
return logbasechange(np.e, 2) * X
def bitstonats(X):
"""
Converts from bits to nats
"""
return logbasechange(2, np.e) * X
#TODO: make this entropy, and then have different measures as
#a method
def shannonentropy(px, logbase=2):
"""
This is Shannon's entropy
Parameters
-----------
logbase, int or np.e
The base of the log
px : 1d or 2d array_like
Can be a discrete probability distribution, a 2d joint distribution,
or a sequence of probabilities.
Returns
-----
For log base 2 (bits) given a discrete distribution
    H(p) = sum(px * log2(1/px)) = -sum(px * log2(px)) = E[log2(1/p(X))]
    For log base 2 (bits) given a joint distribution
    H(px,py) = -sum_{k,j} w_{kj} * log2(w_{kj})
Notes
-----
shannonentropy(0) is defined as 0
"""
#TODO: haven't defined the px,py case?
px = np.asarray(px)
if not np.all(px <= 1) or not np.all(px >= 0):
raise ValueError("px does not define proper distribution")
entropy = -np.sum(np.nan_to_num(px*np.log2(px)))
if logbase != 2:
return logbasechange(2,logbase) * entropy
else:
return entropy
# Shannon's information content
def shannoninfo(px, logbase=2):
"""
Shannon's information
Parameters
----------
px : float or array-like
`px` is a discrete probability distribution
Returns
-------
For logbase = 2
np.log2(px)
"""
px = np.asarray(px)
if not np.all(px <= 1) or not np.all(px >= 0):
raise ValueError("px does not define proper distribution")
if logbase != 2:
return - logbasechange(2,logbase) * np.log2(px)
else:
return - np.log2(px)
def condentropy(px, py, pxpy=None, logbase=2):
"""
Return the conditional entropy of X given Y.
Parameters
----------
px : array-like
py : array-like
pxpy : array-like, optional
If pxpy is None, the distributions are assumed to be independent
        and condentropy(px,py) = shannonentropy(px)
logbase : int or np.e
Returns
-------
    sum_{kj} w_{kj} * log(q_{j}/w_{kj})
    where q_{j} = py[j]
    and w_{kj} = pxpy[k,j]
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError("px or py is not a proper probability distribution")
    if pxpy is not None and not _isproperdist(pxpy):
        raise ValueError("pxpy is not a proper joint distribution")
    if pxpy is None:
pxpy = np.outer(py,px)
condent = np.sum(pxpy * np.nan_to_num(np.log2(py/pxpy)))
if logbase == 2:
return condent
else:
return logbasechange(2, logbase) * condent
def mutualinfo(px,py,pxpy, logbase=2):
"""
Returns the mutual information between X and Y.
Parameters
----------
px : array-like
Discrete probability distribution of random variable X
py : array-like
Discrete probability distribution of random variable Y
pxpy : 2d array-like
The joint probability distribution of random variables X and Y.
Note that if X and Y are independent then the mutual information
is zero.
logbase : int or np.e, optional
Default is 2 (bits)
Returns
-------
shannonentropy(px) - condentropy(px,py,pxpy)
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError("px or py is not a proper probability distribution")
    if pxpy is not None and not _isproperdist(pxpy):
        raise ValueError("pxpy is not a proper joint distribution")
    if pxpy is None:
pxpy = np.outer(py,px)
return shannonentropy(px, logbase=logbase) - condentropy(px,py,pxpy,
logbase=logbase)
def corrent(px,py,pxpy,logbase=2):
"""
An information theoretic correlation measure.
Reflects linear and nonlinear correlation between two random variables
X and Y, characterized by the discrete probability distributions px and py
respectively.
Parameters
----------
px : array-like
Discrete probability distribution of random variable X
py : array-like
Discrete probability distribution of random variable Y
pxpy : 2d array-like, optional
Joint probability distribution of X and Y. If pxpy is None, X and Y
are assumed to be independent.
logbase : int or np.e, optional
Default is 2 (bits)
Returns
-------
mutualinfo(px,py,pxpy,logbase=logbase)/shannonentropy(py,logbase=logbase)
Notes
-----
This is also equivalent to
    corrent(px,py,pxpy) = 1 - condentropy(px,py,pxpy)/shannonentropy(py)
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError("px or py is not a proper probability distribution")
    if pxpy is not None and not _isproperdist(pxpy):
        raise ValueError("pxpy is not a proper joint distribution")
    if pxpy is None:
pxpy = np.outer(py,px)
return mutualinfo(px,py,pxpy,logbase=logbase)/shannonentropy(py,
logbase=logbase)
def covent(px,py,pxpy,logbase=2):
"""
An information theoretic covariance measure.
Reflects linear and nonlinear correlation between two random variables
X and Y, characterized by the discrete probability distributions px and py
respectively.
Parameters
----------
px : array-like
Discrete probability distribution of random variable X
py : array-like
Discrete probability distribution of random variable Y
pxpy : 2d array-like, optional
Joint probability distribution of X and Y. If pxpy is None, X and Y
are assumed to be independent.
logbase : int or np.e, optional
Default is 2 (bits)
Returns
-------
condent(px,py,pxpy,logbase=logbase) + condent(py,px,pxpy,
logbase=logbase)
Notes
-----
This is also equivalent to
covent(px,py,pxpy) = condent(px,py,pxpy) + condent(py,px,pxpy)
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError("px or py is not a proper probability distribution")
    if pxpy is not None and not _isproperdist(pxpy):
        raise ValueError("pxpy is not a proper joint distribution")
    if pxpy is None:
        pxpy = np.outer(py,px)
return condent(px,py,pxpy,logbase=logbase) + condent(py,px,pxpy,
logbase=logbase)
#### Generalized Entropies ####
def renyientropy(px,alpha=1,logbase=2,measure='R'):
"""
Renyi's generalized entropy
Parameters
----------
px : array-like
Discrete probability distribution of random variable X. Note that
px is assumed to be a proper probability distribution.
logbase : int or np.e, optional
Default is 2 (bits)
alpha : float or inf
The order of the entropy. The default is 1, which in the limit
is just Shannon's entropy. 2 is Renyi (Collision) entropy. If
the string "inf" or numpy.inf is specified the min-entropy is returned.
measure : str, optional
The type of entropy measure desired. 'R' returns Renyi entropy
measure. 'T' returns the Tsallis entropy measure.
Returns
-------
1/(1-alpha)*log(sum(px**alpha))
In the limit as alpha -> 1, Shannon's entropy is returned.
In the limit as alpha -> inf, min-entropy is returned.
"""
#TODO:finish returns
#TODO:add checks for measure
if not _isproperdist(px):
raise ValueError("px is not a proper probability distribution")
alpha = float(alpha)
if alpha == 1:
genent = shannonentropy(px)
if logbase != 2:
return logbasechange(2, logbase) * genent
return genent
    elif 'inf' in str(alpha).lower() or alpha == np.inf:
        minent = -np.log2(np.max(px))
        if logbase != 2:
            return logbasechange(2, logbase) * minent
        return minent
    # gets here if alpha != (1 or inf)
    px = px**alpha
    genent = np.log2(px.sum())
    if logbase == 2:
        return 1/(1-alpha) * genent
    else:
        return 1/(1-alpha) * logbasechange(2, logbase) * genent
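# Quick check (illustrative, base-2 logs): a uniform distribution has Renyi
# entropy log2(n) at every order, e.g. renyientropy([0.25]*4, alpha=2) == 2.0.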
#TODO: before completing this, need to rethink the organization of
# (relative) entropy measures, ie., all put into one function
# and have kwdargs, etc.?
def gencrossentropy(px,py,pxpy,alpha=1,logbase=2, measure='T'):
"""
Generalized cross-entropy measures.
Parameters
----------
px : array-like
Discrete probability distribution of random variable X
py : array-like
Discrete probability distribution of random variable Y
pxpy : 2d array-like, optional
Joint probability distribution of X and Y. If pxpy is None, X and Y
are assumed to be independent.
logbase : int or np.e, optional
Default is 2 (bits)
measure : str, optional
The measure is the type of generalized cross-entropy desired. 'T' is
the cross-entropy version of the Tsallis measure. 'CR' is Cressie-Read
measure.
"""
if __name__ == "__main__":
print("From Golan (2008) \"Information and Entropy Econometrics -- A Review \
and Synthesis")
print("Table 3.1")
# Examples from Golan (2008)
X = [.2,.2,.2,.2,.2]
Y = [.322,.072,.511,.091,.004]
for i in X:
print(shannoninfo(i))
for i in Y:
print(shannoninfo(i))
print(shannonentropy(X))
print(shannonentropy(Y))
p = [1e-5,1e-4,.001,.01,.1,.15,.2,.25,.3,.35,.4,.45,.5]
plt.subplot(111)
plt.ylabel("Information")
plt.xlabel("Probability")
x = np.linspace(0,1,100001)
plt.plot(x, shannoninfo(x))
# plt.show()
plt.subplot(111)
plt.ylabel("Entropy")
plt.xlabel("Probability")
x = np.linspace(0,1,101)
plt.plot(x, lmap(shannonentropy, lzip(x,1-x)))
# plt.show()
# define a joint probability distribution
# from Golan (2008) table 3.3
w = np.array([[0,0,1./3],[1/9.,1/9.,1/9.],[1/18.,1/9.,1/6.]])
# table 3.4
px = w.sum(0)
py = w.sum(1)
H_X = shannonentropy(px)
H_Y = shannonentropy(py)
H_XY = shannonentropy(w)
H_XgivenY = condentropy(px,py,w)
H_YgivenX = condentropy(py,px,w)
# note that cross-entropy is not a distance measure as the following shows
D_YX = logbasechange(2,np.e)*stats.entropy(px, py)
D_XY = logbasechange(2,np.e)*stats.entropy(py, px)
I_XY = mutualinfo(px,py,w)
print("Table 3.3")
print(H_X,H_Y, H_XY, H_XgivenY, H_YgivenX, D_YX, D_XY, I_XY)
print("discretize functions")
X=np.array([21.2,44.5,31.0,19.5,40.6,38.7,11.1,15.8,31.9,25.8,20.2,14.2,
24.0,21.0,11.3,18.0,16.3,22.2,7.8,27.8,16.3,35.1,14.9,17.1,28.2,16.4,
16.5,46.0,9.5,18.8,32.1,26.1,16.1,7.3,21.4,20.0,29.3,14.9,8.3,22.5,
12.8,26.9,25.5,22.9,11.2,20.7,26.2,9.3,10.8,15.6])
discX = discretize(X)
#CF: R's infotheo
#TODO: compare to pyentropy quantize?
    print()
print("Example in section 3.6 of Golan, using table 3.3")
print("Bounding errors using Fano's inequality")
print("H(P_{e}) + P_{e}log(K-1) >= H(X|Y)")
print("or, a weaker inequality")
print("P_{e} >= [H(X|Y) - 1]/log(K)")
print("P(x) = %s" % px)
print("X = 3 has the highest probability, so this is the estimate Xhat")
pe = 1 - px[2]
print("The probability of error Pe is 1 - p(X=3) = %0.4g" % pe)
H_pe = shannonentropy([pe,1-pe])
print("H(Pe) = %0.4g and K=3" % H_pe)
print("H(Pe) + Pe*log(K-1) = %0.4g >= H(X|Y) = %0.4g" % \
(H_pe+pe*np.log2(2), H_XgivenY))
print("or using the weaker inequality")
print("Pe = %0.4g >= [H(X) - 1]/log(K) = %0.4g" % (pe, (H_X - 1)/np.log2(3)))
print("Consider now, table 3.5, where there is additional information")
print("The conditional probabilities of P(X|Y=y) are ")
w2 = np.array([[0.,0.,1.],[1/3.,1/3.,1/3.],[1/6.,1/3.,1/2.]])
print(w2)
# not a proper distribution?
print("The probability of error given this information is")
print("Pe = [H(X|Y) -1]/log(K) = %0.4g" % ((np.mean([0,shannonentropy(w2[1]),shannonentropy(w2[2])])-1)/np.log2(3)))
print("such that more information lowers the error")
### Stochastic processes
markovchain = np.array([[.553,.284,.163],[.465,.312,.223],[.420,.322,.258]])
| bsd-3-clause |
jasonleaster/Machine_Learning | K_Means/km.py | 1 | 6556 | """
Programmer : EOF
File : km.py
Date : 2015.12.29
E-mail : [email protected]
Description :
The implementation of K-Means Model.
"""
import numpy
from numpy.random import uniform as rand
from matplotlib import pyplot
def euclidean_distance(list1, list2):
assert isinstance(list1, list)
assert isinstance(list2, list)
summer = 0
for item1, item2 in zip(list1, list2):
summer += (item1 - item2)**2
return numpy.sqrt(summer)
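# Example (illustrative): euclidean_distance([0, 0], [3, 4]) returns 5.0.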
class KMeans:
def __init__(self, Mat, K, disFunc = euclidean_distance):
self._Mat = numpy.array(Mat)
self.SampleDem = self._Mat.shape[0]
self.SampleNum = self._Mat.shape[1]
self.classNum = K
self.distance = disFunc
# The boundary of training samples
self.scope = [[min(self._Mat[i, :]),
max(self._Mat[i, :])]
for i in range(self.SampleDem)]
"""
The result after classification.
classification[i][0] is the label of sample `i`.
classification[i][1] is the distance between sample `i` and
the mean point of that class.
"""
self.classification = [[None, None] for i in range(self.SampleNum)]
"""
Initialization of @meanVals in randomly in the scope.
"""
self.__initMeanVal__()
def __initMeanVal__(self):
while True:
self.meanVal = numpy.array([[rand(self.scope[i][0], self.scope[i][1])
for i in range(self.SampleDem)]
for j in range(self.classNum)]).transpose()
self.classify()
classSet = set()
for i in range(self.SampleNum):
classSet.add(self.classification[i][0])
if len(classSet) == self.classNum:
break
def classify(self):
for i in range(self.SampleNum):
minDis = +numpy.inf
label = None
for k in range(self.classNum):
d = self.distance(self._Mat[:, i].tolist(), self.meanVal[:, k].tolist())
if d < minDis:
minDis = d
label = k
self.classification[i][0] = label
self.classification[i][1] = minDis
"""
    After you have initialized this class, just call this
    function and the K-Means model will be built.
"""
def train(self):
while True:
if self.stopOrNot():
return
for k in range(self.classNum):
mean = None
counter = 0
for i in range(self.SampleNum):
if self.classification[i][0] == k:
                        if mean is None:
mean = numpy.array(self._Mat[:, i])*1.
else:
mean += self._Mat[:, i]
counter += 1.
mean /= counter
self.meanVal[:, k] = mean
self.classify()
"""
Get the minimum inner distance of class `k`
"""
def minDisInClass(self, k):
assert k >= 0
minDis = +numpy.inf
for i in range(self.SampleNum):
if self.classification[i][0] == k:
summer = 0.
for j in range(self.SampleNum):
if self.classification[j][0] == k:
summer += self.distance(self._Mat[:, i].tolist(),
self._Mat[:, j].tolist())
if minDis > summer:
minDis = summer
opt_point = self._Mat[:, i]
return (minDis, opt_point)
"""
This function *may* update @self.meanVal and will check
whether to stop the training process.
    Return True if training should be stopped. Otherwise, return False.
"""
def stopOrNot(self):
STOP = True
for k in range(self.classNum):
summer = 0.
for i in range(self.SampleNum):
if self.classification[i][0] == k:
summer += self.distance(self.meanVal[:,k].tolist(), self._Mat[:, i].tolist())
(minDis, new_mean_point) = self.minDisInClass(k)
if summer > minDis:
self.meanVal[:, k] = new_mean_point
STOP = False
continue
if STOP == True:
return True
else:
self.classify()
if self.stopOrNot() == True:
return True
else:
return False
return False
def show(self):
"""
        Only supports two-dimensional feature samples!
        Just a toy function.
        If the features have more than 2 dimensions, please
        don't call this function in user programs.
"""
assert self.SampleDem == 2
assert self.classNum <= 8
print "Means: "
print self.meanVal
width = 2
for k in range(self.classNum):
for i in range(self.SampleNum):
if self.classification[i][0] == 0:
pyplot.plot(self._Mat[0][i], self._Mat[1][i], "or", markersize = 10)
elif self.classification[i][0] == 1:
pyplot.plot(self._Mat[0][i], self._Mat[1][i], "og", markersize = 10)
elif self.classification[i][0] == 2:
pyplot.plot(self._Mat[0][i], self._Mat[1][i], "ob", markersize = 10)
elif self.classification[i][0] == 3:
pyplot.plot(self._Mat[0][i], self._Mat[1][i], "oc", markersize = 10)
elif self.classification[i][0] == 4:
pyplot.plot(self._Mat[0][i], self._Mat[1][i], "om", markersize = 10)
elif self.classification[i][0] == 5:
pyplot.plot(self._Mat[0][i], self._Mat[1][i], "oy", markersize = 10)
elif self.classification[i][0] == 6:
pyplot.plot(self._Mat[0][i], self._Mat[1][i], "ok", markersize = 10)
elif self.classification[i][0] == 7:
pyplot.plot(self._Mat[0][i], self._Mat[1][i], "ow", markersize = 10)
pyplot.axis([int(self.scope[0][0]) - 2*width, int(self.scope[0][1]) + 2*width,
int(self.scope[1][0]) - width, int(self.scope[1][1]) + width])
pyplot.title("The OutPut (figure by Jason Leaster)")
pyplot.show()
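# Usage sketch (illustrative, not part of the original module): cluster thirty
# random 2-D points into K = 3 groups and plot the result. Rows of the input
# matrix are feature dimensions and columns are samples, as KMeans assumes.
if __name__ == "__main__":
    points = numpy.vstack([rand(0, 10, 30),
                           rand(0, 10, 30)])
    km = KMeans(points, K = 3)
    km.train()
    km.show()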
| gpl-2.0 |
tiagoams/blueC_fluxes | integ_adv2.py | 1 | 5857 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
integ_adv
integrates tracer advection variables in NEMO-ERSEM starting at the
coastline, where the flux is zero
NERC-DEFRA SSB-BlueC projects
Created on Wed 10:30:00 2017-06-07
@author: TAMS00
TODO
"""
print('loading modules...')
import os
import sys
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import math
print('done')
if (('Windows' in sys.platform) and
(os.environ['COMPUTERNAME']=='PC4447')):
base='c:/Users/tams00/Documents/nerc_ssb/c_fluxes/AMM7-HINDCAST-v0'
elif ( ('linux' in sys.platform) and
( ( 'jc.rl.ac.uk' in os.environ['HOSTNAME']) or
( 'ceda.ac.uk' in os.environ['HOSTNAME']) ) ):
#base='/group_workspaces/jasmin2/ssb/data/internal/AMM7-hindcasts/v0'
base = '/group_workspaces/jasmin2/ssb/data/internal/AMM7-hindcasts/v0.2'
elif ( ('linux' in sys.platform) and
( os.environ['HOSTNAME'][0:2] == 'es' ) ):
base='/nerc/n01/n01/momme/AMM7-HINDCAST-v0-erosion'
else:
print('base dir not defined')
modelpaths=[os.path.join(base+'/1981/01/','amm7_1d_19810101_19810131_grid_T.nc')] #,
bathy_path='~/nerc_ssb/bathy_meter.nc'
xadv_3d='XAD_O3_c_e3t'
yadv_3d='YAD_O3_c_e3t'
def is_land(bathy,row,col):
#if math.isclose(bathy.values[row,col], 0.0):
if np.isclose(bathy.values[row,col], 0.0):
return True
else:
return False
# integrate varArr from row,col in the given direction
# and save in intArr
#
# XAD[r,c] = adv_x[r,c+1] - adv_x[r,c] (tested empirically in embayment case)
# adv_x[r,c] = adv_x[r,c+1] - XAD[r,c] use when going west
# adv_x[r,c+1] = XAD[r,c] + adv_x[r,c] use when going east
#
# NOTE: imshow shows geographic array upside down
# YAD[r,c] = adv_y[r+1,c] - adv_y[r,c] (tested empirically in embayment case)
# adv_x[r,c] = adv_x[r+1,c] - YAD[r,c] use when going south
# adv_x[r+1,c] = YAD[r,c] + adv_x[r,c] use when going north
# Corrected Arakawa C indexing in NEMO
# https://www.nemo-ocean.eu/wp-content/uploads/NEMO_book.pdf pp.54
#
# V
# _
# j T |U
# i
#
# XAD[r,c] = adv_x[r,c] - adv_x[r,c-1]
# adv_x[r,c] = adv_x[r,c-1] + XAD[r,c] use when going east
# adv_x[r,c] = adv_x[r,c+1] - XAD[r,c+1] use when going west
#
# YAD[r,c] = adv_y[r,c] - adv_y[r-1,c]
# adv_y[r,c] = adv_y[r-1,c] + YAD[r,c] use when going north
# adv_y[r,c] = adv_y[r+1,c] - YAD[r+1,c] use when going south
# S.W.: as below, which results in opposite direction (positive right and up?)
# YAD[r,c] = adv_y[r-1,c] - adv_y[r,c]
# adv_y[r,c] = adv_y[r-1,c] - YAD[r,c] use when going north
# adv_y[r,c] = adv_y[r+1,c] + YAD[r+1,c] use when going south
def integrate_xadv(varArr,ocean):
# for XAD
[rows,cols]=varArr.shape
horArr=np.zeros([rows,cols])
for row in range(1,rows):
for col in range(1,cols):
if ( (col == 1) and ocean[row,col] ):
horArr[row,col] = +1 * varArr[row,col]
elif ( ocean[row,col] ):
horArr[row,col] = varArr[row,col] + horArr[row,col-1]
return horArr
def integrate_yadv(varArr,ocean):
    # for YAD
[rows,cols]=varArr.shape
verArr=np.zeros([rows,cols])
for row in range(1,rows):
for col in range(1,cols):
if ( (row == 1) and ocean[row,col] ):
verArr[row,col] = +1 * varArr[row,col]
elif ( ocean[row,col] ):
verArr[row,col] = varArr[row,col] + verArr[row-1,col]
return verArr
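# Sanity sketch (illustrative): on an all-ocean grid the integration reduces to a
# masked cumulative sum, e.g. integrate_xadv(np.ones((3, 4)), np.ones((3, 4), bool))
# leaves row 0 and column 0 at zero and fills the rest with 1, 2, 3 across columns.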
def plot_results(bathy,verArr,horArr,modelout):
fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(12,12))
lim1=max(abs(np.array([verArr.max(),verArr.min(),horArr.max(),horArr.min()])))/2
lim2=max(abs(np.array([modelout[xadv_3d][0,0,:,:].max(),modelout[xadv_3d][0,0,:,:].min()])))/2
axs[0,0].imshow(np.log10(bathy),origin='lower')
axs[0,0].set_title('bathymetry from bathy_meter.nc')
im = axs[0,1].imshow(horArr,origin='lower',vmax=lim1,vmin=-1*lim1,cmap='seismic')
axs[0,1].set_title('abs. hor. advection')
fig.colorbar(im, ax=axs[0,1])
im = axs[1,1].imshow(modelout[xadv_3d][0,0,:,:],origin='lower',vmax=lim2,vmin=-1*lim2,cmap='seismic')
axs[1,1].set_title(xadv_3d)
fig.colorbar(im, ax=axs[1,1])
im = axs[0,2].imshow(verArr,origin='lower',vmax=lim1,vmin=-1*lim1,cmap='seismic')
axs[0,2].set_title('abs. ver. advection')
fig.colorbar(im, ax=axs[0,2])
im = axs[1,2].imshow(modelout[yadv_3d][0,0,:,:],origin='lower',vmax=lim2,vmin=-1*lim2,cmap='seismic')
axs[1,2].set_title(yadv_3d)
fig.colorbar(im, ax=axs[1,2])
plt.show()
#****************************************************
# main() takes an optional 'argv' argument, which
# allows us to call it from the interactive Python prompt:
#****************************************************
def main(argv=None):
print('open_mfdataset')
modelout = xr.open_mfdataset(modelpaths)
varArr = modelout[yadv_3d][:3,:3,:,:].values
print('open_dataset')
bathy = xr.open_dataset(bathy_path)['Bathymetry'][:,:].squeeze()
print('done')
ocean = ~np.isclose(bathy,0)
[times,depths,rows,cols] = varArr.shape
# x advection from XAD trend
verArrDXY = np.zeros([depths,rows,cols])
for idepth in range(depths):
verArrTXY = np.zeros([times,rows,cols])
for itime in range(times):
print('itime: ',itime,' idepth:',idepth)
#varArr = varArr[itime,0,:,:].squeeze()
verArrTXY[itime,:,:] = integrate_yadv(varArr[itime,idepth,:,:].squeeze(),ocean)
verArrDXY[idepth,:,:] = verArrTXY.mean(axis=0)
verAvgArr = verArrDXY.mean(axis=0)
#verDA.to_netcdf('xadv.nc')
plot_results(bathy,verAvgArr,verAvgArr,modelout)
if __name__ == "__main__":
main()
| gpl-3.0 |
fabioticconi/scikit-learn | examples/neighbors/plot_regression.py | 349 | 1402 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
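# Note on the two weighting schemes compared above: 'uniform' averages the k
# neighbors equally, while 'distance' weights each neighbor by the inverse of its
# distance, so nearby samples dominate and the fit follows the data more closely.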
| bsd-3-clause |
yong-wang/cuda-convnet2 | shownet.py | 180 | 18206 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
for line in enumerate(axes.get_yticklines()):
line[1].set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
| apache-2.0 |
d-mittal/pystruct | pystruct/tests/test_learners/test_crammer_singer_svm.py | 4 | 7793 |
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_almost_equal, assert_equal)
from nose.tools import assert_greater
from sklearn.datasets import make_blobs
from sklearn.metrics import f1_score
from pystruct.models import MultiClassClf
from pystruct.learners import (OneSlackSSVM, NSlackSSVM, SubgradientSSVM)
def test_crammer_singer_model():
X, Y = make_blobs(n_samples=80, centers=3, random_state=42)
# we have to add a constant 1 feature by hand :-/
X = np.hstack([X, np.ones((X.shape[0], 1))])
pbl = MultiClassClf(n_features=3, n_classes=3)
# test inference energy
rng = np.random.RandomState(0)
w = rng.uniform(size=pbl.size_joint_feature)
x = X[0]
y, energy = pbl.inference(x, w, return_energy=True)
assert_almost_equal(energy, np.dot(w, pbl.joint_feature(x, y)))
# test inference result:
energies = [np.dot(w, pbl.joint_feature(x, y_hat)) for y_hat in range(3)]
assert_equal(np.argmax(energies), y)
# test loss_augmented inference energy
y, energy = pbl.loss_augmented_inference(x, Y[0], w, return_energy=True)
assert_almost_equal(energy, np.dot(w, pbl.joint_feature(x, y)) + pbl.loss(Y[0], y))
# test batch versions
Y_batch = pbl.batch_inference(X, w)
Y_ = [pbl.inference(x, w) for x in X]
assert_array_equal(Y_batch, Y_)
Y_batch = pbl.batch_loss_augmented_inference(X, Y, w)
Y_ = [pbl.loss_augmented_inference(x, y, w) for x, y in zip(X, Y)]
assert_array_equal(Y_batch, Y_)
loss_batch = pbl.batch_loss(Y, Y_)
loss = [pbl.loss(y, y_) for y, y_ in zip(Y, Y_)]
assert_array_equal(loss_batch, loss)
def test_crammer_singer_model_class_weight():
X, Y = make_blobs(n_samples=80, centers=3, random_state=42)
# we have to add a constant 1 feature by hand :-/
X = np.hstack([X, np.ones((X.shape[0], 1))])
pbl = MultiClassClf(n_features=3, n_classes=3, class_weight=[1, 2, 1])
rng = np.random.RandomState(0)
w = rng.uniform(size=pbl.size_joint_feature)
# test inference energy
x = X[0]
y, energy = pbl.inference(x, w, return_energy=True)
assert_almost_equal(energy, np.dot(w, pbl.joint_feature(x, y)))
# test inference_result:
energies = [np.dot(w, pbl.joint_feature(x, y_hat)) for y_hat in range(3)]
assert_equal(np.argmax(energies), y)
# test loss_augmented inference energy
y, energy = pbl.loss_augmented_inference(x, Y[0], w, return_energy=True)
assert_almost_equal(energy, np.dot(w, pbl.joint_feature(x, y)) + pbl.loss(Y[0], y))
# test batch versions
Y_batch = pbl.batch_inference(X, w)
Y_ = [pbl.inference(x, w) for x in X]
assert_array_equal(Y_batch, Y_)
Y_batch = pbl.batch_loss_augmented_inference(X, Y, w)
Y_ = [pbl.loss_augmented_inference(x, y, w) for x, y in zip(X, Y)]
assert_array_equal(Y_batch, Y_)
loss_batch = pbl.batch_loss(Y, Y_)
loss = [pbl.loss(y, y_) for y, y_ in zip(Y, Y_)]
assert_array_equal(loss_batch, loss)
def test_simple_1d_dataset_cutting_plane():
# 10 1d datapoints between 0 and 1
X = np.random.uniform(size=(30, 1))
Y = (X.ravel() > 0.5).astype(np.int)
# we have to add a constant 1 feature by hand :-/
X = np.hstack([X, np.ones((X.shape[0], 1))])
pbl = MultiClassClf(n_features=2)
svm = NSlackSSVM(pbl, check_constraints=True, C=10000)
svm.fit(X, Y)
assert_array_equal(Y, np.hstack(svm.predict(X)))
def test_blobs_2d_cutting_plane():
# make two gaussian blobs
X, Y = make_blobs(n_samples=80, centers=3, random_state=42)
# we have to add a constant 1 feature by hand :-/
X = np.hstack([X, np.ones((X.shape[0], 1))])
X_train, X_test, Y_train, Y_test = X[:40], X[40:], Y[:40], Y[40:]
pbl = MultiClassClf(n_features=3, n_classes=3)
svm = NSlackSSVM(pbl, check_constraints=True, C=1000,
batch_size=1)
svm.fit(X_train, Y_train)
assert_array_equal(Y_test, np.hstack(svm.predict(X_test)))
def test_blobs_2d_one_slack():
# make two gaussian blobs
X, Y = make_blobs(n_samples=80, centers=3, random_state=42)
# we have to add a constant 1 feature by hand :-/
X = np.hstack([X, np.ones((X.shape[0], 1))])
X_train, X_test, Y_train, Y_test = X[:40], X[40:], Y[:40], Y[40:]
pbl = MultiClassClf(n_features=3, n_classes=3)
svm = OneSlackSSVM(pbl, check_constraints=True, C=1000)
svm.fit(X_train, Y_train)
assert_array_equal(Y_test, np.hstack(svm.predict(X_test)))
def test_blobs_2d_subgradient():
# make two gaussian blobs
X, Y = make_blobs(n_samples=80, centers=3, random_state=42)
# we have to add a constant 1 feature by hand :-/
X = np.hstack([X, np.ones((X.shape[0], 1))])
X_train, X_test, Y_train, Y_test = X[:40], X[40:], Y[:40], Y[40:]
pbl = MultiClassClf(n_features=3, n_classes=3)
svm = SubgradientSSVM(pbl, C=1000)
svm.fit(X_train, Y_train)
assert_array_equal(Y_test, np.hstack(svm.predict(X_test)))
def test_equal_class_weights():
# test that equal class weight is the same as no class weight
X, Y = make_blobs(n_samples=80, centers=3, random_state=42)
X = np.hstack([X, np.ones((X.shape[0], 1))])
X_train, X_test, Y_train, Y_test = X[:40], X[40:], Y[:40], Y[40:]
pbl = MultiClassClf(n_features=3, n_classes=3)
svm = OneSlackSSVM(pbl, C=10)
svm.fit(X_train, Y_train)
predict_no_class_weight = svm.predict(X_test)
pbl_class_weight = MultiClassClf(n_features=3, n_classes=3,
class_weight=np.ones(3))
svm_class_weight = OneSlackSSVM(pbl_class_weight, C=10)
svm_class_weight.fit(X_train, Y_train)
predict_class_weight = svm_class_weight.predict(X_test)
assert_array_equal(predict_no_class_weight, predict_class_weight)
assert_array_almost_equal(svm.w, svm_class_weight.w)
def test_class_weights():
X, Y = make_blobs(n_samples=210, centers=3, random_state=1, cluster_std=3,
shuffle=False)
X = np.hstack([X, np.ones((X.shape[0], 1))])
X, Y = X[:170], Y[:170]
pbl = MultiClassClf(n_features=3, n_classes=3)
svm = OneSlackSSVM(pbl, C=10)
svm.fit(X, Y)
weights = 1. / np.bincount(Y)
weights *= len(weights) / np.sum(weights)
pbl_class_weight = MultiClassClf(n_features=3, n_classes=3,
class_weight=weights)
svm_class_weight = OneSlackSSVM(pbl_class_weight, C=10)
svm_class_weight.fit(X, Y)
assert_greater(f1_score(Y, svm_class_weight.predict(X)),
f1_score(Y, svm.predict(X)))
def test_class_weights_rescale_C():
# check that our crammer-singer implementation with class weights and
# rescale_C=True is the same as LinearSVC's c-s class_weight implementation
from sklearn.svm import LinearSVC
X, Y = make_blobs(n_samples=210, centers=3, random_state=1, cluster_std=3,
shuffle=False)
X = np.hstack([X, np.ones((X.shape[0], 1))])
X, Y = X[:170], Y[:170]
weights = 1. / np.bincount(Y)
weights *= len(weights) / np.sum(weights)
pbl_class_weight = MultiClassClf(n_features=3, n_classes=3,
class_weight=weights, rescale_C=True)
svm_class_weight = OneSlackSSVM(pbl_class_weight, C=10, tol=1e-5)
svm_class_weight.fit(X, Y)
try:
linearsvm = LinearSVC(multi_class='crammer_singer',
fit_intercept=False, class_weight='auto', C=10)
linearsvm.fit(X, Y)
assert_array_almost_equal(svm_class_weight.w, linearsvm.coef_.ravel(),
3)
except TypeError:
# travis has a really old sklearn version that doesn't support
# class_weight in LinearSVC
pass
| bsd-2-clause |
evgchz/scikit-learn | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how the scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples / 2:]
predicted = classifier.predict(data[n_samples / 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
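# Note: flattening each 8x8 image into a 64-element row vector above gives the
# (n_samples, n_features) layout that scikit-learn estimators such as SVC expect.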
| bsd-3-clause |
saiwing-yeung/scikit-learn | sklearn/datasets/samples_generator.py | 26 | 56554 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for the remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
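# Example (illustrative): X, y = make_classification(n_samples=100, n_features=5,
#                                                    n_informative=3, n_redundant=1,
#                                                    random_state=0)
# yields X.shape == (100, 5) and binary labels y in {0, 1}.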
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
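# Example (illustrative): X, Y = make_multilabel_classification(n_samples=50,
#                                                               n_classes=4,
#                                                               random_state=0)
# returns a dense binary indicator Y of shape (50, 4), with roughly n_labels=2
# positive labels per row on average.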
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
    # Generate a ground truth model with only n_informative features being
    # non-zero (the other features are not correlated to y and should be
    # ignored by sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
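# Editor's usage sketch (not part of the original module), assuming the public
# ``sklearn.datasets`` import path and numpy as ``np``: with the default
# noise=0.0 and bias=0.0 the returned targets are an exact linear function of
# the returned coefficients.
# >>> from sklearn.datasets import make_regression
# >>> X, y, w = make_regression(n_samples=20, n_features=7, n_informative=3,
# ...                           coef=True, random_state=0)
# >>> bool(np.allclose(y, X.dot(w)))
# True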
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
    if factor >= 1 or factor < 0:
        raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
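# Editor's note (not part of the original module): labels follow construction
# order, 0 for the outer unit circle and 1 for the inner circle of radius
# ``factor``; with noise=None the inner points sit exactly on that radius.
# >>> from sklearn.datasets import make_circles
# >>> X, y = make_circles(n_samples=100, factor=0.3, random_state=0)
# >>> bool(np.allclose(np.hypot(X[y == 1, 0], X[y == 1, 1]), 0.3))
# True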
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
    # label the outer points 0 and the inner points 1, matching the order in
    # which they were stacked into X above
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
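# Editor's usage sketch (not part of the original module): a quick benchmark
# for non-linear classifiers; the two interleaving half circles are not
# linearly separable until the noise level dominates their offset.
# >>> from sklearn.datasets import make_moons
# >>> X, y = make_moons(n_samples=200, noise=0.1, random_state=0)
# >>> X.shape, y.shape, sorted(set(y))
# ((200, 2), (200,), [0, 1])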
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
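# Editor's worked note (not part of the original module): when n_samples is not
# divisible by the number of centers, the remainder is assigned to the first
# centers, i.e. n_samples_per_center = [10 // 3 + 1, 10 // 3, 10 // 3] ==
# [4, 3, 3] for the docstring example above, which matches the four zeros in
# the printed ``y``.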
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
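# Editor's usage sketch (not part of the original module): with noise=0.0 the
# targets reproduce the documented formula exactly and only the first five
# columns of X are used (numpy assumed as ``np``).
# >>> from sklearn.datasets import make_friedman1
# >>> X, y = make_friedman1(n_samples=50, n_features=8, random_state=0)
# >>> expected = (10 * np.sin(np.pi * X[:, 0] * X[:, 1])
# ...             + 20 * (X[:, 2] - 0.5) ** 2 + 10 * X[:, 3] + 5 * X[:, 4])
# >>> bool(np.allclose(y, expected))
# True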
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
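# Editor's worked note (not part of the original module): the in-place scaling
# above maps uniform [0, 1] draws onto the documented intervals; for X[:, 1],
# 0 <= u <= 1 becomes 0 <= 520*pi*u <= 520*pi and then
# 40*pi <= 520*pi*u + 40*pi <= 560*pi, while X[:, 3] is mapped to [1, 11] the
# same way. make_friedman3 below reuses exactly this input construction and
# only changes the target formula.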
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
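# Editor's usage sketch (not part of the original module): because u and v have
# orthonormal columns, the singular values of the returned matrix follow the
# assembled profile directly, which a plain SVD can confirm (numpy as ``np``).
# >>> A = make_low_rank_matrix(n_samples=60, n_features=40, effective_rank=5,
# ...                          tail_strength=0.5, random_state=0)
# >>> s = np.linalg.svd(A, compute_uv=False)
# >>> i = np.arange(40, dtype=float)
# >>> bool(np.allclose(s, 0.5 * np.exp(-(i / 5) ** 2)
# ...                     + 0.5 * np.exp(-0.1 * i / 5)))
# True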
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
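# Editor's usage sketch (not part of the original module): the returned arrays
# satisfy Y = np.dot(D, X), every column of D has unit l2 norm, and every
# column of X carries exactly n_nonzero_coefs non-zero entries.
# >>> Y, D, X = make_sparse_coded_signal(n_samples=15, n_components=30,
# ...                                    n_features=20, n_nonzero_coefs=4,
# ...                                    random_state=0)
# >>> bool(np.allclose(Y, np.dot(D, X))), int((X != 0).sum(axis=0).max())
# (True, 4)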
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
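# Editor's usage sketch (not part of the original module): the construction
# symmetrises A.T A and pushes its spectrum away from zero, so the result is
# symmetric positive definite, which can be verified directly (numpy as ``np``).
# >>> M = make_spd_matrix(n_dim=5, random_state=0)
# >>> bool(np.allclose(M, M.T)), bool(np.all(np.linalg.eigvalsh(M) > 0))
# (True, True)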
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is zero (see notes). Larger values
enforce more sparsity.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
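# Editor's note (not part of the original module): as the Notes above say,
# ``alpha`` thins the permuted Cholesky factor rather than the output itself,
# so the fill fraction of ``prec`` is only indirectly controlled; the result
# stays symmetric in any case.
# >>> P = make_sparse_spd_matrix(dim=20, alpha=0.98, random_state=0)
# >>> bool(np.allclose(P, P.T))
# True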
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://seat.massey.ac.nz/personal/s.r.marsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
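# Editor's worked note (not part of the original module): with n_samples=10 and
# n_classes=3 the quantile step is 10 // 3 == 3, so after sorting by distance
# from the mean the labels (before shuffling) are
# [0, 0, 0, 1, 1, 1, 2, 2, 2, 2]; the 10 - 3 * 3 == 1 leftover sample is
# appended to the outermost class.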
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    # build boolean indicators; pass lists, since np.vstack expects a sequence
    # (matching the hstack calls above)
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
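# Editor's usage sketch (not part of the original module): the two indicator
# outputs are boolean masks, one row per bicluster, over the rows and columns
# of the generated array.
# >>> data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=4,
# ...                                    shuffle=False, random_state=0)
# >>> data.shape, rows.shape, cols.shape
# ((30, 20), (4, 30), (4, 20))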
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
return result, rows, cols
| bsd-3-clause |
DistrictDataLabs/tribe | tribe/viz.py | 2 | 1967 | # tribe.viz
# Visualization utility for Email social network
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Thu Nov 20 16:28:40 2014 -0500
#
# Copyright (C) 2014 District Data Labs
# For license information, see LICENSE.txt
#
# ID: viz.py [b96b383] [email protected] $
"""
Visualization utility for Email social network
"""
##########################################################################
## Imports
##########################################################################
import math
import networkx as nx
from operator import itemgetter
from functools import wraps
try:
import matplotlib.pyplot as plt
except (ImportError, RuntimeError):
import warnings
plt = None
def configure(func):
"""
Configures visualization environment.
"""
@wraps(func)
def wrapper(*args, **kwargs):
if plt is None:
warnings.warn("matplotlib is not installed or you are using a virtualenv!")
return None
return func(*args, **kwargs)
return wrapper
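# Editor's note (not part of the original module): a function wrapped with
# @configure returns None (after issuing a warning) whenever matplotlib could
# not be imported, so a hypothetical caller can guard on the return value:
#     G = show_simple_network(nodes=20, prob=0.1)
#     if G is None:
#         print("matplotlib backend unavailable; skipping plots")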
@configure
def show_simple_network(nodes=12, prob=0.2, hot=False):
G = nx.erdos_renyi_graph(nodes, prob)
pos = nx.spring_layout(G)
nx.draw_networkx_nodes(G, pos, node_color='#0080C9', node_size=500, linewidths=1.0)
nx.draw_networkx_edges(G, pos, width=1.0, style='dashed', alpha=0.75)
if hot:
center, _ = sorted(G.degree().items(), key=itemgetter(1))[-1]
nx.draw_networkx_nodes(G, pos, nodelist=[center], node_size=600, node_color="#D9AF0B")
plt.axis('off')
plt.show()
return G
@configure
def draw_social_network(G, path=None):
k = 1/math.sqrt(G.order()) * 2
pos = nx.spring_layout(G, k=k)
deg = [100*v for v in G.degree().values()]
nx.draw_networkx_nodes(G, pos, node_size=deg, linewidths=1.0, alpha=0.90)
nx.draw_networkx_edges(G, pos, width=1.0, style='dashed', alpha=0.75)
if path:
plt.savefig(path)
else:
plt.show()
| mit |
InvestmentSystems/function-pipe | doc/source/usage_df.py | 1 | 13071 |
import zipfile
import collections
import os
import webbrowser
import requests
import pandas as pd
import function_pipe as fpn
# source url
URL_NAMES = 'https://www.ssa.gov/oact/babynames/names.zip'
FP_ZIP = '/tmp/names.zip'
class Core:
def load_data_dict(fp):
'''Source data from ZIP and load into dictionary of DFs.
Returns:
ordered dict of DFs keyed by year
'''
# download if not already found
if not os.path.exists(fp):
r = requests.get(URL_NAMES)
with open(fp, 'wb') as f:
f.write(r.content)
post = collections.OrderedDict()
with zipfile.ZipFile(fp) as zf:
# get ZipInfo instances
for zi in sorted(zf.infolist(), key=lambda zi: zi.filename):
fn = zi.filename
if fn.startswith('yob'):
year = int(fn[3:7])
df = pd.read_csv(
zf.open(zi),
header=None,
names=('name', 'gender', 'count'))
df['year'] = year
post[year] = df
return post
def gender_count_per_year(data_dict):
records = []
for year, df in data_dict.items():
male = df[df['gender'] == 'M']['count'].sum()
female = df[df['gender'] == 'F']['count'].sum()
records.append((male, female))
return pd.DataFrame.from_records(records,
index=data_dict.keys(), # ordered
columns=('M', 'F'))
def percent(df):
post = pd.DataFrame(index=df.index)
sum = df.sum(axis=1)
for col in df.columns:
post[col] = df[col] / sum
return post
def year_range(df, start, end):
return df.loc[start:end]
def plot(df, fp='/tmp/plot.png', title=None):
#print('calling plot', fp)
if os.path.exists(fp):
os.remove(fp)
ax = df.plot(title=title)
fig = ax.get_figure()
fig.savefig(fp)
return fp
def open_plot(fp):
print('calling open plot')
os.system('eog ' + fp)
#-------------------------------------------------------------------------------
# approach 1: call lots of functions with lots of statements
def approach_statement():
dd = Core.load_data_dict(FP_ZIP)
#ddf = Core.data_df(dd)
g_count = Core.gender_count_per_year(dd)
g_percent = Core.percent(g_count)
g_sub = Core.year_range(g_percent, 1950, 2000)
fp = Core.plot(g_sub)
Core.open_plot(fp)
#-------------------------------------------------------------------------------
class FN:
@fpn.FunctionNode
def load_data_dict(fp):
'''Source data from ZIP and load into dictionary of DFs.
Returns:
ordered dict of DFs keyed by year
'''
# download if not already found
if not os.path.exists(fp):
r = requests.get(URL_NAMES)
with open(fp, 'wb') as f:
f.write(r.content)
post = collections.OrderedDict()
with zipfile.ZipFile(fp) as zf:
# get ZipInfo instances
for zi in sorted(zf.infolist(), key=lambda zi: zi.filename):
fn = zi.filename
if fn.startswith('yob'):
year = int(fn[3:7])
df = pd.read_csv(
zf.open(zi),
header=None,
names=('name', 'gender', 'count'))
df['year'] = year
post[year] = df
return post
@fpn.FunctionNode
def gender_count_per_year(data_dict):
records = []
for year, df in data_dict.items():
male = df[df['gender'] == 'M']['count'].sum()
female = df[df['gender'] == 'F']['count'].sum()
records.append((male, female))
return pd.DataFrame.from_records(records,
index=data_dict.keys(), # ordered
columns=('M', 'F'))
@fpn.FunctionNode
def percent(df):
post = pd.DataFrame(index=df.index)
sum = df.sum(axis=1)
for col in df.columns:
post[col] = df[col] / sum
return post
@fpn.FunctionNode
def year_range(df, start, end):
return df.loc[start:end]
@fpn.FunctionNode
def plot(df, fp='/tmp/plot.png', title=None):
#print('calling plot', fp)
if os.path.exists(fp):
os.remove(fp)
ax = df.plot(title=title)
fig = ax.get_figure()
fig.savefig(fp)
return fp
@fpn.FunctionNode
def open_plot(fp):
webbrowser.open(fp)
def approach_composition():
    # partial function node arguments
f = (FN.load_data_dict
>> FN.gender_count_per_year
>> FN.year_range.partial(start=1950, end=2000)
>> FN.percent
>> FN.plot
>> FN.open_plot)
# using operators to scale percent
#f = (FN.load_data_dict
#>> FN.gender_count_per_year
#>> FN.year_range.partial(start=1950, end=2000)
#>> (FN.percent * 100)
#>> FN.plot
#>> FN.open_plot)
f(FP_ZIP)
    # how do we plot more than one thing without re-sourcing the data?
#-------------------------------------------------------------------------------
class PN1:
@fpn.pipe_node
@fpn.pipe_kwarg_bind(fpn.PN_INPUT)
def load_data_dict(fp):
return Core.load_data_dict(fp)
@fpn.pipe_node
@fpn.pipe_kwarg_bind(fpn.PREDECESSOR_RETURN)
def gender_count_per_year(data_dict):
return Core.gender_count_per_year(data_dict)
@fpn.pipe_node
@fpn.pipe_kwarg_bind(fpn.PREDECESSOR_RETURN)
def percent(df):
return Core.percent(df)
@fpn.pipe_node_factory
@fpn.pipe_kwarg_bind(fpn.PREDECESSOR_RETURN)
def year_range(df, start, end):
return Core.year_range(df, start, end)
@fpn.pipe_node
@fpn.pipe_kwarg_bind(fpn.PREDECESSOR_RETURN)
def plot(df):
return Core.plot(df)
@fpn.pipe_node
@fpn.pipe_kwarg_bind(fpn.PREDECESSOR_RETURN)
def open_plot(df):
return Core.open_plot(df)
def approach_pipe_1():
f = (PN1.load_data_dict | PN1.gender_count_per_year
| PN1.percent | PN1.year_range(1900, 2000)
| PN1.plot | PN1.open_plot)
# with operator
f = (PN1.load_data_dict | PN1.gender_count_per_year
| PN1.percent * 100 | PN1.year_range(1900, 2000)
| PN1.plot | PN1.open_plot)
fpn.run(f, FP_ZIP)
#-------------------------------------------------------------------------------
# alternate approach where component functions take kwargs; only exposed args are those for pipe factories; use PipeNodeInput as input
class PN2:
@fpn.pipe_node_factory
def load_data_dict(fp, **kwargs):
return Core.load_data_dict(fp)
@fpn.pipe_node
def gender_count_per_year(**kwargs):
return Core.gender_count_per_year(kwargs[fpn.PREDECESSOR_RETURN])
@fpn.pipe_node
def percent(**kwargs):
return Core.percent(kwargs[fpn.PREDECESSOR_RETURN])
@fpn.pipe_node_factory
def year_range(start, end, **kwargs):
return Core.year_range(kwargs[fpn.PREDECESSOR_RETURN], start, end)
@fpn.pipe_node
def plot(**kwargs):
return Core.plot(kwargs[fpn.PREDECESSOR_RETURN])
@fpn.pipe_node
def open_plot(**kwargs):
return Core.open_plot(kwargs[fpn.PREDECESSOR_RETURN])
def approach_pipe_2():
f = (PN2.load_data_dict(FP_ZIP) | PN2.gender_count_per_year
| PN2.percent * 100 | PN2.year_range(1900, 2000)
| PN2.plot | PN2.open_plot)
# with store and recall to do multiple operations
f = (PN2.load_data_dict(FP_ZIP) | PN2.gender_count_per_year
| PN2.percent * 100
| fpn.store('gpcent')
| PN2.year_range(1900, 2000) | PN2.plot | PN2.open_plot |
fpn.recall('gpcent')
| PN2.year_range(2001, 2015) | PN2.plot | PN2.open_plot
)
#fpn.run(f) # this implicitly passes a PipeNodeInput
f(pn_input=fpn.PipeNodeInput())
#-------------------------------------------------------------------------------
# use pipe node input to distribute data dict, also output directory
class PN4:
    # refactor methods to take args
# add method that gets
class PNI(fpn.PipeNodeInput):
URL_NAMES = 'https://www.ssa.gov/oact/babynames/names.zip'
@classmethod
def load_data_dict(cls, fp):
if not os.path.exists(fp):
r = requests.get(cls.URL_NAMES)
with open(fp, 'wb') as f:
f.write(r.content)
post = collections.OrderedDict()
with zipfile.ZipFile(fp) as zf:
# get ZipInfo instances
for zi in sorted(zf.infolist(), key=lambda zi: zi.filename):
fn = zi.filename
if fn.startswith('yob'):
year = int(fn[3:7])
df = pd.read_csv(
zf.open(zi),
header=None,
names=('name', 'gender', 'count'))
df['year'] = year
post[year] = df
return post
def __init__(self, output_dir):
super().__init__()
self.output_dir = output_dir
fp_zip = os.path.join(output_dir, 'names.zip')
self.data_dict = self.load_data_dict(fp_zip)
# new function that is more general than gender_count_per_year
@fpn.pipe_node_factory
def name_count_per_year(name_match, **kwargs):
pni = kwargs[fpn.PN_INPUT]
records = []
for year, df in pni.data_dict.items():
counts = collections.OrderedDict()
sel_name = df['name'].apply(name_match)
for gender in ('M', 'F'):
sel_gender = (df['gender'] == gender) & sel_name
counts[gender] = df[sel_gender]['count'].sum()
records.append(tuple(counts.values()))
return pd.DataFrame.from_records(records,
index=pni.data_dict.keys(), # ordered
columns=('M', 'F'))
@fpn.pipe_node
def percent(**kwargs):
df = kwargs[fpn.PREDECESSOR_RETURN]
post = pd.DataFrame(index=df.index)
sum = df.sum(axis=1)
for col in df.columns:
post[col] = df[col] / sum
return post
@fpn.pipe_node_factory
def year_range(start, end, **kwargs):
return kwargs[fpn.PREDECESSOR_RETURN].loc[start:end]
@fpn.pipe_node_factory
def plot(file_name, title=None, **kwargs): # now we can pass a file name
pni = kwargs[fpn.PN_INPUT]
df = kwargs[fpn.PREDECESSOR_RETURN]
fp = os.path.join(pni.output_dir, file_name)
ax = df.plot(title=title)
ax.get_figure().savefig(fp)
print(fp)
return fp
@fpn.pipe_node
def open_plot(**kwargs):
webbrowser.open(kwargs[fpn.PREDECESSOR_RETURN])
@fpn.pipe_node_factory
def merge_gender_data(**kwargs):
pni = kwargs[fpn.PN_INPUT]
# get index from source data dict
df = pd.DataFrame(index=pni.data_dict.keys())
for k, v in kwargs.items():
if k not in fpn.PIPE_NODE_KWARGS:
for gender in ('M', 'F'):
df[k + '_' + gender] = v[gender]
return df
#@fpn.pipe_node
#def write_xlsx(**kwargs):
#pni = kwargs[fpn.PN_INPUT]
#xlsx_fp = os.path.join(pni.output_dir, 'output.xlsx')
#xlsx = pd.ExcelWriter(xlsx_fp)
#for k, df in pni.store_items():
#df.to_excel(xlsx, k)
#xlsx.save()
#return xlsx_fp
def approach_pipe_4a():
f = (PN4.name_count_per_year(lambda n: n.lower().startswith('lesl'))
| PN4.percent | PN4.plot('lesl.png') | PN4.open_plot
| PN4.name_count_per_year(lambda n: n.lower().startswith('dana'))
| PN4.percent | PN4.plot('dana.png') | PN4.open_plot
)
f[PN4.PNI('/tmp')] # use our derived PipeNodeInput
def approach_pipe_4b():
a = (PN4.name_count_per_year(lambda n: n.lower().startswith('lesl'))
| PN4.percent | fpn.store('lesl'))
b = (PN4.name_count_per_year(lambda n: n.lower().startswith('dana'))
| PN4.percent | fpn.store('dana'))
f = (PN4.merge_gender_data(lesl=a, dana=b)
| PN4.year_range(1920, 2000)
| fpn.store('merged') * 100
| PN4.plot('gender.png')
| PN4.open_plot)
pni = PN4.PNI('/tmp')
f[pni]
xlsx_fp = os.path.join(pni.output_dir, 'output.xlsx')
xlsx = pd.ExcelWriter(xlsx_fp)
for k, df in pni.store_items():
df.to_excel(xlsx, k)
xlsx.save()
os.system('libreoffice --calc ' + xlsx_fp)
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
func = locals().get(sys.argv[1], None)
if func:
print(func)
func()
| mit |
charanpald/wallhack | wallhack/kcore/BoundExp.py | 1 | 2602 | import array
import numpy
import scipy.io
import scipy.sparse.linalg
import matplotlib
matplotlib.use("GTK3Agg")
import matplotlib.pyplot as plt
from sandbox.util.PathDefaults import PathDefaults
from sandbox.util.IdIndexer import IdIndexer
from sandbox.util.Latex import Latex
from apgl.graph.GraphUtils import GraphUtils
"""
We try to figure out the change in L_i and L_{i+1}
"""
numpy.set_printoptions(suppress=True, precision=4)
dataDir = PathDefaults.getDataDir() + "kcore/"
indexer = IdIndexer()
node1Inds = array.array("i")
node2Inds = array.array("i")
Ls = []
us = []
boundFro = []
bound2 = []
ks = []
eyes = []
deltas = []
for i in range(1, 9):
print(i)
networkFilename = dataDir + "network_1_kcores/network_1-core" + str("%02d" % (i,)) + ".txt"
networkFile = open(networkFilename)
networkFile.readline()
networkFile.readline()
networkFile.readline()
networkFile.readline()
node1Inds = array.array("i")
node2Inds = array.array("i")
for line in networkFile:
vals = line.split()
node1Inds.append(indexer.append(vals[0]))
node2Inds.append(indexer.append(vals[1]))
node1Inds = numpy.array(node1Inds)
node2Inds = numpy.array(node2Inds)
m = len(indexer.getIdDict())
A = numpy.zeros((m, m))
A[node1Inds, node2Inds] = 1
A = (A+A.T)/2
A = scipy.sparse.csr_matrix(A)
L = GraphUtils.normalisedLaplacianSym(A)
Ls.append(L)
u, V = scipy.sparse.linalg.eigs(L, k=m-2, which="SM")
u = u.real
inds = numpy.argsort(u)
u = u[inds]
V = V[:, inds]
us.append(u)
k0 = numpy.where(u > 0.01)[0][0]
k = numpy.argmax(numpy.diff(u[k0:]))
ks.append(k)
print("k0="+ str(k0) + " k="+ str(k))
V = V[:, k0:k0+k+1]
if i != 1:
E = L - Ls[-2]
E = numpy.array(E.todense())
EV = E.dot(V)
L = numpy.array(L.todense())
delta = us[-2][k0+k+1] - us[-1][k0+k]
boundFro.append(numpy.linalg.norm(EV)/delta)
bound2.append(numpy.linalg.norm(EV, ord=2)/delta)
eyes.append(i)
deltas.append(delta)
boundFro = numpy.array(boundFro)
bound2 = numpy.array(bound2)
eyes = numpy.array(eyes)-1
deltas = numpy.array(deltas)
#2 norm bound is bad
#Frobenius norm bound is good but only for last few cores
print(1/deltas)
print(ks)
print(boundFro/numpy.sqrt(ks[:-1]))
print(Latex.array1DToRow(eyes))
print(Latex.array1DToRow(numpy.sqrt(ks)))
print(Latex.array1DToRow(boundFro))
print(Latex.array1DToRow(bound2))
| gpl-3.0 |
xhongyi/toybrick | Plotting-script-bitvector/time_results/bar_plot-xlabel.py | 1 | 5787 | #!/usr/bin/python
import sys
import numpy as np
import matplotlib as mpl
from matplotlib.patches import Rectangle
mpl.use('pgf')
"""
parse CLI inputs
"""
OUTPUT_FILE = sys.argv[1]
INPUT_FILE = sys.argv[2]
NUM_EDITS = sys.argv[3]
print "Starting work on " + OUTPUT_FILE + ".{pdf,pgf}. This may take 10+ minutes, be patient."
def figsize(scale):
fig_width_pt = 469.755 # Get this from LaTeX using \the\textwidth
inches_per_pt = 1.0/72.27 # Convert pt to inch
golden_mean = .16#(np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this)
fig_width = fig_width_pt*inches_per_pt*scale # width in inches
fig_height = fig_width*golden_mean # height in inches
fig_size = [fig_width,fig_height]
return fig_size
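# Editor's worked note (not part of the original script): with
# fig_width_pt = 469.755 and scale = 0.9, fig_width is about
# 469.755 / 72.27 * 0.9 ~= 5.85 inches and fig_height ~= 5.85 * 0.16 ~= 0.94
# inches, i.e. a wide, short panel so several benchmark plots stack on a page.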
def savefig(filename):
plt.savefig('{}.pgf'.format(filename))
plt.savefig('{}.pdf'.format(filename))
pgf_with_latex = { # setup matplotlib to use latex for output
"pgf.texsystem": "pdflatex", # change this if using xetex or lautex
"text.usetex": True, # use LaTeX to write all text
"font.family": "serif",
"font.serif": [], # blank entries should cause plots to inherit fonts from the document
"font.sans-serif": [],
"font.monospace": [],
"axes.labelsize": 10, # LaTeX default is 10pt font.
"text.fontsize": 10,
"legend.fontsize": 8, # Make the legend/label fonts a little smaller
"xtick.labelsize": 8,
"ytick.labelsize": 8,
"figure.figsize": figsize(0.9), # default fig size of 0.9 textwidth
"pgf.preamble": [
r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :)
r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble
]
}
mpl.rcParams.update(pgf_with_latex)
mpl.rcParams.update({'font.size': 22})
import matplotlib.pyplot as plt
"""
used to define a new figure with the settings in pgf_with_latex
"""
def newfig(width):
plt.clf()
fig = plt.figure(figsize=figsize(width))
ax = fig.add_subplot(111)
return fig, ax
"""
this function will label the tops of the bar with their height (near end of code are example calls)
"""
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),ha='center', va='bottom')
"""
open files and read in data
"""
f = open(INPUT_FILE, 'r')
f_data = f.read().strip('\n').split('\n')[1:]
shd_data = []
seqan_data = []
swps_data = []
af_data = []
for line in f_data:
times = line.split('\t')
shd_data.append(float(times[1]))
seqan_data.append(float(times[0]))
swps_data.append(float(times[2]))
af_data.append(float(times[3]))
"""
set information for the bar plot and create the rectangles
"""
N = len(shd_data)
ind = np.arange(N) # the x locations for the groups
width = .20 # the width of the bars
fig, ax = newfig(1.0)
rects1 = ax.bar(ind, shd_data, width, color='black')
rects2 = ax.bar(ind+width, seqan_data, width, color='r', hatch='/')
rects3 = ax.bar(ind+2*width, af_data, width, color='#33CC33', hatch='.')
rects4 = ax.bar(ind+3*width, swps_data, width, color='#6666FF', hatch='-')
"""
COMMENTED OUT AXES INFORMATION TO MAKE PLOT MORE COMPACT
"""
'''# xtick mark only on fifth plot
ax.set_ylabel('Time (sec)')
if(int(NUM_EDITS) == 5): # adds xlabel to the fifth plot
fig.subplots_adjust(bottom=0.275, top=.95, right=.99, left=.125)
ax.set_xlabel('Benchmarks')
plt.xticks(rotation=-25)
ax.set_xticklabels( ('ERR240726', 'ERR240727', 'ERR240728', 'ERR240729', 'ERR240730', 'ERR240731', 'ERR240732', 'ERR240733', 'ERR240734', 'ERR240735') )
else:
fig.subplots_adjust(bottom=0.05, top=.95, right=.99, left=.125)
'''
# xtick marks on every plot
ax.set_xlabel('Benchmarks')
plt.xticks(rotation=-25)
ax.set_xticklabels( ('ERR240726', 'ERR240727', 'ERR240728', 'ERR240729', 'ERR240730', 'ERR240731', 'ERR240732', 'ERR240733', 'ERR240734', 'ERR240735') )
'''
plt.tick_params(\
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off') # labels along the bottom edge are off
'''
fig.subplots_adjust(bottom=0.6, top=.95, right=.99, left=.125)
'''
if(int(NUM_EDITS) == 1):
ax.text(.1,.99*ymax, str(NUM_EDITS) + " errors tolerated")
else:
ax.text(.1,.90*ymax, str(NUM_EDITS) + " errors tolerated")
'''
'''
if(int(NUM_EDITS) == 1):
ax.set_title(NUM_EDITS + " Edit Tolerated Between Read and Reference")
else:
ax.set_title(NUM_EDITS + " Edits Tolerated Between Read and Reference")
'''
"""
Set the only 3 ytick marks
"""
ymax = 10000*(int(max(swps_data) / 10000) + 1)
ax.set_yticks( (0, ymax/2, ymax) )
"""
Set the x lim based on number of rectangles and distance between xtick marks
"""
plt.xlim([-width,width*(5*len(shd_data) + 8)])
ax.set_xticks(ind+4*width)
"""
Plot the rectangles and set the legend title based on the number of errors
"""
'''if(int(NUM_EDITS) == 1):
legend = ax.legend( (rects1[0], rects2[0], rects3[0], rects4[0]), ('SHD', 'Seqan', 'AF', 'Swps'), loc=1, title=str(NUM_EDITS) + " Error")
else:
legend = ax.legend( (rects1[0], rects2[0], rects3[0], rects4[0]), ('SHD', 'Seqan', 'AF', 'Swps'), loc=1, title=str(NUM_EDITS) + " Errors")
plt.setp(legend.get_title(),fontsize='10')
'''
#autolabel(rects1)
#autolabel(rects2)
#autolabel(rects3)
"""
output the pgf and pdflatex documents
"""
savefig(OUTPUT_FILE)
print "Done with " + OUTPUT_FILE + ".{pdf,pgf}"
| bsd-3-clause |
gwpy/gwpy | examples/timeseries/filter.py | 3 | 2797 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Filtering a `TimeSeries` with a ZPK filter
Several data streams read from the LIGO detectors are whitened before being
recorded to prevent numerical errors when using single-precision data
storage.
In this example we read such `channel <gwpy.detector.Channel>` and undo the
whitening to show the physical content of these data.
"""
__author__ = "Duncan Macleod <[email protected]>"
__currentmodule__ = 'gwpy.timeseries'
# First, we import the `TimeSeries` and :meth:`~TimeSeries.get` the data:
from gwpy.timeseries import TimeSeries
white = TimeSeries.get(
'L1:OAF-CAL_DARM_DQ', 'March 2 2015 12:00', 'March 2 2015 12:30')
# Now, we can re-calibrate these data into displacement units by first applying
# a `highpass <TimeSeries.highpass>` filter to remove the low-frequency noise,
# and then applying our de-whitening filter in `ZPK <TimeSeries.zpk>` format
# with five zeros at 100 Hz and five poles at 1 Hz (giving an overall DC
# gain of 10 :sup:`-10`):
hp = white.highpass(4)
displacement = hp.zpk([100]*5, [1]*5, 1e-10)
# We can visualise the impact of the whitening by calculating the ASD
# `~gwpy.frequencyseries.FrequencySeries` before and after the filter,
whiteasd = white.asd(8, 4)
dispasd = displacement.asd(8, 4)
# and plotting:
from gwpy.plot import Plot
plot = Plot(whiteasd, dispasd, separate=True, sharex=True,
xscale='log', yscale='log')
# Here we have passed the two
# `spectra <gwpy.frequencyseries.FrequencySeries>` in order,
# then `separate=True` to display them on separate Axes, `sharex=True` to tie
# the `~matplotlib.axis.XAxis` of each of the `~gwpy.plot.Axes`
# together.
#
# Finally, we prettify our plot with some limits, and some labels:
plot.text(0.95, 0.05, 'Preliminary', fontsize=40, color='gray', # hide
ha='right', rotation=45, va='bottom', alpha=0.5) # hide
plot.axes[0].set_ylabel('ASD [whitened]')
plot.axes[1].set_ylabel(r'ASD [m/$\sqrt{\mathrm{Hz}}$]')
plot.axes[1].set_xlabel('Frequency [Hz]')
plot.axes[1].set_ylim(1e-20, 1e-15)
plot.axes[1].set_xlim(5, 4000)
plot.show()
| gpl-3.0 |
Shaswat27/scipy | scipy/cluster/hierarchy.py | 18 | 95902 | """
========================================================
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
cut_tree
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
References
----------
.. [1] "Statistics toolbox." API Reference Documentation. The MathWorks.
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
Accessed October 1, 2007.
.. [2] "Hierarchical clustering." API Reference Documentation.
The Wolfram Research, Inc.
http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/
HierarchicalClustering.html.
Accessed October 1, 2007.
.. [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
.. [4] Ward Jr, JH. "Hierarchical grouping to optimize an objective
function." Journal of the American Statistical Association. 58(301):
pp. 236--44. 1963.
.. [5] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
32(2): pp. 241--54. 1966.
.. [6] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
855--60. 1962.
.. [7] Batagelj, V. "Comparing resemblance measures." Journal of
Classification. 12: pp. 73--90. 1995.
.. [8] Sokal, RR and Michener, CD. "A statistical method for evaluating
systematic relationships." Scientific Bulletins. 38(22):
pp. 1409--38. 1958.
.. [9] Edelbrock, C. "Mixture model tests of hierarchical clustering
algorithms: the problem of classifying everybody." Multivariate
Behavioral Research. 14: pp. 367--84. 1979.
.. [10] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
Prentice-Hall. Englewood Cliffs, NJ. 1988.
.. [11] Fisher, RA "The use of multiple measurements in taxonomic
problems." Annals of Eugenics, 7(2): 179-188. 1936
* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.
* Mathematica is a registered trademark of The Wolfram Research, Inc.
"""
from __future__ import division, print_function, absolute_import
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import bisect
from collections import deque
import numpy as np
from . import _hierarchy
import scipy.spatial.distance as distance
from scipy._lib.six import string_types
from scipy._lib.six import xrange
_LINKAGE_METHODS = {'single': 0, 'complete': 1, 'average': 2, 'centroid': 3,
'median': 4, 'ward': 5, 'weighted': 6}
_EUCLIDEAN_METHODS = ('centroid', 'median', 'ward')
__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet',
'correspond', 'cut_tree', 'dendrogram', 'fcluster', 'fclusterdata',
'from_mlab_linkage', 'inconsistent', 'is_isomorphic',
'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders',
'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts',
'median', 'num_obs_linkage', 'set_link_color_palette', 'single',
'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance']
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, stacklevel=3)
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _randdm(pnts):
""" Generates a random distance matrix stored in condensed form. A
pnts * (pnts - 1) / 2 sized vector is returned.
"""
if pnts >= 2:
        D = np.random.rand(pnts * (pnts - 1) // 2)
else:
raise ValueError("The number of points in the distance matrix "
"must be at least 2.")
return D
def single(y):
"""
Performs single/min/nearest linkage on the condensed distance matrix ``y``
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
The linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='single', metric='euclidean')
def complete(y):
"""
Performs complete/max/farthest point linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Performs average/UPGMA linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Performs weighted/WPGMA linkage on the condensed distance matrix.
See ``linkage`` for more information on the return
structure and algorithm.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Performs centroid/UPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See ``linkage``
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Performs median/WPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See linkage
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Performs Ward's linkage on a condensed or redundant distance matrix.
See linkage for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
Performs Ward's linkage on the condensed distance matrix ``Z``. See
linkage for more information on the return structure and
algorithm.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric. See linkage for more
information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
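    Examples
    --------
    A minimal sketch; the observation matrix below is toy data chosen only
    to illustrate the call:
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import ward
    >>> X = np.array([[0., 0.], [0., 1.], [1., 0.],
    ...               [5., 5.], [5., 6.], [6., 5.]])
    >>> Z = ward(X)      # equivalent to linkage(X, method='ward')
    >>> Z.shape          # one row per merge: (n - 1) by 4
    (5, 4)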
"""
return linkage(y, method='ward', metric='euclidean')
def linkage(y, method='single', metric='euclidean'):
"""
Performs hierarchical/agglomerative clustering on the condensed
distance matrix y.
y must be a :math:`{n \\choose 2}` sized
vector where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB linkage function.
An :math:`(n-1)` by 4 matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
cluster :math:`v`. Recall :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \\min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \\max(dist(u[i],v[j]))
for all points :math:`i` in cluster u and :math:`j` in
      cluster :math:`v`. This is also known as the Farthest Point
Algorithm or Voor Hees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
and :math:`v`, respectively. This is also called the UPGMA
algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
where cluster u was formed with cluster s and t and v
is a remaining cluster in the forest. (also called WPGMA)
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
* method='median' assigns :math:`d(s,t)` like the ``centroid``
method. When two clusters :math:`s` and :math:`t` are combined
into a new cluster :math:`u`, the average of centroids s and t
      gives the new centroid :math:`u`. This is also known as the
WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there
may be two or more pairs with the same minimum distance. This
    implementation may choose a different minimum than the MATLAB
version.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed distance matrix
is a flat array containing the upper triangular of the distance matrix.
This is the form that ``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in n dimensions may be passed as an
:math:`m` by :math:`n` array.
method : str, optional
The linkage algorithm to use. See the ``Linkage Methods`` section below
for full descriptions.
metric : str or function, optional
The distance metric to use in the case that y is a collection of
observation vectors; ignored otherwise. See the ``distance.pdist``
function for a list of valid distance metrics. A custom distance
function can also be used. See the ``distance.pdist`` function for
details.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
Notes
-----
1. For method 'single' an optimized algorithm called SLINK is implemented,
which has :math:`O(n^2)` time complexity.
For methods 'complete', 'average', 'weighted' and 'ward' an algorithm
called nearest-neighbors chain is implemented, which too has time
complexity :math:`O(n^2)`.
For other methods a naive algorithm is implemented with :math:`O(n^3)`
time complexity.
All algorithms use :math:`O(n^2)` memory.
Refer to [1]_ for details about the algorithms.
2. Methods 'centroid', 'median' and 'ward' are correctly defined only if
Euclidean pairwise metric is used. If `y` is passed as precomputed
pairwise distances, then it is a user responsibility to assure that
these distances are in fact Euclidean, otherwise the produced result
will be incorrect.
References
----------
.. [1] Daniel Mullner, "Modern hierarchical, agglomerative clustering
algorithms", `arXiv:1109.2378v1 <http://arxiv.org/abs/1109.2378v1>`_
, 2011.
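    Examples
    --------
    A short illustrative sketch (the observation matrix is toy data):
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import linkage
    >>> X = np.array([[0., 0.], [0., 1.], [1., 0.],
    ...               [10., 10.], [10., 11.], [11., 10.]])
    >>> Z = linkage(X, method='ward', metric='euclidean')
    >>> Z.shape               # one row per merge: (n - 1) by 4
    (5, 4)
    >>> int(Z[-1, 3])         # the last-formed cluster contains all 6 points
    6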
"""
if method not in _LINKAGE_METHODS:
raise ValueError("Invalid method: {0}".format(method))
y = _convert_to_double(np.asarray(y, order='c'))
if y.ndim == 1:
distance.is_valid_y(y, throw=True, name='y')
[y] = _copy_arrays_if_base_present([y])
elif y.ndim == 2:
if method in _EUCLIDEAN_METHODS and metric != 'euclidean':
raise ValueError("Method '{0}' requires the distance metric "
"to be Euclidean".format(method))
y = distance.pdist(y, metric)
else:
raise ValueError("`y` must be 1 or 2 dimensional.")
n = int(distance.num_obs_y(y))
method_code = _LINKAGE_METHODS[method]
if method == 'single':
return _hierarchy.slink(y, n)
elif method in ['complete', 'average', 'weighted', 'ward']:
return _hierarchy.nn_chain(y, n, method_code)
else:
return _hierarchy.linkage(y, n, method_code)
class ClusterNode:
"""
A tree node class for representing a cluster.
Leaf nodes correspond to original observations, while non-leaf nodes
correspond to non-singleton clusters.
The to_tree function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
See Also
--------
to_tree : for converting a linkage matrix ``Z`` into a tree object.
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted.'
' This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original '
'observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def __lt__(self, node):
if not isinstance(node, ClusterNode):
raise ValueError("Can't compare ClusterNode "
"to type {}".format(type(node)))
return self.dist < node.dist
def __gt__(self, node):
if not isinstance(node, ClusterNode):
raise ValueError("Can't compare ClusterNode "
"to type {}".format(type(node)))
return self.dist > node.dist
def __eq__(self, node):
if not isinstance(node, ClusterNode):
raise ValueError("Can't compare ClusterNode "
"to type {}".format(type(node)))
return self.dist == node.dist
def get_id(self):
"""
The identifier of the target node.
For ``0 <= i < n``, `i` corresponds to original observation i.
For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
at iteration ``i-n``.
Returns
-------
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
The number of leaf nodes (original observations) belonging to
the cluster node nd. If the target node is a leaf, 1 is
returned.
Returns
-------
get_count : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left
def get_right(self):
"""
Returns a reference to the right child tree object.
Returns
-------
right : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.right
def is_leaf(self):
"""
Returns True if the target node is a leaf.
Returns
-------
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Performs pre-order traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement::
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
Parameters
----------
func : function
Applied to each leaf ClusterNode object in the pre-order traversal.
            Given the i'th leaf node in the pre-order traversal ``n[i]``, the
result of func(n[i]) is stored in L[i]. If not provided, the index
of the original observation to which the node corresponds is used.
Returns
-------
L : list
The pre-order traversal.
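        Examples
        --------
        A small sketch on a toy linkage (see also `to_tree`):
        >>> import numpy as np
        >>> from scipy.cluster.hierarchy import linkage, to_tree
        >>> root = to_tree(linkage(np.array([2., 3., 4.]), method='single'))
        >>> sorted(root.pre_order())     # ids of the original observations
        [0, 1, 2]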
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = set()
rvisited = set()
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if ndid not in lvisited:
curNode[k + 1] = nd.left
lvisited.add(ndid)
k = k + 1
elif ndid not in rvisited:
curNode[k + 1] = nd.right
rvisited.add(ndid)
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def _order_cluster_tree(Z):
"""
Returns clustering nodes in bottom-up order by distance.
Parameters
----------
Z : scipy.cluster.linkage array
The linkage matrix.
Returns
-------
nodes : list
A list of ClusterNode objects.
"""
q = deque()
tree = to_tree(Z)
q.append(tree)
nodes = []
while q:
node = q.popleft()
if not node.is_leaf():
bisect.insort_left(nodes, node)
q.append(node.get_right())
q.append(node.get_left())
return nodes
def cut_tree(Z, n_clusters=None, height=None):
"""
Given a linkage matrix Z, return the cut tree.
Parameters
----------
Z : scipy.cluster.linkage array
The linkage matrix.
n_clusters : array_like, optional
Number of clusters in the tree at the cut point.
height : array_like, optional
The height at which to cut the tree. Only possible for ultrametric
trees.
Returns
-------
cutree : array
An array indicating group membership at each agglomeration step. I.e.,
for a full cut tree, in the first column each data point is in its own
cluster. At the next step, two nodes are merged. Finally all singleton
and non-singleton clusters are in one group. If `n_clusters` or
`height` is given, the columns correspond to the columns of `n_clusters` or
`height`.
Examples
--------
>>> from scipy import cluster
>>> np.random.seed(23)
>>> X = np.random.randn(50, 4)
>>> Z = cluster.hierarchy.ward(X)
>>> cutree = cluster.hierarchy.cut_tree(Z, n_clusters=[5, 10])
>>> cutree[:10]
array([[0, 0],
[1, 1],
[2, 2],
[3, 3],
[3, 4],
[2, 2],
[0, 0],
[1, 5],
[3, 6],
[4, 7]])
"""
nobs = num_obs_linkage(Z)
nodes = _order_cluster_tree(Z)
if height is not None and n_clusters is not None:
raise ValueError("At least one of either height or n_clusters "
"must be None")
elif height is None and n_clusters is None: # return the full cut tree
cols_idx = np.arange(nobs)
elif height is not None:
heights = np.array([x.dist for x in nodes])
cols_idx = np.searchsorted(heights, height)
else:
cols_idx = nobs - np.searchsorted(np.arange(nobs), n_clusters)
try:
n_cols = len(cols_idx)
except TypeError: # scalar
n_cols = 1
cols_idx = np.array([cols_idx])
groups = np.zeros((n_cols, nobs), dtype=int)
last_group = np.arange(nobs)
if 0 in cols_idx:
groups[0] = last_group
for i, node in enumerate(nodes):
idx = node.pre_order()
this_group = last_group.copy()
this_group[idx] = last_group[idx].min()
this_group[this_group > last_group[idx].max()] -= 1
if i + 1 in cols_idx:
groups[np.where(i + 1 == cols_idx)[0]] = this_group
last_group = this_group
return groups.T
def to_tree(Z, rd=False):
"""
Converts a hierarchical clustering encoded in the matrix ``Z`` (by
linkage) into an easy-to-use tree object.
The reference r to the root ClusterNode object is returned.
Each ClusterNode object has a left, right, dist, id, and count
attribute. The left and right attributes point to ClusterNode objects
that were combined to generate the cluster. If both are None then
the ClusterNode object is a leaf node, its count must be 1, and its
distance is meaningless but set to 0.
Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.
Parameters
----------
Z : ndarray
The linkage matrix in proper form (see the ``linkage``
function documentation).
rd : bool, optional
When False, a reference to the root ClusterNode object is
returned. Otherwise, a tuple (r,d) is returned. ``r`` is a
reference to the root node while ``d`` is a dictionary
mapping cluster ids to ClusterNode references. If a cluster id is
less than n, then it corresponds to a singleton cluster
(leaf node). See ``linkage`` for more information on the
assignment of cluster ids to clusters.
Returns
-------
    tree : ClusterNode
        The root node of the converted tree. If ``rd`` is True, a tuple
        ``(r, d)`` is returned instead, as described for the ``rd`` parameter.
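    Examples
    --------
    A minimal sketch using a toy condensed distance matrix of three points:
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import linkage, to_tree
    >>> Z = linkage(np.array([2., 3., 4.]), method='single')
    >>> root = to_tree(Z)
    >>> root.get_count()      # all original observations sit below the root
    3
    >>> root.get_id()         # the root is the last-formed cluster, id 2n-2
    4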
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
    # The number of original objects is equal to the number of rows plus 1.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in xrange(0, n):
d[i] = ClusterNode(i)
nd = None
for i in xrange(0, n - 1):
fi = int(Z[i, 0])
fj = int(Z[i, 1])
if fi > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 0') % fi)
if fj > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 1') % fj)
nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
# ^ id ^ left ^ right ^ dist
if Z[i, 3] != nd.count:
raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
'incorrect.') % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
def _convert_to_bool(X):
if X.dtype != bool:
X = X.astype(bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
"""
Calculates the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``i`` and ``j`` is simply the distance between
clusters ``s`` and ``t``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as an array
(see `linkage` function).
Y : ndarray (optional)
Calculates the cophenetic correlation coefficient ``c`` of a
hierarchical clustering defined by the linkage matrix `Z`
of a set of :math:`n` observations in :math:`m`
dimensions. `Y` is the condensed distance matrix from which
`Z` was generated.
Returns
-------
c : ndarray
        The cophenetic correlation coefficient (if ``Y`` is passed).
d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
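    Examples
    --------
    An illustrative sketch on random toy data:
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import linkage, cophenet
    >>> from scipy.spatial.distance import pdist
    >>> np.random.seed(0)
    >>> X = np.random.randn(10, 3)
    >>> Y = pdist(X)
    >>> Z = linkage(Y, method='average')
    >>> c, d = cophenet(Z, Y)
    >>> d.shape == Y.shape    # one cophenetic distance per observation pair
    True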
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
zz = np.zeros((n * (n-1)) // 2, dtype=np.double)
# Since the C code does not support striding using strides.
# The dimensions are used instead.
Z = _convert_to_double(Z)
_hierarchy.cophenetic_distances(Z, zz, int(n))
if Y is None:
return zz
Y = np.asarray(Y, order='c')
distance.is_valid_y(Y, throw=True, name='Y')
z = zz.mean()
y = Y.mean()
Yy = Y - y
Zz = zz - z
numerator = (Yy * Zz)
denomA = Yy**2
denomB = Zz**2
c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
return (c, zz)
def inconsistent(Z, d=2):
r"""
Calculates inconsistency statistics on a linkage.
Note: This function behaves similarly to the MATLAB(TM)
inconsistent function.
Parameters
----------
Z : ndarray
The :math:`(n-1)` by 4 matrix encoding the linkage (hierarchical
clustering). See `linkage` documentation for more information on its
form.
d : int, optional
The number of links up to `d` levels below each non-singleton cluster.
Returns
-------
R : ndarray
        A :math:`(n-1)` by 4 matrix where the ``i``'th row contains the link
statistics for the non-singleton cluster ``i``. The link statistics are
computed over the link heights for links :math:`d` levels below the
cluster ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is the number
of links included in the calculation; and ``R[i,3]`` is the
inconsistency coefficient,
        .. math:: \frac{\mathtt{Z[i,2]} - \mathtt{R[i,0]}}{\mathtt{R[i,1]}}
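    Examples
    --------
    A brief sketch on random toy data:
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import linkage, inconsistent
    >>> np.random.seed(1)
    >>> X = np.random.randn(8, 2)
    >>> Z = linkage(X, method='complete')
    >>> R = inconsistent(Z, d=2)
    >>> R.shape               # one row of link statistics per merge
    (7, 4)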
"""
Z = np.asarray(Z, order='c')
Zs = Z.shape
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative '
'integer value.')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
n = Zs[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
_hierarchy.inconsistent(Z, R, int(n), int(d))
return R
def from_mlab_linkage(Z):
"""
Converts a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module.
The conversion does two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
    * a fourth column Z[:,3] is added where Z[i,3] represents the
number of original observations (leaves) in the non-singleton
cluster i.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
Parameters
----------
Z : ndarray
A linkage matrix generated by MATLAB(TM).
Returns
-------
ZS : ndarray
A linkage matrix compatible with this library.
"""
Z = np.asarray(Z, dtype=np.double, order='c')
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return Z.copy()
Zpart = Z.copy()
if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N')
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
_hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)
return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
def to_mlab_linkage(Z):
"""
Converts a linkage matrix to a MATLAB(TM) compatible one.
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
Parameters
----------
Z : ndarray
A linkage matrix generated by this library.
Returns
-------
to_mlab_linkage : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
The return linkage matrix has the last column removed
and the cluster indices are converted to ``1..N`` indexing.
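    Examples
    --------
    A round-trip sketch with `from_mlab_linkage` on a toy linkage:
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import (linkage, to_mlab_linkage,
    ...                                      from_mlab_linkage)
    >>> Z = linkage(np.array([2., 3., 4.]), method='single')
    >>> ZM = to_mlab_linkage(Z)
    >>> ZM.shape              # the cluster-size column has been dropped
    (2, 3)
    >>> np.allclose(from_mlab_linkage(ZM), Z)
    True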
"""
Z = np.asarray(Z, order='c', dtype=np.double)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
is_valid_linkage(Z, throw=True, name='Z')
ZP = Z[:, 0:3].copy()
ZP[:, 0:2] += 1.0
return ZP
def is_monotonic(Z):
"""
Returns True if the linkage passed is monotonic.
The linkage is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
Parameters
----------
Z : ndarray
The linkage matrix to check for monotonicity.
Returns
-------
b : bool
A boolean indicating whether the linkage is monotonic.
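    Examples
    --------
    A small sketch; linkages produced by `linkage` with the 'single' method
    are always monotonic:
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import linkage, is_monotonic
    >>> Z = linkage(np.array([2., 3., 4.]), method='single')
    >>> bool(is_monotonic(Z))
    True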
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
    # We expect each value to be no smaller than its predecessor,
    # i.e. the merge distances Z[:, 2] must be non-decreasing.
return (Z[1:, 2] >= Z[:-1, 2]).all()
def is_valid_im(R, warning=False, throw=False, name=None):
"""Returns True if the inconsistency matrix passed is valid.
It must be a :math:`n` by 4 numpy array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
"""
R = np.asarray(R, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(R) != np.ndarray:
raise TypeError('Variable %spassed as inconsistency matrix is not '
'a numpy array.' % name_str)
if R.dtype != np.double:
raise TypeError('Inconsistency matrix %smust contain doubles '
'(double).' % name_str)
if len(R.shape) != 2:
raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. '
'be two-dimensional).' % name_str)
if R.shape[1] != 4:
raise ValueError('Inconsistency matrix %smust have 4 columns.' %
name_str)
if R.shape[0] < 1:
raise ValueError('Inconsistency matrix %smust have at least one '
'row.' % name_str)
if (R[:, 0] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height means.' % name_str)
if (R[:, 1] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height standard deviations.' % name_str)
if (R[:, 2] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'counts.' % name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
"""
Checks the validity of a linkage matrix.
A linkage matrix is valid if it is a two dimensional array (type double)
with :math:`n` rows and 4 columns. The first two columns must contain
indices between 0 and :math:`2n-1`. For a given row ``i``, the following
two expressions have to hold:
.. math::
0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1
    0 \\leq \\mathtt{Z[i,1]} \\leq i+n-1
I.e. a cluster cannot join another cluster unless the cluster being joined
has been generated.
Parameters
----------
Z : array_like
Linkage matrix.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
        True if the linkage matrix is valid.
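    Examples
    --------
    A quick sketch of the check on a valid and an (artificially) invalid
    matrix:
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import linkage, is_valid_linkage
    >>> Z = linkage(np.array([2., 3., 4.]), method='single')
    >>> is_valid_linkage(Z)
    True
    >>> is_valid_linkage(Z.astype(int))   # wrong dtype: must contain doubles
    False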
"""
Z = np.asarray(Z, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(Z) != np.ndarray:
raise TypeError('Passed linkage argument %sis not a valid array.' %
name_str)
if Z.dtype != np.double:
raise TypeError('Linkage matrix %smust contain doubles.' % name_str)
if len(Z.shape) != 2:
raise ValueError('Linkage matrix %smust have shape=2 (i.e. be '
'two-dimensional).' % name_str)
if Z.shape[1] != 4:
raise ValueError('Linkage matrix %smust have 4 columns.' % name_str)
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two '
'observations.')
n = Z.shape[0]
if n > 1:
if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
raise ValueError('Linkage %scontains negative indices.' %
name_str)
if (Z[:, 2] < 0).any():
raise ValueError('Linkage %scontains negative distances.' %
name_str)
if (Z[:, 3] < 0).any():
raise ValueError('Linkage %scontains negative counts.' %
name_str)
if _check_hierarchy_uses_cluster_before_formed(Z):
raise ValueError('Linkage %suses non-singleton cluster before '
'it is formed.' % name_str)
if _check_hierarchy_uses_cluster_more_than_once(Z):
raise ValueError('Linkage %suses the same cluster more than once.'
% name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in xrange(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Returns the number of original observations of the linkage matrix
passed.
Parameters
----------
Z : ndarray
The linkage matrix on which to perform the operation.
Returns
-------
n : int
The number of original observations in the linkage.
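    Examples
    --------
    A one-line sketch on a toy linkage of three observations:
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import linkage, num_obs_linkage
    >>> Z = linkage(np.array([2., 3., 4.]), method='single')
    >>> num_obs_linkage(Z)
    3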
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Checks for correspondence between linkage and condensed distance matrices
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
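    Examples
    --------
    A short sketch on a toy condensed distance matrix:
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import linkage, correspond
    >>> y = np.array([2., 3., 4.])   # condensed distances for 3 observations
    >>> Z = linkage(y, method='single')
    >>> correspond(Z, y)
    True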
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Forms flat clusters from the hierarchical clustering defined by
the linkage matrix ``Z``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the `linkage` function.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
The criterion to use in forming flat clusters. This can
be any of the following values:
``inconsistent`` : If a cluster node and all its
descendants have an inconsistent value less than or equal
to `t` then all its leaf descendants belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
``distance`` : Forms flat clusters so that the original
observations in each flat cluster have no greater a
cophenetic distance than `t`.
``maxclust`` : Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than `t` flat clusters are formed.
``monocrit`` : Forms a flat cluster from a cluster node c
with index i when ``monocrit[j] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do::
MR = maxRstat(Z, R, 3)
cluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
``maxclust_monocrit`` : Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do::
MI = maxinconsts(Z, R)
cluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
depth : int, optional
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. Default is 2.
R : ndarray, optional
The inconsistency matrix to use for the 'inconsistent'
criterion. This matrix is computed if not provided.
monocrit : ndarray, optional
An array of length n-1. `monocrit[i]` is the
        statistic upon which non-singleton i is thresholded. The
monocrit vector must be monotonic, i.e. given a node c with
index i, for all node indices j corresponding to nodes
below c, ``monocrit[i] >= monocrit[j]``.
Returns
-------
fcluster : ndarray
An array of length n. T[i] is the flat cluster number to
which original observation i belongs.
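    Examples
    --------
    An illustrative sketch; the data and threshold below are toy values:
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import linkage, fcluster
    >>> X = np.array([[0., 0.], [0., 1.], [1., 0.],
    ...               [10., 10.], [10., 11.], [11., 10.]])
    >>> Z = linkage(X, method='single')
    >>> T = fcluster(Z, t=2.0, criterion='distance')
    >>> len(T) == len(X)      # one flat-cluster label per observation
    True
    >>> len(np.unique(T))     # the two well-separated groups are recovered
    2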
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = np.asarray(R, order='c')
is_valid_im(R, throw=True, name='R')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[R] = _copy_arrays_if_base_present([R])
_hierarchy.cluster_in(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy.cluster_dist(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
elif criterion == 'monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s'
% str(criterion))
return T
def fclusterdata(X, t, criterion='inconsistent',
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
Clusters the original observations in the n-by-m data
matrix X (n observations in m dimensions), using the euclidean
distance metric to calculate distances between original observations,
performs hierarchical clustering using the single linkage algorithm,
and forms flat clusters using the inconsistency method with `t` as the
cut-off threshold.
A one-dimensional array T of length n is returned. T[i] is the index
of the flat cluster to which the original observation i belongs.
Parameters
----------
X : (N, M) ndarray
N by M data matrix with N observations in M dimensions.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent' (default), 'distance', or 'maxclust'
cluster formation algorithms. See `fcluster` for descriptions.
metric : str, optional
The distance metric for calculating pairwise distances. See
`distance.pdist` for descriptions and linkage to verify
compatibility with the linkage method.
depth : int, optional
The maximum depth for the inconsistency calculation. See
`inconsistent` for more information.
method : str, optional
The linkage method to use (single, complete, average,
        weighted, median, centroid, ward). See `linkage` for more
information. Default is "single".
R : ndarray, optional
The inconsistency matrix. It will be computed if necessary
if it is not passed.
Returns
-------
fclusterdata : ndarray
A vector of length n. T[i] is the flat cluster number to
which original observation i belongs.
Notes
-----
This function is similar to the MATLAB function clusterdata.
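    Examples
    --------
    An illustrative sketch with two well-separated toy groups:
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import fclusterdata
    >>> X = np.array([[0., 0.], [0., 1.], [1., 0.],
    ...               [10., 10.], [10., 11.], [11., 10.]])
    >>> T = fclusterdata(X, t=2.0, criterion='distance')
    >>> len(np.unique(T))     # default single linkage separates the groups
    2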
"""
X = np.asarray(X, order='c', dtype=np.double)
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError('The observation matrix X must be an n by m numpy '
'array.')
Y = distance.pdist(X, metric=metric)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = np.asarray(R, order='c')
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
def leaves_list(Z):
"""
Returns a list of leaf node ids
The return corresponds to the observation vector index as it appears
in the tree from left to right. Z is a linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. `Z` is
a linkage matrix. See ``linkage`` for more information.
Returns
-------
leaves_list : ndarray
The list of leaf node ids.
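    Examples
    --------
    A small sketch on a toy linkage:
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import linkage, leaves_list
    >>> Z = linkage(np.array([2., 3., 4.]), method='single')
    >>> sorted(leaves_list(Z).tolist())   # each observation appears once
    [0, 1, 2]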
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.prelist(Z, ML, int(n))
return ML
# Maps number of leaves to text size.
#
# p <= 20, size="12"
# 20 < p <= 30, size="10"
# 30 < p <= 50, size="8"
# 50 < p <= np.inf, size="6"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Removes duplicates AND preserves the original order of the elements.
The set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=None,
leaf_rotation=None, contraction_marks=None,
ax=None, above_threshold_color='b'):
# Import matplotlib here so that it's not imported unless dendrograms
# are plotted. Raise an informative error if importing fails.
try:
# if an axis is provided, don't use pylab at all
if ax is None:
import matplotlib.pylab
import matplotlib.patches
import matplotlib.collections
except ImportError:
raise ImportError("You must install the matplotlib library to plot "
"the dendrogram. Use no_plot=True to calculate the "
"dendrogram without plotting.")
if ax is None:
ax = matplotlib.pylab.gca()
# if we're using pylab, we want to trigger a draw at the end
trigger_redraw = True
else:
trigger_redraw = False
# Independent variable plot width
ivw = len(ivl) * 10
# Dependent variable plot height
dvw = mh + mh * 0.05
iv_ticks = np.arange(5, len(ivl) * 10 + 5, 10)
if orientation in ('top', 'bottom'):
if orientation == 'top':
ax.set_ylim([0, dvw])
ax.set_xlim([0, ivw])
else:
ax.set_ylim([dvw, 0])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(iv_ticks)
if orientation == 'top':
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
leaf_rot = float(_get_tick_rotation(len(ivl))) if (
leaf_rotation is None) else leaf_rotation
leaf_font = float(_get_tick_text_size(len(ivl))) if (
leaf_font_size is None) else leaf_font_size
ax.set_xticklabels(ivl, rotation=leaf_rot, size=leaf_font)
elif orientation in ('left', 'right'):
if orientation == 'left':
ax.set_xlim([dvw, 0])
ax.set_ylim([0, ivw])
else:
ax.set_xlim([0, dvw])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(iv_ticks)
if orientation == 'left':
ax.yaxis.set_ticks_position('right')
else:
ax.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the links
for line in ax.get_yticklines():
line.set_visible(False)
leaf_font = float(_get_tick_text_size(len(ivl))) if (
leaf_font_size is None) else leaf_font_size
if leaf_rotation is not None:
ax.set_yticklabels(ivl, rotation=leaf_rotation, size=leaf_font)
else:
ax.set_yticklabels(ivl, size=leaf_font)
# Let's use collections instead. This way there is a separate legend item
# for each tree grouping, rather than stupidly one for each line segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline, yline, color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(list(zip(xline, yline)))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color],
colors=(color,))
colors_to_collections[color] = coll
# Add all the groupings below the color threshold.
for color in colors_used:
if color != above_threshold_color:
ax.add_collection(colors_to_collections[color])
# If there's a grouping of links above the color threshold, it goes last.
if above_threshold_color in colors_to_collections:
ax.add_collection(colors_to_collections[above_threshold_color])
if contraction_marks is not None:
Ellipse = matplotlib.patches.Ellipse
for (x, y) in contraction_marks:
if orientation in ('left', 'right'):
e = Ellipse((y, x), width=dvw / 100, height=1.0)
else:
e = Ellipse((x, y), width=1.0, height=dvw / 100)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if trigger_redraw:
matplotlib.pylab.draw_if_interactive()
_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']
def set_link_color_palette(palette):
"""
Set list of matplotlib color codes for use by dendrogram.
Note that this palette is global (i.e. setting it once changes the colors
    for all subsequent calls to `dendrogram`) and that it affects only the
    colors below ``color_threshold``.
Note that `dendrogram` also accepts a custom coloring function through its
``link_color_func`` keyword, which is more flexible and non-global.
Parameters
----------
palette : list of str or None
A list of matplotlib color codes. The order of the color codes is the
order in which the colors are cycled through when color thresholding in
the dendrogram.
If ``None``, resets the palette to its default (which is
``['g', 'r', 'c', 'm', 'y', 'k']``).
Returns
-------
None
See Also
--------
dendrogram
Notes
-----
Ability to reset the palette with ``None`` added in Scipy 0.17.0.
Examples
--------
>>> from scipy.cluster import hierarchy
>>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268., 400.,
... 754., 564., 138., 219., 869., 669.])
>>> Z = hierarchy.linkage(ytdist, 'single')
>>> dn = hierarchy.dendrogram(Z, no_plot=True)
>>> dn['color_list']
['g', 'b', 'b', 'b', 'b']
>>> hierarchy.set_link_color_palette(['c', 'm', 'y', 'k'])
>>> dn = hierarchy.dendrogram(Z, no_plot=True)
>>> dn['color_list']
['c', 'b', 'b', 'b', 'b']
>>> dn = hierarchy.dendrogram(Z, no_plot=True, color_threshold=267,
... above_threshold_color='k')
>>> dn['color_list']
['c', 'm', 'm', 'k', 'k']
Now reset the color palette to its default:
>>> hierarchy.set_link_color_palette(None)
"""
if palette is None:
# reset to its default
palette = ['g', 'r', 'c', 'm', 'y', 'k']
elif type(palette) not in (list, tuple):
raise TypeError("palette must be a list or tuple")
_ptypes = [isinstance(p, string_types) for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
for i in list(_link_line_colors):
_link_line_colors.remove(i)
_link_line_colors.extend(list(palette))
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, leaf_font_size=None,
leaf_rotation=None, leaf_label_func=None,
show_contracted=False, link_color_func=None, ax=None,
above_threshold_color='b'):
"""
Plots the hierarchical clustering as a dendrogram.
The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The height of the top of the U-link is
the distance between its children clusters. It is also the
cophenetic distance between original observations in the two
children clusters. It is expected that the distances in Z[:,2] be
monotonic, otherwise crossings appear in the dendrogram.
Parameters
----------
Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
``None/'none'``
No truncation is performed (Default).
``'lastp'``
          The last ``p`` non-singleton clusters formed in the linkage are the only
non-leaf nodes in the linkage; they correspond to rows
``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
contracted into leaf nodes.
``'mlab'``
This corresponds to MATLAB(TM) behavior. (not implemented yet)
``'level'/'mtica'``
No more than ``p`` levels of the dendrogram tree are displayed.
This corresponds to Mathematica(TM) behavior.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
blue. If :math:`t` is less than or equal to zero, all nodes
are colored blue. If ``color_threshold`` is None or
'default', corresponding with MATLAB(TM) behavior, the
threshold is set to ``0.7*max(Z[:,2])``.
get_leaves : bool, optional
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j``, cluster node
``j`` appears in position ``i`` in the left-to-right traversal
of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
of the following strings:
``'top'``
Plots the root at the top, and plot descendent links going downwards.
(default).
``'bottom'``
Plots the root at the bottom, and plot descendent links going
upwards.
``'left'``
Plots the root at the left, and plot descendent links going right.
``'right'``
Plots the root at the right, and plot descendent links going left.
labels : ndarray, optional
By default ``labels`` is None so the index of the original observation
is used to label the leaf nodes. Otherwise, this is an :math:`n`
-sized list (or tuple). The ``labels[i]`` value is the text to put
under the :math:`i` th leaf node only if it corresponds to an original
observation and not a non-singleton cluster.
count_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) in which
        n's two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum number of original objects in its cluster
is plotted first.
``'descendent'``
The child with the maximum number of original objects in its cluster
is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
distance_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) in which
        n's two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum distance between its direct descendents is
plotted first.
``'descending'``
The child with the maximum distance between its direct descendents is
plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
show_leaf_counts : bool, optional
When True, leaf nodes representing :math:`k>1` original
observation are labeled with the number of observations they
contain in parentheses.
no_plot : bool, optional
When True, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
no_labels : bool, optional
When True, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
leaf_rotation : double, optional
Specifies the angle (in degrees) to rotate the leaf
labels. When unspecified, the rotation is based on the number of
nodes in the dendrogram (default is 0).
leaf_font_size : int, optional
Specifies the font size (in points) of the leaf labels. When
        unspecified, the size is based on the number of nodes in the
dendrogram.
leaf_label_func : lambda or function, optional
        When leaf_label_func is a callable function, it is called for each
        leaf with cluster index :math:`k < 2n-1` and is expected to return
        a string with the label for that leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do::
# First define the leaf label function.
def llf(id):
if id < n:
return str(id)
else:
return '[%d %d %1.2f]' % (id, count, R[n-id,3])
# The text for the leaf nodes is going to be big so force
# a rotation of 90 degrees.
dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
show_contracted : bool, optional
When True the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
link_color_func : callable, optional
If given, `link_color_func` is called with each non-singleton id
corresponding to each U-shaped link it will paint. The function is
expected to return the color to paint the link, encoded as a matplotlib
color string code. For example::
dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
ax : matplotlib Axes instance, optional
If None and `no_plot` is not True, the dendrogram will be plotted
on the current axes. Otherwise if `no_plot` is not True the
dendrogram will be plotted on the given ``Axes`` instance. This can be
useful if the dendrogram is part of a more complex figure.
above_threshold_color : str, optional
This matplotlib color string sets the color of the links above the
color_threshold. The default is 'b'.
Returns
-------
R : dict
A dictionary of data structures computed to render the
dendrogram. It has the following keys:
``'color_list'``
A list of color names. The k'th element represents the color of the
k'th link.
``'icoord'`` and ``'dcoord'``
Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
``'ivl'``
A list of labels corresponding to the leaf nodes.
``'leaves'``
For each i, ``H[i] == j``, cluster node ``j`` appears in position
``i`` in the left-to-right traversal of the leaves, where
:math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
``i``-th leaf node corresponds to an original observation.
Otherwise, it corresponds to a non-singleton cluster.
See Also
--------
linkage, set_link_color_palette
Examples
--------
>>> from scipy.cluster import hierarchy
>>> import matplotlib.pyplot as plt
A very basic example:
>>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,
... 400., 754., 564., 138., 219., 869., 669.])
>>> Z = hierarchy.linkage(ytdist, 'single')
>>> plt.figure()
>>> dn = hierarchy.dendrogram(Z)
Now plot in given axes, improve the color scheme and use both vertical and
horizontal orientations:
>>> hierarchy.set_link_color_palette(['m', 'c', 'y', 'k'])
>>> fig, axes = plt.subplots(1, 2, figsize=(8, 3))
>>> dn1 = hierarchy.dendrogram(Z, ax=axes[0], above_threshold_color='y',
... orientation='top')
>>> dn2 = hierarchy.dendrogram(Z, ax=axes[1], above_threshold_color='#bcbddc',
... orientation='right')
>>> hierarchy.set_link_color_palette(None) # reset to default after use
>>> plt.show()
"""
# This feature was thought about but never implemented (still useful?):
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
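# Illustrative sketch (added for clarity, not part of the original source):
# the R dictionary documented above can be obtained without any rendering by
# passing no_plot=True, e.g. for feeding a custom plotting backend:
#
#     R = dendrogram(Z, no_plot=True)
#     # R['icoord'][k] / R['dcoord'][k] hold the x/y coordinates of the k'th
#     # U-shaped link and R['color_list'][k] its color.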
Z = np.asarray(Z, order='c')
if orientation not in ["top", "left", "bottom", "right"]:
raise ValueError("orientation must be one of 'top', 'left', "
"'bottom', or 'right'")
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (int, float):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp' or truncate_mode == 'mlab':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica' or truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list = []
dcoord_list = []
color_list = []
current_color = [0]
currently_below_threshold = [False]
ivl = [] # list of leaves
if color_threshold is None or (isinstance(color_threshold, string_types) and
color_threshold == 'default'):
color_threshold = max(Z[:, 2]) * 0.7
R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
'leaves': lvs, 'color_list': color_list}
# Empty list will be filled in _dendrogram_calculate_info
contraction_marks = [] if show_contracted else None
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=2*n - 2,
iv=0.0,
ivl=ivl,
n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list,
lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
if not no_plot:
mh = max(Z[:, 2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
no_labels, color_list,
leaf_font_size=leaf_font_size,
leaf_rotation=leaf_rotation,
contraction_marks=contraction_marks,
ax=ax,
above_threshold_color=above_threshold_color)
return R
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i - n)])
else:
# Otherwise, use the id as the label for the leaf.
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i - n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode,
color_threshold=np.inf, get_leaves=True,
orientation='top', labels=None,
count_sort=False, distance_sort=False,
show_leaf_counts=False, i=-1, iv=0.0,
ivl=[], n=0, icoord_list=[], dcoord_list=[],
lvs=None, mhr=False,
current_color=[], color_list=[],
currently_below_threshold=[],
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None,
above_threshold_color='b'):
"""
Calculates the endpoints of the links as well as the labels for the
dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns
-------
A tuple (left, w, h, md), where:
* left is the independent variable coordinate of the center of the
U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
* md is the ``max(Z[*,2])`` for all nodes ``*`` below and including
the target node.
"""
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
# If the node is a leaf node but corresponds to a non-single cluster,
# its label is either the empty string or the number of original
# observations belonging to cluster i.
if 2 * n - p > i >= n:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mtica', 'level'):
if i > n and level > p:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mlab',):
pass
# Otherwise, only truncate if we have a leaf node.
#
# If the truncate_mode is mlab, the linkage has been modified
# with the truncated tree.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = int(Z[i - n, 0])
ab = int(Z[i - n, 1])
if aa > n:
# The number of singletons below cluster a
na = Z[aa - n, 3]
# The distance between a's two direct children.
da = Z[aa - n, 2]
else:
na = 1
da = 0.0
if ab > n:
nb = Z[ab - n, 3]
db = Z[ab - n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort == True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort == True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ua, iv=iv, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
h = Z[i - n, 2]
if h >= color_threshold or color_threshold <= 0:
c = above_threshold_color
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ub, iv=iv + uwa, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if not isinstance(v, string_types):
raise TypeError("link_color_func must return a matplotlib "
"color string!")
color_list.append(v)
else:
color_list.append(c)
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
"""
Determines if two different cluster assignments are equivalent.
Parameters
----------
T1 : array_like
An assignment of singleton cluster ids to flat cluster ids.
T2 : array_like
An assignment of singleton cluster ids to flat cluster ids.
Returns
-------
b : bool
Whether the flat cluster assignments `T1` and `T2` are
equivalent.
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
if type(T1) != np.ndarray:
raise TypeError('T1 must be a numpy array.')
if type(T2) != np.ndarray:
raise TypeError('T2 must be a numpy array.')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d = {}
for i in xrange(0, n):
if T1[i] in d:
if d[T1[i]] != T2[i]:
return False
else:
d[T1[i]] = T2[i]
return True
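# Illustrative sketch (added for clarity, not part of the original source):
# is_isomorphic checks whether two flat-cluster label vectors describe the
# same partition up to a relabeling of the cluster ids, e.g.
#
#     is_isomorphic([1, 1, 2], [2, 2, 1])   # -> True  (same partition)
#     is_isomorphic([1, 1, 2], [1, 2, 2])   # -> False (different partition)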
def maxdists(Z):
"""
Returns the maximum distance between any non-singleton cluster.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
maxdists : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n - 1,))
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
return MD
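# Note added for clarity (not part of the original source): the root node
# contains every merge, so the last entry always equals the largest linkage
# distance, i.e. maxdists(Z)[-1] == Z[:, 2].max().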
def maxinconsts(Z, R):
"""
Returns the maximum inconsistency coefficient for each
non-singleton cluster and its descendents.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : ndarray
The inconsistency matrix.
Returns
-------
MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
MI = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
return MI
def maxRstat(Z, R, i):
"""
Returns the maximum statistic for each non-singleton cluster and
its descendents.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See `linkage` for more
information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where
``Q(j)`` the set of all node ids corresponding to nodes below
and including ``j``.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
return MR
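# Illustrative sketch (added for clarity, not part of the original source):
# column 3 of the inconsistency matrix holds the inconsistency coefficient,
# so maxRstat(Z, R, 3) computes the same values as maxinconsts(Z, R):
#
#     R = inconsistent(Z)
#     assert (maxRstat(Z, R, 3) == maxinconsts(Z, R)).all()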
def leaders(Z, T):
"""
Returns the root nodes in a hierarchical clustering.
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z such that:
* leaf descendents belong only to flat cluster j
(i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
:math:`S(i)` is the set of leaf ids of leaf nodes descendent
with cluster node :math:`i`)
* there does not exist a leaf that is not descendent with
:math:`i` that also belongs to cluster :math:`j`
(i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
T : ndarray
The flat cluster assignment vector.
Returns
-------
L : ndarray
The leader linkage node id's stored as a k-element 1-D array
where ``k`` is the number of flat clusters found in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
M : ndarray
The leader linkage node id's stored as a k-element 1-D array where
``k`` is the number of flat clusters found in ``T``. This allows the
set of flat cluster ids to be any arbitrary set of ``k`` integers.
"""
Z = np.asarray(Z, order='c')
T = np.asarray(T, order='c')
if type(T) != np.ndarray or T.dtype != 'i':
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype='i')
M = np.zeros((kk,), dtype='i')
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError(('T is not a valid assignment vector. Error found '
'when examining linkage node %d (< 2n-1).') % s)
return (L, M)
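# Illustrative sketch (added for clarity, not part of the original source):
# tying ``leaders`` to ``fcluster``, assuming ``X`` is an (m, k) observation
# matrix:
#
#     Z = linkage(X, 'ward')
#     T = fcluster(Z, t=4, criterion='maxclust')
#     L, M = leaders(Z, T)   # L[j] is the root linkage node of flat cluster M[j]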
| bsd-3-clause |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/pandas/io/stata.py | 2 | 77178 | """
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by Joe Presbrey as part of PyDTA.
It has been extended and improved by Skipper Seabold from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
http://statsmodels.sourceforge.net/devel/
"""
import numpy as np
import sys
import struct
from dateutil.relativedelta import relativedelta
from pandas.core.base import StringMixin
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.series import Series
import datetime
from pandas import compat, to_timedelta, to_datetime, isnull, DatetimeIndex
from pandas.compat import lrange, lmap, lzip, text_type, string_types, range, \
zip, BytesIO
from pandas.util.decorators import Appender
import pandas.core.common as com
from pandas.io.common import get_filepath_or_buffer
from pandas.lib import max_len_string_array, infer_dtype
from pandas.tslib import NaT, Timestamp
_statafile_processing_params1 = """\
convert_dates : boolean, defaults to True
Convert date variables to DataFrame time values
convert_categoricals : boolean, defaults to True
Read value labels and convert columns to Categorical/Factor variables"""
_encoding_params = """\
encoding : string, None or encoding
Encoding used to parse the files. Note that Stata doesn't
support unicode. None defaults to iso-8859-1."""
_statafile_processing_params2 = """\
index : identifier of index column
identifier of column that should be used as index of the DataFrame
convert_missing : boolean, defaults to False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nans.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : boolean, defaults to True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64)
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns
order_categoricals : boolean, defaults to True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines"""
_iterator_params = """\
iterator : boolean, default False
Return StataReader object"""
_read_stata_doc = """Read Stata file into DataFrame
Parameters
----------
filepath_or_buffer : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
%s
Returns
-------
DataFrame or StataReader
Examples
--------
Read a Stata dta file:
>> df = pandas.read_stata('filename.dta')
Read a Stata dta file in 10,000 line chunks:
>> itr = pandas.read_stata('filename.dta', chunksize=10000)
>> for chunk in itr:
>> do_something(chunk)
""" % (_statafile_processing_params1, _encoding_params,
_statafile_processing_params2, _chunksize_params,
_iterator_params)
_data_method_doc = """Reads observations from Stata file, converting them into a dataframe
This is a legacy method. Use `read` in new code.
Parameters
----------
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_read_method_doc = """\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_stata_reader_doc = """\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
""" % (_statafile_processing_params1, _statafile_processing_params2,
_encoding_params, _chunksize_params)
@Appender(_read_stata_doc)
def read_stata(filepath_or_buffer, convert_dates=True,
convert_categoricals=True, encoding=None, index=None,
convert_missing=False, preserve_dtypes=True, columns=None,
order_categoricals=True, chunksize=None, iterator=False):
reader = StataReader(filepath_or_buffer,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
index=index, convert_missing=convert_missing,
preserve_dtypes=preserve_dtypes,
columns=columns,
order_categoricals=order_categoricals,
chunksize=chunksize, encoding=encoding)
if iterator or chunksize:
return reader
return reader.read()
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
def _stata_elapsed_date_to_datetime_vec(dates, fmt):
"""
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
Returns
-------
converted : Series
The converted dates
Examples
--------
>>> import pandas as pd
>>> dates = pd.Series([52])
>>> _stata_elapsed_date_to_datetime_vec(dates , "%tw")
0 1961-01-01
dtype: datetime64[ns]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
half-years since 1960h1
yearly date - ty
years since 0000
If you don't have pandas with datetime support, then you can't do
milliseconds accurately.
"""
MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
def convert_year_month_safe(year, month):
"""
Convert year and month to datetimes, using pandas vectorized versions
when the date range falls within the range supported by pandas. Otherwise
it falls back to a slower but more robust method using datetime.
"""
if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
return to_datetime(100 * year + month, format='%Y%m')
else:
index = getattr(year, 'index', None)
return Series(
[datetime.datetime(y, m, 1) for y, m in zip(year, month)],
index=index)
def convert_year_days_safe(year, days):
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Series
"""
if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
return to_datetime(year, format='%Y') + to_timedelta(days, unit='d')
else:
index = getattr(year, 'index', None)
value = [datetime.datetime(y, 1, 1) + relativedelta(days=int(d)) for
y, d in zip(year, days)]
return Series(value, index=index)
def convert_delta_safe(base, deltas, unit):
"""
Convert base dates and deltas to datetimes, using pandas vectorized
versions if the deltas satisfy restrictions required to be expressed
as dates in pandas.
"""
index = getattr(deltas, 'index', None)
if unit == 'd':
if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
values = [base + relativedelta(days=int(d)) for d in deltas]
return Series(values, index=index)
elif unit == 'ms':
if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
values = [base + relativedelta(microseconds=(int(d) * 1000)) for
d in deltas]
return Series(values, index=index)
else:
raise ValueError('format not understood')
base = to_datetime(base)
deltas = to_timedelta(deltas, unit=unit)
return base + deltas
# TODO: If/when pandas supports more than datetime64[ns], this should be improved to use correct range, e.g. datetime[Y] for yearly
bad_locs = np.isnan(dates)
has_bad_values = False
if bad_locs.any():
has_bad_values = True
data_col = Series(dates)
data_col[bad_locs] = 1.0 # Replace with NaT
dates = dates.astype(np.int64)
if fmt in ["%tc", "tc"]: # Delta ms relative to base
base = stata_epoch
ms = dates
conv_dates = convert_delta_safe(base, ms, 'ms')
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Encountered %tC format. Leaving in Stata Internal Format.")
conv_dates = Series(dates, dtype=np.object)
if has_bad_values:
conv_dates[bad_locs] = np.nan
return conv_dates
elif fmt in ["%td", "td", "%d", "d"]: # Delta days relative to base
base = stata_epoch
days = dates
conv_dates = convert_delta_safe(base, days, 'd')
elif fmt in ["%tw", "tw"]: # does not count leap days - 7 days is a week
year = stata_epoch.year + dates // 52
days = (dates % 52) * 7
conv_dates = convert_year_days_safe(year, days)
elif fmt in ["%tm", "tm"]: # Delta months relative to base
year = stata_epoch.year + dates // 12
month = (dates % 12) + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%tq", "tq"]: # Delta quarters relative to base
year = stata_epoch.year + dates // 4
month = (dates % 4) * 3 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%th", "th"]: # Delta half-years relative to base
year = stata_epoch.year + dates // 2
month = (dates % 2) * 6 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%ty", "ty"]: # Years -- not delta
year = dates
month = np.ones_like(dates)
conv_dates = convert_year_month_safe(year, month)
else:
raise ValueError("Date fmt %s not understood" % fmt)
if has_bad_values: # Restore NaT for bad values
conv_dates[bad_locs] = NaT
return conv_dates
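# Worked example (added for clarity, not part of the original source): with
# fmt '%tm' the SIF value counts months since January 1960, so 722 maps to
# year 1960 + 722 // 12 = 2020 and month 722 % 12 + 1 = 3, i.e. 2020-03-01.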
def _datetime_to_stata_elapsed_vec(dates, fmt):
"""
Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
Series or array containing datetime.datetime or datetime64[ns] to
convert to the Stata Internal Format given by fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
"""
index = dates.index
NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
US_PER_DAY = NS_PER_DAY / 1000
def parse_dates_safe(dates, delta=False, year=False, days=False):
d = {}
if com.is_datetime64_dtype(dates.values):
if delta:
delta = dates - stata_epoch
d['delta'] = delta.values.astype(
np.int64) // 1000 # microseconds
if days or year:
dates = DatetimeIndex(dates)
d['year'], d['month'] = dates.year, dates.month
if days:
days = (dates.astype(np.int64) -
to_datetime(d['year'], format='%Y').astype(np.int64))
d['days'] = days // NS_PER_DAY
elif infer_dtype(dates) == 'datetime':
if delta:
delta = dates.values - stata_epoch
f = lambda x: \
US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
v = np.vectorize(f)
d['delta'] = v(delta)
if year:
year_month = dates.apply(lambda x: 100 * x.year + x.month)
d['year'] = year_month.values // 100
d['month'] = (year_month.values - d['year'] * 100)
if days:
f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days
v = np.vectorize(f)
d['days'] = v(dates)
else:
raise ValueError('Columns containing dates must contain either '
'datetime64, datetime.datetime or null values.')
return DataFrame(d, index=index)
bad_loc = isnull(dates)
index = dates.index
if bad_loc.any():
dates = Series(dates)
if com.is_datetime64_dtype(dates):
dates[bad_loc] = to_datetime(stata_epoch)
else:
dates[bad_loc] = stata_epoch
if fmt in ["%tc", "tc"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta / 1000
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Stata Internal Format tC not supported.")
conv_dates = dates
elif fmt in ["%td", "td"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta // US_PER_DAY
elif fmt in ["%tw", "tw"]:
d = parse_dates_safe(dates, year=True, days=True)
conv_dates = (52 * (d.year - stata_epoch.year) + d.days // 7)
elif fmt in ["%tm", "tm"]:
d = parse_dates_safe(dates, year=True)
conv_dates = (12 * (d.year - stata_epoch.year) + d.month - 1)
elif fmt in ["%tq", "tq"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 2 * (d.year - stata_epoch.year) + \
(d.month > 6).astype(np.int)
elif fmt in ["%ty", "ty"]:
d = parse_dates_safe(dates, year=True)
conv_dates = d.year
else:
raise ValueError("fmt %s not understood" % fmt)
conv_dates = Series(conv_dates, dtype=np.float64)
missing_value = struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0]
conv_dates[bad_loc] = missing_value
return Series(conv_dates, index=index)
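# Worked example (added for clarity, not part of the original source): the
# '%tm' branch computes 12 * (year - 1960) + month - 1, so
# datetime.datetime(2020, 3, 1) encodes to 12 * 60 + 3 - 1 = 722, the inverse
# of the conversion sketched after _stata_elapsed_date_to_datetime_vec above.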
excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer) characters.
Column '%s' does not satisfy this restriction.
"""
class PossiblePrecisionLoss(Warning):
pass
precision_loss_doc = """
Column converted from %s to %s, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""
class ValueLabelTypeMismatch(Warning):
pass
value_label_mismatch_doc = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
Stata data file created has not lost information due to duplicate labels.
"""
class InvalidColumnName(Warning):
pass
invalid_name_doc = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:
{0}
If this is not what you expect, please make sure you have Stata-compliant
column names in your DataFrame (strings only, max 32 characters, only alphanumerics and
underscores, no Stata reserved words)
"""
def _cast_to_stata_types(data):
"""Checks the dtypes of the columns of a pandas DataFrame for
compatibility with the data types and ranges supported by Stata, and
converts if necessary.
Parameters
----------
data : DataFrame
The DataFrame to check and convert
Notes
-----
Numeric columns in Stata must be one of int8, int16, int32, float32 or
float64, with some additional value restrictions. int8 and int16 columns
are checked for violations of the value restrictions and
upcast if needed. int64 data is not usable in Stata, and so it is
downcast to int32 whenever the values are in the int32 range, and
sidecast to float64 when larger than this range. If the int64 values
are outside of the range of those perfectly representable as float64 values,
a warning is raised.
bool columns are cast to int8. uint columns are converted to int of the same
size if there is no loss in precision, otherwise they are upcast to a larger
type. uint64 is currently not supported since it is converted to object in
a DataFrame.
"""
ws = ''
# original, if small, if large
conversion_data = ((np.bool, np.int8, np.int8),
(np.uint8, np.int8, np.int16),
(np.uint16, np.int16, np.int32),
(np.uint32, np.int32, np.int64))
for col in data:
dtype = data[col].dtype
# Cast from unsupported types to supported types
for c_data in conversion_data:
if dtype == c_data[0]:
if data[col].max() <= np.iinfo(c_data[1]).max:
dtype = c_data[1]
else:
dtype = c_data[2]
if c_data[2] == np.float64: # Warn if necessary
if data[col].max() >= 2 ** 53:
ws = precision_loss_doc % ('uint64', 'float64')
data[col] = data[col].astype(dtype)
# Check values and upcast if necessary
if dtype == np.int8:
if data[col].max() > 100 or data[col].min() < -127:
data[col] = data[col].astype(np.int16)
elif dtype == np.int16:
if data[col].max() > 32740 or data[col].min() < -32767:
data[col] = data[col].astype(np.int32)
elif dtype == np.int64:
if data[col].max() <= 2147483620 and data[col].min() >= -2147483647:
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53:
ws = precision_loss_doc % ('int64', 'float64')
if ws:
import warnings
warnings.warn(ws, PossiblePrecisionLoss)
return data
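# Illustrative sketch (hypothetical data, not part of the original source):
#
#     df = DataFrame({'a': np.array([True, False]),
#                     'b': np.array([1, 2 ** 60], dtype=np.int64)})
#     out = _cast_to_stata_types(df)
#     # 'a' is cast bool -> int8; 'b' exceeds the int32 range so it becomes
#     # float64, and since 2 ** 60 >= 2 ** 53 a PossiblePrecisionLoss warning
#     # is emitted.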
class StataValueLabel(object):
"""
Parse a categorical column and prepare formatted output
Parameters
-----------
catarray : Series
    Categorical Series to encode as Stata value labels
Methods
-------
generate_value_label
"""
def __init__(self, catarray):
self.labname = catarray.name
categories = catarray.cat.categories
self.value_labels = list(zip(np.arange(len(categories)), categories))
self.value_labels.sort(key=lambda x: x[0])
self.text_len = np.int32(0)
self.off = []
self.val = []
self.txt = []
self.n = 0
# Compute lengths and setup lists of offsets and labels
for vl in self.value_labels:
category = vl[1]
if not isinstance(category, string_types):
category = str(category)
import warnings
warnings.warn(value_label_mismatch_doc.format(catarray.name),
ValueLabelTypeMismatch)
self.off.append(self.text_len)
self.text_len += len(category) + 1 # +1 for the padding
self.val.append(vl[0])
self.txt.append(category)
self.n += 1
if self.text_len > 32000:
raise ValueError('Stata value labels for a single variable must '
'have a combined length less than 32,000 '
'characters.')
# Ensure int32
self.off = np.array(self.off, dtype=np.int32)
self.val = np.array(self.val, dtype=np.int32)
# Total length
self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len
def _encode(self, s):
"""
Python 3 compatibility shim
"""
if compat.PY3:
return s.encode(self._encoding)
else:
return s
def generate_value_label(self, byteorder, encoding):
"""
Parameters
----------
byteorder : str
Byte order of the output
encoding : str
File encoding
Returns
-------
value_label : bytes
Bytes containing the formatted value label
"""
self._encoding = encoding
bio = BytesIO()
null_string = '\x00'
null_byte = b'\x00'
# len
bio.write(struct.pack(byteorder + 'i', self.len))
# labname
labname = self._encode(_pad_bytes(self.labname[:32], 33))
bio.write(labname)
# padding - 3 bytes
for i in range(3):
bio.write(struct.pack('c', null_byte))
# value_label_table
# n - int32
bio.write(struct.pack(byteorder + 'i', self.n))
# textlen - int32
bio.write(struct.pack(byteorder + 'i', self.text_len))
# off - int32 array (n elements)
for offset in self.off:
bio.write(struct.pack(byteorder + 'i', offset))
# val - int32 array (n elements)
for value in self.val:
bio.write(struct.pack(byteorder + 'i', value))
# txt - Text labels, null terminated
for text in self.txt:
bio.write(self._encode(text + null_string))
bio.seek(0)
return bio.read()
class StataMissingValue(StringMixin):
"""
An observation's missing value.
Parameters
-----------
value : int8, int16, int32, float32 or float64
The Stata missing value code
Attributes
----------
string : string
String representation of the Stata missing value
value : int8, int16, int32, float32 or float64
The original encoded missing value
Notes
-----
More information: <http://www.stata.com/help.cgi?missing>
Integer missing values map the codes '.', '.a', ..., '.z' to the ranges
101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
2147483647 (for int32). Missing values for floating point data types are
more complex but the pattern is simple to discern from the following table.
np.float32 missing values (float in Stata)
0000007f .
0008007f .a
0010007f .b
...
00c0007f .x
00c8007f .y
00d0007f .z
np.float64 missing values (double in Stata)
000000000000e07f .
000000000001e07f .a
000000000002e07f .b
...
000000000018e07f .x
000000000019e07f .y
00000000001ae07f .z
"""
# Construct a dictionary of missing values
MISSING_VALUES = {}
bases = (101, 32741, 2147483621)
for b in bases:
# Conversion to long to avoid hash issues on 32 bit platforms #8968
MISSING_VALUES[compat.long(b)] = '.'
for i in range(1, 27):
MISSING_VALUES[compat.long(i + b)] = '.' + chr(96 + i)
float32_base = b'\x00\x00\x00\x7f'
increment = struct.unpack('<i', b'\x00\x08\x00\x00')[0]
for i in range(27):
value = struct.unpack('<f', float32_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('<i', struct.pack('<f', value))[0] + increment
float32_base = struct.pack('<i', int_value)
float64_base = b'\x00\x00\x00\x00\x00\x00\xe0\x7f'
increment = struct.unpack('q', b'\x00\x00\x00\x00\x00\x01\x00\x00')[0]
for i in range(27):
value = struct.unpack('<d', float64_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('q', struct.pack('<d', value))[0] + increment
float64_base = struct.pack('q', int_value)
BASE_MISSING_VALUES = {'int8': 101,
'int16': 32741,
'int32': 2147483621,
'float32': struct.unpack('<f', float32_base)[0],
'float64': struct.unpack('<d', float64_base)[0]}
def __init__(self, value):
self._value = value
# Conversion to long to avoid hash issues on 32 bit platforms #8968
value = compat.long(value) if value < 2147483648 else float(value)
self._str = self.MISSING_VALUES[value]
string = property(lambda self: self._str,
doc="The Stata representation of the missing value: "
"'.', '.a'..'.z'")
value = property(lambda self: self._value,
doc='The binary representation of the missing value.')
def __unicode__(self):
return self.string
def __repr__(self):
# not perfect :-/
return "%s(%s)" % (self.__class__, self)
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.string == other.string and self.value == other.value)
@classmethod
def get_base_missing_value(cls, dtype):
if dtype == np.int8:
value = cls.BASE_MISSING_VALUES['int8']
elif dtype == np.int16:
value = cls.BASE_MISSING_VALUES['int16']
elif dtype == np.int32:
value = cls.BASE_MISSING_VALUES['int32']
elif dtype == np.float32:
value = cls.BASE_MISSING_VALUES['float32']
elif dtype == np.float64:
value = cls.BASE_MISSING_VALUES['float64']
else:
raise ValueError('Unsupported dtype')
return value
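# Illustrative sketch (added for clarity, not part of the original source):
# decoding integer missing-value codes with the table built above.
#
#     StataMissingValue(101).string   # -> '.'   (int8 system missing)
#     StataMissingValue(102).string   # -> '.a'  (101 + 1 maps to '.a')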
class StataParser(object):
_default_encoding = 'iso-8859-1'
def __init__(self, encoding):
self._encoding = encoding
#type code.
#--------------------
#str1 1 = 0x01
#str2 2 = 0x02
#...
#str244 244 = 0xf4
#byte 251 = 0xfb (sic)
#int 252 = 0xfc
#long 253 = 0xfd
#float 254 = 0xfe
#double 255 = 0xff
#--------------------
#NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100
# we're going to drop the label and cast to int
self.DTYPE_MAP = \
dict(
lzip(range(1, 245), ['a' + str(i) for i in range(1, 245)]) +
[
(251, np.int8),
(252, np.int16),
(253, np.int32),
(254, np.float32),
(255, np.float64)
]
)
self.DTYPE_MAP_XML = \
dict(
[
(32768, np.uint8), # Keys to GSO
(65526, np.float64),
(65527, np.float32),
(65528, np.int32),
(65529, np.int16),
(65530, np.int8)
]
)
self.TYPE_MAP = lrange(251) + list('bhlfd')
self.TYPE_MAP_XML = \
dict(
[
(32768, 'L'),
(65526, 'd'),
(65527, 'f'),
(65528, 'l'),
(65529, 'h'),
(65530, 'b')
]
)
#NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
float32_min = b'\xff\xff\xff\xfe'
float32_max = b'\xff\xff\xff\x7e'
float64_min = b'\xff\xff\xff\xff\xff\xff\xef\xff'
float64_max = b'\xff\xff\xff\xff\xff\xff\xdf\x7f'
self.VALID_RANGE = \
{
'b': (-127, 100),
'h': (-32767, 32740),
'l': (-2147483647, 2147483620),
'f': (np.float32(struct.unpack('<f', float32_min)[0]),
np.float32(struct.unpack('<f', float32_max)[0])),
'd': (np.float64(struct.unpack('<d', float64_min)[0]),
np.float64(struct.unpack('<d', float64_max)[0]))
}
self.OLD_TYPE_MAPPING = \
{
'i': 252,
'f': 254,
'b': 251
}
# These missing values are the generic '.' in Stata, and are used
# to replace nans
self.MISSING_VALUES = \
{
'b': 101,
'h': 32741,
'l': 2147483621,
'f': np.float32(struct.unpack('<f', b'\x00\x00\x00\x7f')[0]),
'd': np.float64(struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
}
self.NUMPY_TYPE_MAP = \
{
'b': 'i1',
'h': 'i2',
'l': 'i4',
'f': 'f4',
'd': 'f8',
'L': 'u8'
}
# Reserved words cannot be used as variable names
self.RESERVED_WORDS = ('aggregate', 'array', 'boolean', 'break',
'byte', 'case', 'catch', 'class', 'colvector',
'complex', 'const', 'continue', 'default',
'delegate', 'delete', 'do', 'double', 'else',
'eltypedef', 'end', 'enum', 'explicit',
'export', 'external', 'float', 'for', 'friend',
'function', 'global', 'goto', 'if', 'inline',
'int', 'local', 'long', 'NULL', 'pragma',
'protected', 'quad', 'rowvector', 'short',
'typedef', 'typename', 'virtual')
def _decode_bytes(self, str, errors=None):
if compat.PY3 or self._encoding is not None:
return str.decode(self._encoding, errors)
else:
return str
class StataReader(StataParser):
__doc__ = _stata_reader_doc
def __init__(self, path_or_buf, convert_dates=True,
convert_categoricals=True, index=None,
convert_missing=False, preserve_dtypes=True,
columns=None, order_categoricals=True,
encoding='iso-8859-1', chunksize=None):
super(StataReader, self).__init__(encoding)
self.col_sizes = ()
# Arguments to the reader (can be temporarily overridden in
# calls to read).
self._convert_dates = convert_dates
self._convert_categoricals = convert_categoricals
self._index = index
self._convert_missing = convert_missing
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
self._encoding = encoding
self._chunksize = chunksize
# State variables for the file
self._has_string_data = False
self._missing_values = False
self._can_read_value_labels = False
self._column_selector_set = False
self._value_labels_read = False
self._data_read = False
self._dtype = None
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
if isinstance(path_or_buf, str):
path_or_buf, encoding = get_filepath_or_buffer(
path_or_buf, encoding=self._default_encoding
)
if isinstance(path_or_buf, (str, compat.text_type, bytes)):
self.path_or_buf = open(path_or_buf, 'rb')
else:
# Copy to BytesIO, and ensure no encoding
contents = path_or_buf.read()
try:
contents = contents.encode(self._default_encoding)
except:
pass
self.path_or_buf = BytesIO(contents)
self._read_header()
def _read_header(self):
first_char = self.path_or_buf.read(1)
if struct.unpack('c', first_char)[0] == b'<':
# format 117 or higher (XML like)
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117]:
raise ValueError("Version of given Stata file is not 104, "
"105, 108, 113 (Stata 8/9), 114 (Stata "
"10/11), 115 (Stata 12) or 117 (Stata 13)")
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == b"MSF" and '>' or '<'
self.path_or_buf.read(15) # </byteorder><K>
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
self.path_or_buf.read(11) # </N><label>
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
self.data_label = self._null_terminate(self.path_or_buf.read(strlen))
self.path_or_buf.read(19) # </label><timestamp>
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
self.time_stamp = self._null_terminate(self.path_or_buf.read(strlen))
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
seek_vartypes = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 16
seek_varnames = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
seek_sortlist = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
seek_formats = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9
seek_value_label_names = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19
# Stata 117 data files do not follow the described format. This is
# a work around that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
self.path_or_buf.read(8) # <variable_labels>, throw away
seek_variable_labels = seek_value_label_names + (33*self.nvar) + 20 + 17
# Below is the original, correct code (per Stata sta format doc,
# although this is not followed in actual 117 dtas)
#seek_variable_labels = struct.unpack(
# self.byteorder + 'q', self.path_or_buf.read(8))[0] + 17
self.path_or_buf.read(8) # <characteristics>
self.data_location = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6
self.seek_strls = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 7
self.seek_value_labels = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 14
#self.path_or_buf.read(8) # </stata_dta>
#self.path_or_buf.read(8) # EOF
self.path_or_buf.seek(seek_vartypes)
typlist = [struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
for i in range(self.nvar)]
self.typlist = [None]*self.nvar
try:
i = 0
for typ in typlist:
if typ <= 2045:
self.typlist[i] = typ
#elif typ == 32768:
# raise ValueError("Long strings are not supported")
else:
self.typlist[i] = self.TYPE_MAP_XML[typ]
i += 1
except:
raise ValueError("cannot convert stata types [{0}]"
.format(','.join(typlist)))
self.dtyplist = [None]*self.nvar
try:
i = 0
for typ in typlist:
if typ <= 2045:
self.dtyplist[i] = str(typ)
else:
self.dtyplist[i] = self.DTYPE_MAP_XML[typ]
i += 1
except:
raise ValueError("cannot convert stata dtypes [{0}]"
.format(','.join(typlist)))
self.path_or_buf.seek(seek_varnames)
self.varlist = [self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)]
self.path_or_buf.seek(seek_sortlist)
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.path_or_buf.seek(seek_formats)
self.fmtlist = [self._null_terminate(self.path_or_buf.read(49))
for i in range(self.nvar)]
self.path_or_buf.seek(seek_value_label_names)
self.lbllist = [self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)]
self.path_or_buf.seek(seek_variable_labels)
self.vlblist = [self._null_terminate(self.path_or_buf.read(81))
for i in range(self.nvar)]
else:
# header
self.format_version = struct.unpack('b', first_char)[0]
if self.format_version not in [104, 105, 108, 113, 114, 115]:
raise ValueError("Version of given Stata file is not 104, "
"105, 108, 113 (Stata 8/9), 114 (Stata "
"10/11), 115 (Stata 12) or 117 (Stata 13)")
self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[0] == 0x1 and '>' or '<'
self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.nobs = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
if self.format_version > 105:
self.data_label = self._null_terminate(self.path_or_buf.read(81))
else:
self.data_label = self._null_terminate(self.path_or_buf.read(32))
if self.format_version > 104:
self.time_stamp = self._null_terminate(self.path_or_buf.read(18))
# descriptors
if self.format_version > 108:
typlist = [ord(self.path_or_buf.read(1))
for i in range(self.nvar)]
else:
typlist = [
self.OLD_TYPE_MAPPING[
self._decode_bytes(self.path_or_buf.read(1))
] for i in range(self.nvar)
]
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata types [{0}]"
.format(','.join(typlist)))
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata dtypes [{0}]"
.format(','.join(typlist)))
if self.format_version > 108:
self.varlist = [self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)]
else:
self.varlist = [self._null_terminate(self.path_or_buf.read(9))
for i in range(self.nvar)]
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
if self.format_version > 113:
self.fmtlist = [self._null_terminate(self.path_or_buf.read(49))
for i in range(self.nvar)]
elif self.format_version > 104:
self.fmtlist = [self._null_terminate(self.path_or_buf.read(12))
for i in range(self.nvar)]
else:
self.fmtlist = [self._null_terminate(self.path_or_buf.read(7))
for i in range(self.nvar)]
if self.format_version > 108:
self.lbllist = [self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)]
else:
self.lbllist = [self._null_terminate(self.path_or_buf.read(9))
for i in range(self.nvar)]
if self.format_version > 105:
self.vlblist = [self._null_terminate(self.path_or_buf.read(81))
for i in range(self.nvar)]
else:
self.vlblist = [self._null_terminate(self.path_or_buf.read(32))
for i in range(self.nvar)]
# ignore expansion fields (Format 105 and later)
# When reading, read five bytes; the last four bytes now tell you
# the size of the next read, which you discard. You then continue
# like this until you read 5 bytes of zeros.
if self.format_version > 104:
while True:
data_type = struct.unpack(self.byteorder + 'b',
self.path_or_buf.read(1))[0]
if self.format_version > 108:
data_len = struct.unpack(self.byteorder + 'i',
self.path_or_buf.read(4))[0]
else:
data_len = struct.unpack(self.byteorder + 'h',
self.path_or_buf.read(2))[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
self.has_string_data = len([x for x in self.typlist
if type(x) is int]) > 0
# calculate size of a data record
self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)
# remove format details from %td
self.fmtlist = ["%td" if x.startswith("%td") else x for x in self.fmtlist]
def _calcsize(self, fmt):
return (type(fmt) is int and fmt
or struct.calcsize(self.byteorder + fmt))
def _null_terminate(self, s):
if compat.PY3 or self._encoding is not None: # have bytes not strings,
# so must decode
s = s.partition(b"\0")[0]
return s.decode(self._encoding or self._default_encoding)
else:
null_byte = "\0"
try:
return s.lstrip(null_byte)[:s.index(null_byte)]
except:
return s
def _read_value_labels(self):
if self.format_version <= 108:
# Value labels are not supported in version 108 and earlier.
return
if self._value_labels_read:
# Don't read twice
return
if self.format_version >= 117:
self.path_or_buf.seek(self.seek_value_labels)
else:
offset = self.nobs * self._dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
self._value_labels_read = True
self.value_label_dict = dict()
while True:
if self.format_version >= 117:
if self.path_or_buf.read(5) == b'</val': # <lbl>
break # end of variable label table
slength = self.path_or_buf.read(4)
if not slength:
break # end of variable label table (format < 117)
labname = self._null_terminate(self.path_or_buf.read(33))
self.path_or_buf.read(3) # padding
n = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
txtlen = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
off = []
for i in range(n):
off.append(struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0])
val = []
for i in range(n):
val.append(struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0])
txt = self.path_or_buf.read(txtlen)
self.value_label_dict[labname] = dict()
for i in range(n):
self.value_label_dict[labname][val[i]] = (
self._null_terminate(txt[off[i]:])
)
if self.format_version >= 117:
self.path_or_buf.read(6) # </lbl>
self._value_labels_read = True
def _read_strls(self):
self.path_or_buf.seek(self.seek_strls)
self.GSO = dict()
while True:
if self.path_or_buf.read(3) != b'GSO':
break
v_o = struct.unpack(self.byteorder + 'Q', self.path_or_buf.read(8))[0]
typ = struct.unpack('B', self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
va = self.path_or_buf.read(length)
if typ == 130:
va = va[0:-1].decode(self._encoding or self._default_encoding)
self.GSO[v_o] = va
# legacy
@Appender('DEPRECATED: ' + _data_method_doc)
def data(self, **kwargs):
import warnings
warnings.warn("'data' is deprecated, use 'read' instead")
if self._data_read:
raise Exception("Data has already been read.")
self._data_read = True
return self.read(None, **kwargs)
def __iter__(self):
try:
if self._chunksize:
while True:
yield self.read(self._chunksize)
else:
yield self.read()
except StopIteration:
pass
def get_chunk(self, size=None):
"""
Reads lines from Stata file and returns as dataframe
Parameters
----------
        size : int, defaults to None
            Number of lines to read.  If None, falls back to the reader's
            chunksize; if no chunksize was set, the remaining rows are read.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
@Appender(_read_method_doc)
def read(self, nrows=None, convert_dates=None,
convert_categoricals=None, index=None,
convert_missing=None, preserve_dtypes=None,
columns=None, order_categoricals=None):
# Handle empty file or chunk. If reading incrementally raise
# StopIteration. If reading the whole thing return an empty
# data frame.
if (self.nobs == 0) and (nrows is None):
self._can_read_value_labels = True
self._data_read = True
return DataFrame(columns=self.varlist)
# Handle options
if convert_dates is None:
convert_dates = self._convert_dates
if convert_categoricals is None:
convert_categoricals = self._convert_categoricals
if convert_missing is None:
convert_missing = self._convert_missing
if preserve_dtypes is None:
preserve_dtypes = self._preserve_dtypes
if columns is None:
columns = self._columns
if order_categoricals is None:
order_categoricals = self._order_categoricals
if nrows is None:
nrows = self.nobs
if (self.format_version >= 117) and (self._dtype is None):
self._can_read_value_labels = True
self._read_strls()
# Setup the dtype.
if self._dtype is None:
dtype = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
dtype.append(('s' + str(i), self.byteorder + self.NUMPY_TYPE_MAP[typ]))
else:
dtype.append(('s' + str(i), 'S' + str(typ)))
dtype = np.dtype(dtype)
self._dtype = dtype
# Read data
dtype = self._dtype
max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
read_len = nrows * dtype.itemsize
read_len = min(read_len, max_read_len)
if read_len <= 0:
# Iterator has finished, should never be here unless
# we are reading the file incrementally
self._read_value_labels()
raise StopIteration
offset = self._lines_read * dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
read_lines = min(nrows, self.nobs - self._lines_read)
data = np.frombuffer(self.path_or_buf.read(read_len), dtype=dtype,
count=read_lines)
self._lines_read += read_lines
if self._lines_read == self.nobs:
self._can_read_value_labels = True
self._data_read = True
# if necessary, swap the byte order to native here
if self.byteorder != self._native_byteorder:
data = data.byteswap().newbyteorder()
if convert_categoricals:
self._read_value_labels()
        if len(data) == 0:
data = DataFrame(columns=self.varlist, index=index)
else:
data = DataFrame.from_records(data, index=index)
data.columns = self.varlist
# If index is not specified, use actual row number rather than
# restarting at 0 for each chunk.
if index is None:
ix = np.arange(self._lines_read - read_lines, self._lines_read)
data = data.set_index(ix)
if columns is not None:
data = self._do_select_columns(data, columns)
# Decode strings
for col, typ in zip(data, self.typlist):
if type(typ) is int:
data[col] = data[col].apply(self._null_terminate, convert_dtype=True)
data = self._insert_strls(data)
cols_ = np.where(self.dtyplist)[0]
# Convert columns (if needed) to match input type
index = data.index
requires_type_conversion = False
data_formatted = []
for i in cols_:
if self.dtyplist[i] is not None:
col = data.columns[i]
dtype = data[col].dtype
if (dtype != np.dtype(object)) and (dtype != self.dtyplist[i]):
requires_type_conversion = True
data_formatted.append((col, Series(data[col], index, self.dtyplist[i])))
else:
data_formatted.append((col, data[col]))
if requires_type_conversion:
data = DataFrame.from_items(data_formatted)
del data_formatted
self._do_convert_missing(data, convert_missing)
if convert_dates:
cols = np.where(lmap(lambda x: x in _date_formats,
self.fmtlist))[0]
for i in cols:
col = data.columns[i]
data[col] = _stata_elapsed_date_to_datetime_vec(data[col], self.fmtlist[i])
if convert_categoricals and self.value_label_dict:
data = self._do_convert_categoricals(data, self.value_label_dict, self.lbllist,
order_categoricals)
if not preserve_dtypes:
retyped_data = []
convert = False
for col in data:
dtype = data[col].dtype
if dtype in (np.float16, np.float32):
dtype = np.float64
convert = True
elif dtype in (np.int8, np.int16, np.int32):
dtype = np.int64
convert = True
retyped_data.append((col, data[col].astype(dtype)))
if convert:
data = DataFrame.from_items(retyped_data)
return data
def _do_convert_missing(self, data, convert_missing):
# Check for missing values, and replace if found
for i, colname in enumerate(data):
fmt = self.typlist[i]
if fmt not in self.VALID_RANGE:
continue
nmin, nmax = self.VALID_RANGE[fmt]
series = data[colname]
missing = np.logical_or(series < nmin, series > nmax)
if not missing.any():
continue
if convert_missing: # Replacement follows Stata notation
missing_loc = np.argwhere(missing)
umissing, umissing_loc = np.unique(series[missing],
return_inverse=True)
replacement = Series(series, dtype=np.object)
for j, um in enumerate(umissing):
missing_value = StataMissingValue(um)
loc = missing_loc[umissing_loc == j]
replacement.iloc[loc] = missing_value
else: # All replacements are identical
dtype = series.dtype
if dtype not in (np.float32, np.float64):
dtype = np.float64
replacement = Series(series, dtype=dtype)
replacement[missing] = np.nan
data[colname] = replacement
def _insert_strls(self, data):
if not hasattr(self, 'GSO') or len(self.GSO) == 0:
return data
for i, typ in enumerate(self.typlist):
if typ != 'L':
continue
data.iloc[:, i] = [self.GSO[k] for k in data.iloc[:, i]]
return data
def _do_select_columns(self, data, columns):
if not self._column_selector_set:
column_set = set(columns)
if len(column_set) != len(columns):
raise ValueError('columns contains duplicate entries')
unmatched = column_set.difference(data.columns)
if unmatched:
raise ValueError('The following columns were not found in the '
'Stata data set: ' +
', '.join(list(unmatched)))
# Copy information for retained columns for later processing
dtyplist = []
typlist = []
fmtlist = []
lbllist = []
matched = set()
for i, col in enumerate(data.columns):
if col in column_set:
matched.update([col])
dtyplist.append(self.dtyplist[i])
typlist.append(self.typlist[i])
fmtlist.append(self.fmtlist[i])
lbllist.append(self.lbllist[i])
self.dtyplist = dtyplist
self.typlist = typlist
self.fmtlist = fmtlist
self.lbllist = lbllist
self._column_selector_set = True
return data[columns]
def _do_convert_categoricals(self, data, value_label_dict, lbllist, order_categoricals):
"""
Converts categorical columns to Categorical type.
"""
value_labels = list(compat.iterkeys(value_label_dict))
cat_converted_data = []
for col, label in zip(data, lbllist):
if label in value_labels:
# Explicit call with ordered=True
cat_data = Categorical(data[col], ordered=order_categoricals)
categories = []
for category in cat_data.categories:
if category in value_label_dict[label]:
categories.append(value_label_dict[label][category])
else:
categories.append(category) # Partially labeled
cat_data.categories = categories
# TODO: is the next line needed above in the data(...) method?
cat_data = Series(cat_data, index=data.index)
cat_converted_data.append((col, cat_data))
else:
cat_converted_data.append((col, data[col]))
data = DataFrame.from_items(cat_converted_data)
return data
def data_label(self):
"""Returns data label of Stata file"""
return self.data_label
def variable_labels(self):
"""Returns variable labels as a dict, associating each variable name
with corresponding label
"""
return dict(zip(self.varlist, self.vlblist))
def value_labels(self):
"""Returns a dict, associating each variable name a dict, associating
each value its corresponding label
"""
if not self._value_labels_read:
self._read_value_labels()
return self.value_label_dict
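# Illustrative helper (not part of the original module and never called):
# consuming the reader class above in chunks. It assumes the enclosing class
# is pandas' StataReader and that its constructor accepts a path plus a
# ``chunksize`` keyword; the file name is a placeholder.
def _example_chunked_read(path='example.dta', chunksize=1000):
    reader = StataReader(path, chunksize=chunksize)
    # __iter__ yields DataFrames of at most ``chunksize`` rows until the
    # underlying .dta file is exhausted
    return [chunk for chunk in reader]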
def _open_file_binary_write(fname, encoding):
if hasattr(fname, 'write'):
#if 'b' not in fname.mode:
return fname
return open(fname, "wb")
def _set_endianness(endianness):
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError("Endianness %s not understood" % endianness)
def _pad_bytes(name, length):
"""
    Takes a char string and pads it with null bytes until it is `length` characters long
"""
return name + "\x00" * (length - len(name))
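# Illustrative example of the padding helper above (never called; values are
# arbitrary):
def _example_pad_bytes():
    # 'abc' padded to a length of 5 becomes 'abc\x00\x00'
    return _pad_bytes("abc", 5)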
def _convert_datetime_to_stata_type(fmt):
"""
Converts from one of the stata date formats to a type in TYPE_MAP
"""
if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq",
"%tq", "th", "%th", "ty", "%ty"]:
return np.float64 # Stata expects doubles for SIFs
else:
raise ValueError("fmt %s not understood" % fmt)
def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError(
"convert_dates key is not in varlist and is not an int"
)
new_dict.update({key: convert_dates[key]})
return new_dict
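# Illustrative sketch of the key normalisation above (never called; the
# column names and date format are made up for demonstration):
def _example_convert_dates_keys():
    varlist = ['id', 'value', 'date']
    convert_dates = {'date': 'tw'}
    # the column name is replaced by its index in varlist and the format
    # gains a leading '%', giving {2: '%tw'}
    return _maybe_convert_to_int_keys(convert_dates, varlist)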
def _dtype_to_stata_type(dtype, column):
"""
    Converts dtype types to stata types. Returns the character whose ordinal
    is the Stata type code (for string columns, the ordinal is the string
    length). See TYPE_MAP and comments for an explanation. This is also
    explained in the dta spec.
1 - 244 are strings of this length
Pandas Stata
251 - chr(251) - for int8 byte
252 - chr(252) - for int16 int
253 - chr(253) - for int32 long
254 - chr(254) - for float32 float
255 - chr(255) - for double double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type == np.string_:
return chr(dtype.itemsize)
elif dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(com._ensure_object(column.values))
return chr(max(itemsize, 1))
elif dtype == np.float64:
return chr(255)
elif dtype == np.float32:
return chr(254)
elif dtype == np.int32:
return chr(253)
elif dtype == np.int16:
return chr(252)
elif dtype == np.int8:
return chr(251)
else: # pragma : no cover
raise ValueError("Data type %s not currently understood. "
"Please report an error to the developers." % dtype)
def _dtype_to_default_stata_fmt(dtype, column):
"""
Maps numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
"""
# TODO: Refactor to combine type with format
# TODO: expand this to handle a default datetime format?
if dtype.type == np.object_:
inferred_dtype = infer_dtype(column.dropna())
if not (inferred_dtype in ('string', 'unicode')
or len(column) == 0):
raise ValueError('Writing general object arrays is not supported')
itemsize = max_len_string_array(com._ensure_object(column.values))
if itemsize > 244:
raise ValueError(excessive_string_length_error % column.name)
return "%" + str(max(itemsize, 1)) + "s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype == np.int8 or dtype == np.int16:
return "%8.0g"
else: # pragma : no cover
raise ValueError("Data type %s not currently understood. "
"Please report an error to the developers." % dtype)
class StataWriter(StataParser):
"""
A class for writing Stata binary dta files from array-like objects
Parameters
----------
fname : file path or buffer
Where to save the dta file.
data : array-like
Array-like input to save. Pandas objects are also accepted.
convert_dates : dict
Dictionary mapping column of datetime types to the stata internal
format that you want to use for the dates. Options are
'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either a
number or a name.
encoding : str
Default is latin-1. Note that Stata does not support unicode.
byteorder : str
Can be ">", "<", "little", or "big". The default is None which uses
`sys.byteorder`
time_stamp : datetime
A date time to use when writing the file. Can be None, in which
case the current time is used.
dataset_label : str
A label for the data set. Should be 80 characters or smaller.
Returns
-------
writer : StataWriter instance
The StataWriter instance has a write_file method, which will
write the file to the given `fname`.
Examples
--------
>>> import pandas as pd
>>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> from datetime import datetime
>>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])
>>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'})
>>> writer.write_file()
"""
def __init__(self, fname, data, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None):
super(StataWriter, self).__init__(encoding)
self._convert_dates = convert_dates
self._write_index = write_index
self._time_stamp = time_stamp
self._data_label = data_label
# attach nobs, nvars, data, varlist, typlist
self._prepare_pandas(data)
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
self._file = _open_file_binary_write(
fname, self._encoding or self._default_encoding
)
self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
def _write(self, to_write):
"""
Helper to call encode before writing to file for Python 3 compat.
"""
if compat.PY3:
self._file.write(to_write.encode(self._encoding or
self._default_encoding))
else:
self._file.write(to_write)
    def _prepare_categoricals(self, data):
        """Check for categorical columns, retain categorical information for
        Stata file and convert categorical data to int"""
is_cat = [com.is_categorical_dtype(data[col]) for col in data]
self._is_col_cat = is_cat
self._value_labels = []
if not any(is_cat):
return data
get_base_missing_value = StataMissingValue.get_base_missing_value
index = data.index
data_formatted = []
for col, col_is_cat in zip(data, is_cat):
if col_is_cat:
self._value_labels.append(StataValueLabel(data[col]))
dtype = data[col].cat.codes.dtype
if dtype == np.int64:
raise ValueError('It is not possible to export int64-based '
'categorical data to Stata.')
values = data[col].cat.codes.values.copy()
# Upcast if needed so that correct missing values can be set
if values.max() >= get_base_missing_value(dtype):
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
else:
dtype = np.float64
values = np.array(values, dtype=dtype)
# Replace missing values with Stata missing value for type
values[values == -1] = get_base_missing_value(dtype)
data_formatted.append((col, values, index))
else:
data_formatted.append((col, data[col]))
return DataFrame.from_items(data_formatted)
def _replace_nans(self, data):
        """Checks floating point data columns for nans, and replaces these with
        the generic Stata missing value (.)"""
for c in data:
dtype = data[c].dtype
if dtype in (np.float32, np.float64):
if dtype == np.float32:
replacement = self.MISSING_VALUES['f']
else:
replacement = self.MISSING_VALUES['d']
data[c] = data[c].fillna(replacement)
return data
def _check_column_names(self, data):
"""Checks column names to ensure that they are valid Stata column names.
This includes checks for:
* Non-string names
* Stata keywords
* Variables that start with numbers
* Variables with names that are too long
When an illegal variable name is detected, it is converted, and if dates
        are exported, the variable name is propagated to the date conversion
dictionary
"""
converted_names = []
columns = list(data.columns)
original_columns = columns[:]
duplicate_var_id = 0
for j, name in enumerate(columns):
orig_name = name
if not isinstance(name, string_types):
name = text_type(name)
for c in name:
if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and \
(c < '0' or c > '9') and c != '_':
name = name.replace(c, '_')
# Variable name must not be a reserved word
if name in self.RESERVED_WORDS:
name = '_' + name
# Variable name may not start with a number
if name[0] >= '0' and name[0] <= '9':
name = '_' + name
name = name[:min(len(name), 32)]
if not name == orig_name:
# check for duplicates
while columns.count(name) > 0:
# prepend ascending number to avoid duplicates
name = '_' + str(duplicate_var_id) + name
name = name[:min(len(name), 32)]
duplicate_var_id += 1
# need to possibly encode the orig name if its unicode
try:
orig_name = orig_name.encode('utf-8')
except:
pass
converted_names.append('{0} -> {1}'.format(orig_name, name))
columns[j] = name
data.columns = columns
# Check date conversion, and fix key if needed
if self._convert_dates:
for c, o in zip(columns, original_columns):
if c != o:
self._convert_dates[c] = self._convert_dates[o]
del self._convert_dates[o]
if converted_names:
import warnings
ws = invalid_name_doc.format('\n '.join(converted_names))
warnings.warn(ws, InvalidColumnName)
return data
def _prepare_pandas(self, data):
#NOTE: we might need a different API / class for pandas objects so
# we can set different semantics - handle this with a PR to pandas.io
data = data.copy()
if self._write_index:
data = data.reset_index()
# Ensure column names are strings
data = self._check_column_names(data)
# Check columns for compatibility with stata, upcast if necessary
data = _cast_to_stata_types(data)
# Replace NaNs with Stata missing values
data = self._replace_nans(data)
# Convert categoricals to int data, and strip labels
data = self._prepare_categoricals(data)
self.nobs, self.nvar = data.shape
self.data = data
self.varlist = data.columns.tolist()
dtypes = data.dtypes
if self._convert_dates is not None:
self._convert_dates = _maybe_convert_to_int_keys(
self._convert_dates, self.varlist
)
for key in self._convert_dates:
new_type = _convert_datetime_to_stata_type(
self._convert_dates[key]
)
dtypes[key] = np.dtype(new_type)
self.typlist = []
self.fmtlist = []
for col, dtype in dtypes.iteritems():
self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, data[col]))
self.typlist.append(_dtype_to_stata_type(dtype, data[col]))
# set the given format for the datetime cols
if self._convert_dates is not None:
for key in self._convert_dates:
self.fmtlist[key] = self._convert_dates[key]
def write_file(self):
self._write_header(time_stamp=self._time_stamp,
data_label=self._data_label)
self._write_descriptors()
self._write_variable_labels()
# write 5 zeros for expansion fields
self._write(_pad_bytes("", 5))
self._prepare_data()
self._write_data()
self._write_value_labels()
self._file.close()
def _write_value_labels(self):
for vl in self._value_labels:
self._file.write(vl.generate_value_label(self._byteorder,
self._encoding))
def _write_header(self, data_label=None, time_stamp=None):
byteorder = self._byteorder
# ds_format - just use 114
self._file.write(struct.pack("b", 114))
# byteorder
self._write(byteorder == ">" and "\x01" or "\x02")
# filetype
self._write("\x01")
# unused
self._write("\x00")
# number of vars, 2 bytes
self._file.write(struct.pack(byteorder+"h", self.nvar)[:2])
# number of obs, 4 bytes
self._file.write(struct.pack(byteorder+"i", self.nobs)[:4])
# data label 81 bytes, char, null terminated
if data_label is None:
self._file.write(self._null_terminate(_pad_bytes("", 80)))
else:
self._file.write(
self._null_terminate(_pad_bytes(data_label[:80], 80))
)
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
self._file.write(
self._null_terminate(time_stamp.strftime("%d %b %Y %H:%M"))
)
def _write_descriptors(self, typlist=None, varlist=None, srtlist=None,
fmtlist=None, lbllist=None):
nvar = self.nvar
# typlist, length nvar, format byte array
for typ in self.typlist:
self._write(typ)
# varlist names are checked by _check_column_names
# varlist, requires null terminated
for name in self.varlist:
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
# srtlist, 2*(nvar+1), int array, encoded by byteorder
srtlist = _pad_bytes("", (2*(nvar+1)))
self._write(srtlist)
# fmtlist, 49*nvar, char array
for fmt in self.fmtlist:
self._write(_pad_bytes(fmt, 49))
# lbllist, 33*nvar, char array
for i in range(nvar):
# Use variable name when categorical
if self._is_col_cat[i]:
name = self.varlist[i]
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
else: # Default is empty label
self._write(_pad_bytes("", 33))
def _write_variable_labels(self, labels=None):
nvar = self.nvar
if labels is None:
for i in range(nvar):
self._write(_pad_bytes("", 81))
def _prepare_data(self):
data = self.data
typlist = self.typlist
convert_dates = self._convert_dates
# 1. Convert dates
if self._convert_dates is not None:
for i, col in enumerate(data):
if i in convert_dates:
data[col] = _datetime_to_stata_elapsed_vec(data[col],
self.fmtlist[i])
# 2. Convert bad string data to '' and pad to correct length
dtype = []
data_cols = []
has_strings = False
for i, col in enumerate(data):
typ = ord(typlist[i])
if typ <= 244:
has_strings = True
data[col] = data[col].fillna('').apply(_pad_bytes, args=(typ,))
stype = 'S%d' % typ
dtype.append(('c'+str(i), stype))
string = data[col].str.encode(self._encoding)
data_cols.append(string.values.astype(stype))
else:
dtype.append(('c'+str(i), data[col].dtype))
data_cols.append(data[col].values)
dtype = np.dtype(dtype)
if has_strings:
self.data = np.fromiter(zip(*data_cols), dtype=dtype)
else:
self.data = data.to_records(index=False)
def _write_data(self):
data = self.data
data.tofile(self._file)
def _null_terminate(self, s, as_string=False):
null_byte = '\x00'
if compat.PY3 and not as_string:
s += null_byte
return s.encode(self._encoding)
else:
s += null_byte
return s
| mit |
siou83/trading-with-python | lib/widgets.py | 78 | 3012 | # -*- coding: utf-8 -*-
"""
A collection of widgets for gui building
Copyright: Jev Kuznetsov
License: BSD
"""
from __future__ import division
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import numpy as np
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
class MatplotlibWidget(QWidget):
def __init__(self,parent=None,grid=True):
QWidget.__init__(self,parent)
self.grid = grid
self.fig = Figure()
        self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.mpl_connect('button_press_event', self.onPick) # bind pick event
#self.axes = self.fig.add_subplot(111)
margins = [0.05,0.1,0.9,0.8]
self.axes = self.fig.add_axes(margins)
self.toolbar = NavigationToolbar(self.canvas,self)
#self.initFigure()
layout = QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
self.setLayout(layout)
def onPick(self,event):
print 'Pick event'
print 'you pressed', event.button, event.xdata, event.ydata
def update(self):
self.canvas.draw()
def plot(self,*args,**kwargs):
self.axes.plot(*args,**kwargs)
self.axes.grid(self.grid)
self.update()
def clear(self):
self.axes.clear()
def initFigure(self):
self.axes.grid(True)
x = np.linspace(-1,1)
y = x**2
self.axes.plot(x,y,'o-')
class PlotWindow(QMainWindow):
''' a stand-alone window with embedded matplotlib widget '''
def __init__(self,parent=None):
super(PlotWindow,self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.mplWidget = MatplotlibWidget()
self.setCentralWidget(self.mplWidget)
def plot(self,dataFrame):
''' plot dataframe '''
dataFrame.plot(ax=self.mplWidget.axes)
def getAxes(self):
return self.mplWidget.axes
def getFigure(self):
return self.mplWidget.fig
def update(self):
self.mplWidget.update()
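# Illustrative usage sketch (assumes pandas is installed and a QApplication
# is already running); this helper is not called anywhere in the module.
def _demo_plot_window():
    import pandas as pd
    win = PlotWindow()
    win.plot(pd.DataFrame({'y': np.random.rand(50)}))  # draws on the embedded axes
    win.show()
    return win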
class MainForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Demo: PyQt with matplotlib')
self.plot = MatplotlibWidget()
self.setCentralWidget(self.plot)
self.plot.clear()
self.plot.plot(np.random.rand(10),'x-')
#---------------------
if __name__=='__main__':
app = QApplication(sys.argv)
form = MainForm()
form.show()
app.exec_() | bsd-3-clause |
bryanbriney/abanalysis | ab_analysis.py | 2 | 13237 | #!/usr/bin/python
# filename: ab_analysis.py
###########################################################################
#
# Copyright (c) 2013 Bryan Briney. All rights reserved.
#
# @version: 1.0.0
# @author: Bryan Briney
# @props: IgBLAST team (http://www.ncbi.nlm.nih.gov/igblast/igblast.cgi)
# @license: MIT (http://opensource.org/licenses/MIT)
#
###########################################################################
import os
import time
import math
import glob
import platform
import argparse
import threading
from subprocess import Popen, PIPE
from multiprocessing import Pool, cpu_count
from Bio import SeqIO
from blast_parse import BlastParse
parser = argparse.ArgumentParser("Antibody annotation with IgBLAST.")
parser.add_argument('-i', '--in', dest='input', required=True,
help="The input file, to be split and processed in parallel. \
If a directory is given, all files in the directory will be iteratively processed.")
parser.add_argument('-o', '--out', dest='output', required=True,
help="The output directory, which will contain JSON or tab-delimited output files.")
parser.add_argument('-l', '--log', dest='log', default='',
help="The log file, to which the BlastParse log info will be written. \
Default is stdout.")
parser.add_argument('-t', '--temp', dest='temp_dir', default='',
help="The directory in which temp files will be stored. \
If the directory doesn't exist, it will be created. \
Defaults to './temp_files'.")
parser.add_argument('-p', '--threads', dest='num_threads', default=0, type=int,
help="Number of parallel igblastn instances to spawn. \
Defaults to max available processors.")
parser.add_argument('-v', '--tsv', dest="tsv_out", action='store_true', default=False,
help="NOT YET IMPLEMENTED. If set, the final output (from BlastParse) will be in tab-delimited format. \
Defaults to JSON output.")
parser.add_argument('-m', '--merge', dest="merge", action='store_true', default=False,
help="Use if the input files are paired-end FASTQs (either gzip compressed or uncompressed) \
from Illumina platforms. Prior to running IgBLAST, reads will be merged with pandaseq. \
Requires that pandaseq is installed.")
parser.add_argument('-n', '--next_seq', dest="next_seq", action='store_true', default=False,
help="Use if the run was performed on a NextSeq sequencer. \
Multiple lane files for the same sample will be merged.")
parser.add_argument('-u', '--uaid', dest="uaid", type=int, default=None,
help="Use if the input files contain unique antibody identifiers (UAIDs). \
UAIDs will be identified and incorporated into the output JSON file.")
parser.add_argument('-b', '--basespace', dest="use_basespace", default=False, action='store_true',
help="NOT YET IMPLEMENTED. Use flag if files should be downloaded directly from BaseSpace. \
Files will be downloaded into the directory provided with the '-i' flag, which should be empty.")
parser.add_argument('-d', '--debug', dest="debug", action='store_true', default=False,
help="If set, will write all failed/exception sequences to file and give more informative errors.")
parser.add_argument('-s', '--species', dest='species', default='human',
choices=['human', 'macaque', 'mouse'])
args = parser.parse_args()
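# Example invocation (illustrative; all paths are placeholders):
#   python ab_analysis.py -i ./merged_fastas/ -o ./json_out/ -t ./temp/ -p 8
# Paired-end Illumina FASTQ input can be merged first by adding the -m flag.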
class launch_thread(threading.Thread):
def __init__(self, in_file, out_file):
threading.Thread.__init__(self)
self.in_file = in_file
self.out_file = out_file
binary = './igblastn_' + platform.system().lower()
        self.cmd = ('{3} -germline_db_V database/{0}_gl_V -germline_db_J database/{0}_gl_J -germline_db_D database/{0}_gl_D '
                    '-organism {0} -domain_system imgt -auxiliary_data optional_file/{0}_gl.aux '
                    '-show_translation -outfmt 3 -num_alignments_V 1 -num_alignments_D 1 -num_alignments_J 1 '
                    '-query {1} -out {2}').format(args.species, self.in_file, self.out_file, binary)
def run(self):
p = Popen(self.cmd, shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
#####################################################################
#
# FILES AND DIRECTORIES
#
#####################################################################
def build_temp_dirs():
if args.temp_dir != '':
temp_directory = args.temp_dir
else:
temp_directory = "./temp_files"
temp_out_directory = temp_directory + "/temp_output"
if not os.path.exists(temp_directory): os.mkdir(temp_directory)
if not os.path.exists(temp_out_directory): os.mkdir(temp_out_directory)
return temp_directory, temp_out_directory
def build_output_dir():
output_dir = args.output
if not os.path.exists(output_dir): os.mkdir(output_dir)
return output_dir
def list_files(d):
if os.path.isdir(d):
expanded_dir = os.path.expanduser(d)
return sorted(glob.glob(expanded_dir + '/*'))
else:
return [d,]
def file_length(file):
c = 0
with open(file) as f:
for i, l in enumerate(f):
if l[0] == '>': c += 1
return c
def num_procs():
if args.num_threads > 0:
return args.num_threads
return cpu_count()
def clean_up(temp_files, temp_out_files, temp_directory, temp_out_directory):
for file in temp_out_files:
os.remove(file)
os.rmdir(temp_out_directory)
for file in temp_files:
os.remove(file)
os.rmdir(temp_directory)
def file_splitter(file, splitlen, num_seqs, temp_directory):
counter = 1
files_list = []
lines = open(file, 'r').read().replace(' ', '_').split('>')
for line in range(0, num_seqs+1, splitlen):
output = lines[line:line+splitlen]
temp_filename = temp_directory + "/tempfile_" + str(counter)
files_list.append(temp_filename)
open(temp_filename, "w").write("")
temp_file = open(temp_directory + "/tempfile_" + str(counter), "a")
if counter == 1:
temp_file.write('>'.join(output))
counter += 1
else:
temp_file.write('>' + '>'.join(output))
counter += 1
temp_file.close()
return files_list
#####################################################################
#
# PRINTING
#
#####################################################################
def print_input_info(i):
print ''
print ''
print '========================================'
print 'Parallel IgBLAST'
print '========================================'
print ''
if len(i) > 1:
print 'Input is a directory of {} files.\n\n'.format(len(i))
else:
print 'Input is a single file.\n\n'
def print_infile(i):
b = os.path.basename(i)
print '-'*len(b)
print b
print '-'*len(b)
def print_summary_output(g, e, f, blast_time, parse_time):
total_seqs = g+e+f
print ''
print 'Out of {} total sequences:'.format(total_seqs)
print '{} sequences processed normally'.format(g)
print '{} sequences passed sanity checks, but could not be processed'.format(e)
print '{} sequences failed sanity checks are were not processed'.format(f)
print ''
print 'IgBLAST took {0} seconds ({1} sequences per second)'.format(blast_time, total_seqs/blast_time)
print 'parsing took {0} seconds ({1} sequences per second)'.format(parse_time, total_seqs/parse_time)
print ''
#####################################################################
#
# PARSING
#
#####################################################################
def line_generator(blast_file):
f = open(blast_file, 'r')
for line in f:
yield line
def block_generator(blast_file):
l = line_generator(blast_file)
line = next(l)
while line.find('Query= ') == -1: line = next(l)
block = line.replace('Query= ', '')
while True:
try:
line = next(l)
while line.find('Query= ') == -1:
block += line
line = next(l)
yield block
block = line.replace('Query= ', '')
except StopIteration:
yield block
break
raise StopIteration
def do_parse(blastout):
out_file = blastout + '.json'
result = []
pool = Pool(processes=cpu_count())
for i in block_generator(blastout):
try:
if args.debug:
result.append(parser(i))
else:
result.append(pool.apply_async(parser, (i,)))
except StopIteration:
break
pool.close()
pool.join()
good, exceptions, failed = process_parse_data(result, out_file)
result = []
return good, exceptions, failed
def parser(i):
bp = BlastParse(i, species=args.species, tsv=args.tsv_out, log=args.log, debug=args.debug, uaid=args.uaid)
if bp.sanity_checks() < 1:
output = bp.parse()
return output
else:
return ['', '', i]
def process_parse_data(results, out_file):
good = 0
exceptions = 0
failed = 0
r_handle = build_result_handle(out_file)
if args.debug:
e_handle = build_exception_handle(out_file)
f_handle = build_failed_handle(out_file)
for result in results:
if args.debug:
if result[0] != '':
r_handle.write(result[0])
good += 1
elif result[1] != '':
e_handle.write(result[1])
exceptions += 1
elif result[2] != '':
f_handle.write(result[2])
failed += 1
else:
r = result.get()
if r[0] != '':
r_handle.write(r[0])
good += 1
elif r[1] != '': exceptions += 1
elif r[2] != '': failed += 1
return good, exceptions, failed
def build_result_handle(out_file):
open(out_file, 'w').write('')
return open(out_file, 'a')
def build_exception_handle(out_file):
e_file = out_file.split('.')[0] + '_exceptions'
open(e_file, 'w').write('')
return open(e_file, 'a')
def build_failed_handle(out_file):
f_file = out_file.split('.')[0] + '_failed'
open(f_file, 'w').write('')
return open(f_file, 'a')
#####################################################################
#
# INPUT PROCESSING
#
#####################################################################
def check_input(input_list):
format = format_check(input_list[0])
if format == 'fasta':
return input_list
else:
return convert_to_fasta(input_list)
def format_check(in_file):
with open(in_file) as f:
line = f.next()
while line == '':
line = f.next()
if line.startswith('>'):
return 'fasta'
elif line.startswith('@'):
return 'fastq'
else:
raise RuntimeError('Input files must be in either FASTA or FASTQ format.')
def convert_to_fasta(input_list):
fasta_dir = args.input + 'fastas/'
if not os.path.exists(fasta_dir):
os.mkdir(fasta_dir)
for f in input_list:
out_file = os.path.join(fasta_dir, os.path.basename(f).split('.')[0])
open(out_file, 'w').write('')
out_handle = open(out_file, 'a')
for s in SeqIO.parse(f, 'fastq'):
out_handle.write('>{0}\n{1}\n'.format(s.id, str(s.seq)))
return list_files(fasta_dir)
def merge_reads():
import pandaseq
merge_dir = args.input + 'merged_reads/'
if not os.path.exists(merge_dir):
os.mkdir(merge_dir)
pandaseq.run(args.input, merge_dir, nextseq=args.next_seq)
return list_files(merge_dir)
def preprocess(files):
import pre_processing
processed_dir = args.input + 'processed/'
if not os.path.exists(processed_dir):
os.mkdir(processed_dir)
pre_processing.run(files, processed_dir)
return list_files(processed_dir)
def download_files():
from basespace import BaseSpace
bs = BaseSpace()
bs.download(args.input)
args.merge = True
#####################################################################
#
# IgBLAST
#
#####################################################################
def do_igblast(i, out_dir):
o_prefix = os.path.basename(i).split('.')[0]
o = os.path.join(out_dir, o_prefix + '_blastout')
# parallel IgBLASTn
blast_start = time.time()
blastout = parallel_igblast(i,o)
blast_end = time.time()
blast_time = blast_end - blast_start
# parse the IgBLASTn output
parse_start = time.time()
good_seqs, exc_seqs, failed_seqs = do_parse(blastout)
parse_end = time.time()
parse_time = parse_end - parse_start
print_summary_output(good_seqs, exc_seqs, failed_seqs, blast_time, parse_time)
def parallel_igblast(in_file, out_file):
num_seqs = file_length(in_file)
threads = num_procs()
split_length = int(math.ceil(float(num_seqs) / threads))
temp_directory, temp_out_directory = build_temp_dirs()
split_files = file_splitter(in_file, split_length, num_seqs, temp_directory)
thread_list = []
blastout_list = []
# run IgBLASTn in parallel
for f in split_files:
temp_out_file = os.path.join(temp_out_directory, os.path.basename(f).split('.')[0] + "_blastout")
t = launch_thread(f, temp_out_file)
t.start()
thread_list.append(t)
blastout_list.append(temp_out_file)
for thread in thread_list:
thread.join()
# combine all blastout files into a single output file
open(out_file, 'w').write('')
with open(out_file, 'w') as out_handle:
for f in blastout_list:
with open(f) as in_handle:
for line in in_handle:
out_handle.write(line)
clean_up(split_files, blastout_list, temp_directory, temp_out_directory)
return out_file
def main():
# if args.use_basespace:
# download_files()
if args.merge:
input_list = merge_reads()
else:
input_list = list_files(args.input)
input_list = check_input(input_list)
input_list = preprocess(input_list)
print_input_info(input_list)
output_dir = build_output_dir()
for i in input_list:
if os.path.isfile(i):
print_infile(i)
do_igblast(i, output_dir)
if __name__ == '__main__':
main()
| mit |
fredhusser/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 254 | 2795 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
    # TASK: print the cross-validated scores for each parameter set
# explored by the grid search
print(grid_search.grid_scores_)
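    # The best combination found by the grid search can also be inspected
    # directly (illustrative):
    #   print(grid_search.best_score_)
    #   print(grid_search.best_params_)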
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
uzh-rpg/rpg_svo | svo_analysis/src/svo_analysis/analyse_logs.py | 17 | 3497 | #!/usr/bin/python
import os
import yaml
import numpy as np
import matplotlib.pyplot as plt
def analyse_logs(D, trace_dir):
# identify measurements which result from normal frames and which from keyframes
is_kf = np.argwhere( (D['dropout'] == 1) & (D['repr_n_mps'] >= 0))
is_frame = np.argwhere(D['repr_n_mps'] >= 0)
is_nokf = np.argwhere( (D['dropout'] == 0) & (D['repr_n_mps'] >= 0))
# set initial time to zero
D['timestamp'] = D['timestamp'] - D['timestamp'][0]
# ----------------------------------------------------------------------------
# plot number of reprojected points
mean_n_reproj_points = np.mean(D['repr_n_mps'][is_frame]);
mean_n_reproj_matches = np.mean(D['repr_n_new_references'][is_frame]);
mean_n_edges_final = np.mean(D['sfba_n_edges_final'][is_frame]);
fig = plt.figure(figsize=(8,3))
ax = fig.add_subplot(111, xlabel='time [s]')
ax.plot(D['timestamp'][is_frame], D['repr_n_mps'][is_frame], 'r-',
label='Reprojected Points, avg = %.2f'%mean_n_reproj_points)
ax.plot(D['timestamp'][is_frame], D['repr_n_new_references'][is_frame], 'b-',
label='Feature Matches, avg = %.2f'%mean_n_reproj_matches)
ax.plot(D['timestamp'][is_frame], D['sfba_n_edges_final'][is_frame], 'g-',
label='Points after Optimization, avg = %.2f'%mean_n_edges_final)
ax.set_ylim(bottom=0)
ax.legend(loc='lower right')
fig.tight_layout()
fig.savefig(os.path.join(trace_dir,'num_reprojected.pdf'), bbox_inches="tight")
# ----------------------------------------------------------------------------
    # plot median error before and after pose-optimization and bundle adjustment
init_error_avg = np.mean(D['sfba_error_init'][is_frame])
opt1_avg = np.mean(D['sfba_error_final'][is_frame])
fig = plt.figure(figsize=(8,2))
ax = fig.add_subplot(111, xlabel='time [s]', ylabel='error [px]')
ax.plot(D['timestamp'][is_frame], D['sfba_error_init'][is_frame], 'r-', label='Initial error')
ax.plot(D['timestamp'][is_frame], D['sfba_error_final'][is_frame], 'b-', label='Final error')
ax.legend(ncol=2)
fig.tight_layout()
fig.savefig(os.path.join(trace_dir,'reprojection_error.pdf'), bbox_inches="tight")
print 'average reprojection error improvement: ' + str(init_error_avg - opt1_avg)
# ----------------------------------------------------------------------------
# plot number of candidate points
fig = plt.figure(figsize=(8,3))
ax = fig.add_subplot(111, xlabel='time [s]')
ax.plot(D['timestamp'][is_frame], D['n_candidates'][is_frame], 'r-', label='Candidate Points')
fig.tight_layout()
fig.savefig(os.path.join(trace_dir,'candidate_points.pdf'), bbox_inches="tight")
# ----------------------------------------------------------------------------
# plot number of candidate points
fig = plt.figure(figsize=(8,2))
ax = fig.add_subplot(111, xlabel='time [s]', ylabel='px')
ax.plot(D['timestamp'][is_frame], D['sfba_thresh'][is_frame], 'r-', label='Threshold')
fig.tight_layout()
fig.savefig(os.path.join(trace_dir,'optimization_thresh.pdf'), bbox_inches="tight")
# ----------------------------------------------------------------------------
# write other statistics to file
stat = {'num_frames': len(is_frame),
'num_kfs': len(is_kf),
'reproj_error_avg_improvement': float(init_error_avg - opt1_avg)}
with open(os.path.join(trace_dir,'dataset_stats.yaml'),'w') as outfile:
outfile.write(yaml.dump(stat, default_flow_style=False))
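# Illustrative call (assumption: D behaves like a dict of equal-length numpy
# arrays keyed by the trace column names used above, e.g. a structured array
# loaded with numpy.genfromtxt(..., names=True)):
#
#   D = np.genfromtxt('trace.csv', delimiter=',', names=True)
#   analyse_logs(D, '/tmp/svo_trace')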
| gpl-3.0 |
j-faria/OPEN | OPEN/macros/lp_pdf.py | 1 | 2237 | # -*- coding: utf-8 -*-
# 'run -i' this script:
magic = get_ipython().magic
import datetime
import glob
import os
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
from OPEN.periodograms import gls
from OPEN.utils import day2year, rms
from OPEN.tqdm import tqdm
all_files = glob.glob('/home/joao/phd/data/*mean*')
print 'Total: ', len(all_files), ' files'
with PdfPages('lp_data.pdf') as pdf:
for filename in tqdm(all_files[0:10]):
# print filename
print
magic('read ' + filename + ' --skip=2 -d')
N = len(default.time)
if N < 5:
print "LESS THAN 5 MEASUREMENTS"
continue # skip this star
per = gls(default) # calculate periodogram
per1 = gls(default, quantity='fwhm')
per2 = gls(default, quantity='rhk')
tspan = max(default.time) - min(default.time)
rms_over_err = rms(default.vrad - default.vrad.mean()) / default.error.mean()
plt.figure(figsize=(8,10))
gs = gridspec.GridSpec(4, 2)
# plot the data
ax1 = plt.subplot(gs[0])
ax1.ticklabel_format(useOffset=False)
default.do_plot_obs(newFig=False, leg=False)
plt.locator_params(nbins=4)
# info
ax2 = plt.subplot(gs[0,1])
plt.axis('off')
plt.text(0., 0.9, os.path.basename(filename))
plt.text(0., 0.68, '# points %d' % N)
plt.text(0., 0.5, 'time span %3.1f years' % day2year(tspan))
plt.text(0., 0.3, 'RV rms / <err> %3.2f' % rms_over_err)
# plt.text(0.5, 0.5, '# meas. %d' % N)
# plot the periodogram
ax3 = plt.subplot(gs[1, :])
per._plot(doFAP=True, newFig=False, axes=ax3)
# the other periogorams
try:
ax4 = plt.subplot(gs[2, :])
per1._plot(newFig=False, axes=ax4)
plt.title('FWHM')
ax5 = plt.subplot(gs[3, :])
per2._plot(newFig=False, axes=ax5)
plt.title('RHK')
except ValueError:
print 'SOME ERROR OCCURRED...'
continue
pdf.savefig()
plt.close()
# We can also set the file's metadata via the PdfPages object:
d = pdf.infodict()
d['Title'] = 'LP-metal-poor-data'
d['Author'] = u'João Faria'
d['Subject'] = 'Subject'
d['Keywords'] = 'LP metal poor data gls joao faria'
d['CreationDate'] = datetime.datetime.today()
d['ModDate'] = datetime.datetime.today()
| mit |
pv/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropically Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
0todd0000/spm1d | spm1d/rft1d/examples/val_broken_3_T2.py | 1 | 1887 |
import numpy as np
from matplotlib import pyplot
from spm1d import rft1d
def here_hotellingsT2(y):
N = y.shape[0]
m = np.matrix( y.mean(axis=0) )
T2 = []
for ii,mm in enumerate(m):
W = np.matrix( np.cov(y[:,ii,:].T, ddof=1) ) #estimated covariance
t2 = N * mm * np.linalg.inv(W) * mm.T
T2.append( float(t2) )
return np.asarray(T2)
#(0) Set parameters:
np.random.seed(0)
nResponses = 25
nNodes = 101
nComponents = 2
nIterations = 200 #raise this to at least 500 for convergence
FWHM = 12.0
W0 = np.eye(nComponents)
### derived parameters:
df = nComponents, nResponses-1 #p,m
### generate a field mask:
nodes = np.array([True]*nNodes) #nothing masked out
nodes[10:30] = False #this region will be masked out
nodes[60:85] = False
#(1) Generate Gaussian 1D fields, compute test stat:
generator = rft1d.random.GeneratorMulti1D(nResponses, nodes, nComponents, FWHM, W0)
T2 = []
for i in range(nIterations):
y = generator.generate_sample()
t2 = here_hotellingsT2(y)
T2.append( np.nanmax(t2) )
T2 = np.array(T2)
#(2) Survival functions for field maximum:
heights = np.linspace(8.0, 15, 21)
sf = np.array( [ (T2>=h).mean() for h in heights] )
sfE_full = rft1d.T2.sf(heights, df, nNodes, FWHM) #theoretical (full)
sfE_broken = rft1d.T2.sf(heights, df, nodes, FWHM) #theoretical (broken)
#(3) Plot results:
pyplot.close('all')
ax = pyplot.axes()
ax.plot(heights, sfE_full, 'b-', label='Theoretical (full)')
ax.plot(heights, sfE_broken, 'r-', label='Theoretical (broken)')
ax.plot(heights, sf, 'ro', label='Simulated (broken)')
ax.set_xlabel('x', size=16)
ax.set_ylabel('$P (T^2_\mathrm{max} > x)$', size=20)
ax.legend()
ax.set_title('Broken field validation ($T^2$)', size=20)
pyplot.show()
| gpl-3.0 |
Aureliu/keras | examples/kaggle_otto_nn.py | 70 | 3775 | from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils, generic_utils
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
'''
This demonstrates how to reach a score of 0.4890 (local validation)
on the Kaggle Otto challenge, with a deep net using Keras.
Compatible Python 2.7-3.4. Requires Scikit-Learn and Pandas.
Recommended to run on GPU:
Command: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python kaggle_otto_nn.py
On EC2 g2.2xlarge instance: 19s/epoch. 6-7 minutes total training time.
Best validation score at epoch 21: 0.4881
Try it at home:
- with/without BatchNormalization (BatchNormalization helps!)
- with ReLU or with PReLU (PReLU helps!)
- with smaller layers, larger layers
- with more layers, less layers
- with different optimizers (SGD+momentum+decay is probably better than Adam!)
Get the data from Kaggle: https://www.kaggle.com/c/otto-group-product-classification-challenge/data
'''
def load_data(path, train=True):
df = pd.read_csv(path)
X = df.values.copy()
if train:
np.random.shuffle(X) # https://youtu.be/uyUXoap67N8
X, labels = X[:, 1:-1].astype(np.float32), X[:, -1]
return X, labels
else:
X, ids = X[:, 1:].astype(np.float32), X[:, 0].astype(str)
return X, ids
def preprocess_data(X, scaler=None):
if not scaler:
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
return X, scaler
def preprocess_labels(labels, encoder=None, categorical=True):
if not encoder:
encoder = LabelEncoder()
encoder.fit(labels)
y = encoder.transform(labels).astype(np.int32)
if categorical:
y = np_utils.to_categorical(y)
return y, encoder
def make_submission(y_prob, ids, encoder, fname):
with open(fname, 'w') as f:
f.write('id,')
f.write(','.join([str(i) for i in encoder.classes_]))
f.write('\n')
for i, probs in zip(ids, y_prob):
probas = ','.join([i] + [str(p) for p in probs.tolist()])
f.write(probas)
f.write('\n')
print("Wrote submission to file {}.".format(fname))
print("Loading data...")
X, labels = load_data('train.csv', train=True)
X, scaler = preprocess_data(X)
y, encoder = preprocess_labels(labels)
X_test, ids = load_data('test.csv', train=False)
X_test, _ = preprocess_data(X_test, scaler)
nb_classes = y.shape[1]
print(nb_classes, 'classes')
dims = X.shape[1]
print(dims, 'dims')
print("Building model...")
model = Sequential()
model.add(Dense(dims, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, nb_classes, init='glorot_uniform'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer="adam")
print("Training model...")
model.fit(X, y, nb_epoch=20, batch_size=128, validation_split=0.15)
print("Generating submission...")
proba = model.predict_proba(X_test)
make_submission(proba, ids, encoder, fname='keras-otto.csv')
| mit |
CallaJun/hackprince | indico/matplotlib/markers.py | 10 | 26324 | """
This module contains functions to handle markers. Used by both the
marker functionality of `~matplotlib.axes.Axes.plot` and
`~matplotlib.axes.Axes.scatter`.
All possible markers are defined here:
============================== ===============================================
marker description
============================== ===============================================
"." point
"," pixel
"o" circle
"v" triangle_down
"^" triangle_up
"<" triangle_left
">" triangle_right
"1" tri_down
"2" tri_up
"3" tri_left
"4" tri_right
"8" octagon
"s" square
"p" pentagon
"*" star
"h" hexagon1
"H" hexagon2
"+" plus
"x" x
"D" diamond
"d" thin_diamond
"|" vline
"_" hline
TICKLEFT tickleft
TICKRIGHT tickright
TICKUP tickup
TICKDOWN tickdown
CARETLEFT caretleft
CARETRIGHT caretright
CARETUP caretup
CARETDOWN caretdown
"None" nothing
None nothing
" " nothing
"" nothing
``'$...$'`` render the string using mathtext.
`verts` a list of (x, y) pairs used for Path vertices.
The center of the marker is located at (0,0) and
the size is normalized.
path a `~matplotlib.path.Path` instance.
(`numsides`, `style`, `angle`) see below
============================== ===============================================
The marker can also be a tuple (`numsides`, `style`, `angle`), which
will create a custom, regular symbol.
`numsides`:
the number of sides
`style`:
the style of the regular symbol:
===== =============================================
Value Description
===== =============================================
0 a regular polygon
1 a star-like symbol
2 an asterisk
3 a circle (`numsides` and `angle` is ignored)
===== =============================================
`angle`:
the angle of rotation of the symbol, in degrees
For backward compatibility, the form (`verts`, 0) is also accepted,
but it is equivalent to just `verts` for giving a raw set of vertices
that define the shape.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
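# Illustrative usage (not part of this module): the tuple and mathtext marker
# forms described in the docstring above can be passed straight to
# plot/scatter, e.g.
#
#   import matplotlib.pyplot as plt
#   plt.scatter([0, 1, 2], [0, 1, 4], marker=(5, 1, 30))   # 5-point star rotated 30 degrees
#   plt.plot([0, 1, 2], [0, 1, 4], marker=r'$\alpha$', linestyle='none')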
import six
from six.moves import xrange
import numpy as np
from .cbook import is_math_text, is_string_like, is_numlike, iterable
from matplotlib import rcParams
from .path import Path
from .transforms import IdentityTransform, Affine2D
# special-purpose marker identifiers:
(TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN,
CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN) = list(xrange(8))
class MarkerStyle(object):
markers = {
'.': 'point',
',': 'pixel',
'o': 'circle',
'v': 'triangle_down',
'^': 'triangle_up',
'<': 'triangle_left',
'>': 'triangle_right',
'1': 'tri_down',
'2': 'tri_up',
'3': 'tri_left',
'4': 'tri_right',
'8': 'octagon',
's': 'square',
'p': 'pentagon',
'*': 'star',
'h': 'hexagon1',
'H': 'hexagon2',
'+': 'plus',
'x': 'x',
'D': 'diamond',
'd': 'thin_diamond',
'|': 'vline',
'_': 'hline',
TICKLEFT: 'tickleft',
TICKRIGHT: 'tickright',
TICKUP: 'tickup',
TICKDOWN: 'tickdown',
CARETLEFT: 'caretleft',
CARETRIGHT: 'caretright',
CARETUP: 'caretup',
CARETDOWN: 'caretdown',
"None": 'nothing',
None: 'nothing',
' ': 'nothing',
'': 'nothing'
}
# Just used for informational purposes. is_filled()
# is calculated in the _set_* functions.
filled_markers = (
'o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd')
fillstyles = ('full', 'left', 'right', 'bottom', 'top', 'none')
_half_fillstyles = ('left', 'right', 'bottom', 'top')
# TODO: Is this ever used as a non-constant?
_point_size_reduction = 0.5
def __init__(self, marker=None, fillstyle='full'):
"""
MarkerStyle
Attributes
----------
        markers : list of known markers
fillstyles : list of known fillstyles
filled_markers : list of known filled markers.
Parameters
----------
marker : string or array_like, optional, default: None
See the descriptions of possible markers in the module docstring.
fillstyle : string, optional, default: 'full'
            'full', 'left', 'right', 'bottom', 'top', 'none'
"""
self._fillstyle = fillstyle
self.set_marker(marker)
self.set_fillstyle(fillstyle)
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_marker_function')
return d
def __setstate__(self, statedict):
self.__dict__ = statedict
self.set_marker(self._marker)
self._recache()
def _recache(self):
self._path = Path(np.empty((0, 2)))
self._transform = IdentityTransform()
self._alt_path = None
self._alt_transform = None
self._snap_threshold = None
self._joinstyle = 'round'
self._capstyle = 'butt'
self._filled = True
self._marker_function()
if six.PY3:
def __bool__(self):
return bool(len(self._path.vertices))
else:
def __nonzero__(self):
return bool(len(self._path.vertices))
def is_filled(self):
return self._filled
def get_fillstyle(self):
return self._fillstyle
def set_fillstyle(self, fillstyle):
"""
Sets fillstyle
Parameters
----------
fillstyle : string amongst known fillstyles
"""
if fillstyle not in self.fillstyles:
    raise ValueError("Unrecognized fillstyle %r; valid fillstyles are %s"
                     % (fillstyle, ', '.join(self.fillstyles)))
self._fillstyle = fillstyle
self._recache()
def get_joinstyle(self):
return self._joinstyle
def get_capstyle(self):
return self._capstyle
def get_marker(self):
return self._marker
def set_marker(self, marker):
if (iterable(marker) and len(marker) in (2, 3) and
marker[1] in (0, 1, 2, 3)):
self._marker_function = self._set_tuple_marker
elif isinstance(marker, np.ndarray):
self._marker_function = self._set_vertices
elif not isinstance(marker, list) and marker in self.markers:
self._marker_function = getattr(
self, '_set_' + self.markers[marker])
elif is_string_like(marker) and is_math_text(marker):
self._marker_function = self._set_mathtext_path
elif isinstance(marker, Path):
self._marker_function = self._set_path_marker
else:
try:
Path(marker)
self._marker_function = self._set_vertices
except ValueError:
raise ValueError('Unrecognized marker style {}'.format(marker))
self._marker = marker
self._recache()
def get_path(self):
return self._path
def get_transform(self):
return self._transform.frozen()
def get_alt_path(self):
return self._alt_path
def get_alt_transform(self):
return self._alt_transform.frozen()
def get_snap_threshold(self):
return self._snap_threshold
def _set_nothing(self):
self._filled = False
def _set_custom_marker(self, path):
verts = path.vertices
rescale = max(np.max(np.abs(verts[:, 0])),
np.max(np.abs(verts[:, 1])))
self._transform = Affine2D().scale(0.5 / rescale)
self._path = path
def _set_path_marker(self):
self._set_custom_marker(self._marker)
def _set_vertices(self):
verts = self._marker
marker = Path(verts)
self._set_custom_marker(marker)
def _set_tuple_marker(self):
marker = self._marker
if is_numlike(marker[0]):
if len(marker) == 2:
numsides, rotation = marker[0], 0.0
elif len(marker) == 3:
numsides, rotation = marker[0], marker[2]
symstyle = marker[1]
if symstyle == 0:
self._path = Path.unit_regular_polygon(numsides)
self._joinstyle = 'miter'
elif symstyle == 1:
self._path = Path.unit_regular_star(numsides)
self._joinstyle = 'bevel'
elif symstyle == 2:
self._path = Path.unit_regular_asterisk(numsides)
self._filled = False
self._joinstyle = 'bevel'
elif symstyle == 3:
self._path = Path.unit_circle()
self._transform = Affine2D().scale(0.5).rotate_deg(rotation)
else:
verts = np.asarray(marker[0])
path = Path(verts)
self._set_custom_marker(path)
def _set_mathtext_path(self):
"""
Draws mathtext markers '$...$' using TextPath object.
Submitted by tcb
"""
from matplotlib.text import TextPath
from matplotlib.font_manager import FontProperties
# again, the properties could be initialised just once outside
# this function
# Font size is irrelevant here, it will be rescaled based on
# the drawn size later
props = FontProperties(size=1.0)
text = TextPath(xy=(0, 0), s=self.get_marker(), fontproperties=props,
usetex=rcParams['text.usetex'])
if len(text.vertices) == 0:
return
xmin, ymin = text.vertices.min(axis=0)
xmax, ymax = text.vertices.max(axis=0)
width = xmax - xmin
height = ymax - ymin
max_dim = max(width, height)
self._transform = Affine2D() \
.translate(-xmin + 0.5 * -width, -ymin + 0.5 * -height) \
.scale(1.0 / max_dim)
self._path = text
self._snap = False
def _half_fill(self):
fs = self.get_fillstyle()
result = fs in self._half_fillstyles
return result
def _set_circle(self, reduction=1.0):
self._transform = Affine2D().scale(0.5 * reduction)
self._snap_threshold = 6.0
fs = self.get_fillstyle()
if not self._half_fill():
self._path = Path.unit_circle()
else:
# build a right-half circle
if fs == 'bottom':
rotate = 270.
elif fs == 'top':
rotate = 90.
elif fs == 'left':
rotate = 180.
else:
rotate = 0.
self._path = self._alt_path = Path.unit_circle_righthalf()
self._transform.rotate_deg(rotate)
self._alt_transform = self._transform.frozen().rotate_deg(180.)
def _set_pixel(self):
self._path = Path.unit_rectangle()
# Ideally, you'd want -0.5, -0.5 here, but then the snapping
# algorithm in the Agg backend will round this to a 2x2
# rectangle from (-1, -1) to (1, 1). By offsetting it
# slightly, we can force it to be (0, 0) to (1, 1), which both
# makes it only be a single pixel and places it correctly
# aligned to 1-width stroking (i.e. the ticks). This hack is
# the best of a number of bad alternatives, mainly because the
# backends are not aware of what marker is actually being used
# beyond just its path data.
self._transform = Affine2D().translate(-0.49999, -0.49999)
self._snap_threshold = None
def _set_point(self):
self._set_circle(reduction=self._point_size_reduction)
_triangle_path = Path(
[[0.0, 1.0], [-1.0, -1.0], [1.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
# Going down halfway looks too small. Golden ratio is too far.
_triangle_path_u = Path(
[[0.0, 1.0], [-3 / 5., -1 / 5.], [3 / 5., -1 / 5.], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
_triangle_path_d = Path(
[[-3 / 5., -1 / 5.], [3 / 5., -1 / 5.], [1.0, -1.0], [-1.0, -1.0],
[-3 / 5., -1 / 5.]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
_triangle_path_l = Path(
[[0.0, 1.0], [0.0, -1.0], [-1.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
_triangle_path_r = Path(
[[0.0, 1.0], [0.0, -1.0], [1.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
def _set_triangle(self, rot, skip):
self._transform = Affine2D().scale(0.5, 0.5).rotate_deg(rot)
self._snap_threshold = 5.0
fs = self.get_fillstyle()
if not self._half_fill():
self._path = self._triangle_path
else:
mpaths = [self._triangle_path_u,
self._triangle_path_l,
self._triangle_path_d,
self._triangle_path_r]
if fs == 'top':
self._path = mpaths[(0 + skip) % 4]
self._alt_path = mpaths[(2 + skip) % 4]
elif fs == 'bottom':
self._path = mpaths[(2 + skip) % 4]
self._alt_path = mpaths[(0 + skip) % 4]
elif fs == 'left':
self._path = mpaths[(1 + skip) % 4]
self._alt_path = mpaths[(3 + skip) % 4]
else:
self._path = mpaths[(3 + skip) % 4]
self._alt_path = mpaths[(1 + skip) % 4]
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_triangle_up(self):
return self._set_triangle(0.0, 0)
def _set_triangle_down(self):
return self._set_triangle(180.0, 2)
def _set_triangle_left(self):
return self._set_triangle(90.0, 3)
def _set_triangle_right(self):
return self._set_triangle(270.0, 1)
def _set_square(self):
self._transform = Affine2D().translate(-0.5, -0.5)
self._snap_threshold = 2.0
fs = self.get_fillstyle()
if not self._half_fill():
self._path = Path.unit_rectangle()
else:
# build a bottom filled square out of two rectangles, one
# filled. Use the rotation to support left, right, bottom
# or top
if fs == 'bottom':
rotate = 0.
elif fs == 'top':
rotate = 180.
elif fs == 'left':
rotate = 270.
else:
rotate = 90.
self._path = Path([[0.0, 0.0], [1.0, 0.0], [1.0, 0.5],
[0.0, 0.5], [0.0, 0.0]])
self._alt_path = Path([[0.0, 0.5], [1.0, 0.5], [1.0, 1.0],
[0.0, 1.0], [0.0, 0.5]])
self._transform.rotate_deg(rotate)
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_diamond(self):
self._transform = Affine2D().translate(-0.5, -0.5).rotate_deg(45)
self._snap_threshold = 5.0
fs = self.get_fillstyle()
if not self._half_fill():
self._path = Path.unit_rectangle()
else:
self._path = Path([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]])
self._alt_path = Path([[0.0, 0.0], [0.0, 1.0],
[1.0, 1.0], [0.0, 0.0]])
if fs == 'bottom':
rotate = 270.
elif fs == 'top':
rotate = 90.
elif fs == 'left':
rotate = 180.
else:
rotate = 0.
self._transform.rotate_deg(rotate)
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_thin_diamond(self):
self._set_diamond()
self._transform.scale(0.6, 1.0)
def _set_pentagon(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 5.0
polypath = Path.unit_regular_polygon(5)
fs = self.get_fillstyle()
if not self._half_fill():
self._path = polypath
else:
verts = polypath.vertices
y = (1 + np.sqrt(5)) / 4.
top = Path([verts[0], verts[1], verts[4], verts[0]])
bottom = Path([verts[1], verts[2], verts[3], verts[4], verts[1]])
left = Path([verts[0], verts[1], verts[2], [0, -y], verts[0]])
right = Path([verts[0], verts[4], verts[3], [0, -y], verts[0]])
if fs == 'top':
mpath, mpath_alt = top, bottom
elif fs == 'bottom':
mpath, mpath_alt = bottom, top
elif fs == 'left':
mpath, mpath_alt = left, right
else:
mpath, mpath_alt = right, left
self._path = mpath
self._alt_path = mpath_alt
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_star(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 5.0
fs = self.get_fillstyle()
polypath = Path.unit_regular_star(5, innerCircle=0.381966)
if not self._half_fill():
self._path = polypath
else:
verts = polypath.vertices
top = Path(np.vstack((verts[0:4, :], verts[7:10, :], verts[0])))
bottom = Path(np.vstack((verts[3:8, :], verts[3])))
left = Path(np.vstack((verts[0:6, :], verts[0])))
right = Path(np.vstack((verts[0], verts[5:10, :], verts[0])))
if fs == 'top':
mpath, mpath_alt = top, bottom
elif fs == 'bottom':
mpath, mpath_alt = bottom, top
elif fs == 'left':
mpath, mpath_alt = left, right
else:
mpath, mpath_alt = right, left
self._path = mpath
self._alt_path = mpath_alt
self._alt_transform = self._transform
self._joinstyle = 'bevel'
def _set_hexagon1(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = None
fs = self.get_fillstyle()
polypath = Path.unit_regular_polygon(6)
if not self._half_fill():
self._path = polypath
else:
verts = polypath.vertices
# not drawing inside lines
x = np.abs(np.cos(5 * np.pi / 6.))
top = Path(np.vstack(([-x, 0], verts[(1, 0, 5), :], [x, 0])))
bottom = Path(np.vstack(([-x, 0], verts[2:5, :], [x, 0])))
left = Path(verts[(0, 1, 2, 3), :])
right = Path(verts[(0, 5, 4, 3), :])
if fs == 'top':
mpath, mpath_alt = top, bottom
elif fs == 'bottom':
mpath, mpath_alt = bottom, top
elif fs == 'left':
mpath, mpath_alt = left, right
else:
mpath, mpath_alt = right, left
self._path = mpath
self._alt_path = mpath_alt
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_hexagon2(self):
self._transform = Affine2D().scale(0.5).rotate_deg(30)
self._snap_threshold = None
fs = self.get_fillstyle()
polypath = Path.unit_regular_polygon(6)
if not self._half_fill():
self._path = polypath
else:
verts = polypath.vertices
# not drawing inside lines
x, y = np.sqrt(3) / 4, 3 / 4.
top = Path(verts[(1, 0, 5, 4, 1), :])
bottom = Path(verts[(1, 2, 3, 4), :])
left = Path(np.vstack(([x, y], verts[(0, 1, 2), :],
[-x, -y], [x, y])))
right = Path(np.vstack(([x, y], verts[(5, 4, 3), :], [-x, -y])))
if fs == 'top':
mpath, mpath_alt = top, bottom
elif fs == 'bottom':
mpath, mpath_alt = bottom, top
elif fs == 'left':
mpath, mpath_alt = left, right
else:
mpath, mpath_alt = right, left
self._path = mpath
self._alt_path = mpath_alt
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_octagon(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 5.0
fs = self.get_fillstyle()
polypath = Path.unit_regular_polygon(8)
if not self._half_fill():
self._transform.rotate_deg(22.5)
self._path = polypath
else:
x = np.sqrt(2.) / 4.
half = Path([[0, -1], [0, 1], [-x, 1], [-1, x],
[-1, -x], [-x, -1], [0, -1]])
if fs == 'bottom':
rotate = 90.
elif fs == 'top':
rotate = 270.
elif fs == 'right':
rotate = 180.
else:
rotate = 0.
self._transform.rotate_deg(rotate)
self._path = self._alt_path = half
self._alt_transform = self._transform.frozen().rotate_deg(180.0)
self._joinstyle = 'miter'
_line_marker_path = Path([[0.0, -1.0], [0.0, 1.0]])
def _set_vline(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 1.0
self._filled = False
self._path = self._line_marker_path
def _set_hline(self):
self._transform = Affine2D().scale(0.5).rotate_deg(90)
self._snap_threshold = 1.0
self._filled = False
self._path = self._line_marker_path
_tickhoriz_path = Path([[0.0, 0.0], [1.0, 0.0]])
def _set_tickleft(self):
self._transform = Affine2D().scale(-1.0, 1.0)
self._snap_threshold = 1.0
self._filled = False
self._path = self._tickhoriz_path
def _set_tickright(self):
self._transform = Affine2D().scale(1.0, 1.0)
self._snap_threshold = 1.0
self._filled = False
self._path = self._tickhoriz_path
_tickvert_path = Path([[-0.0, 0.0], [-0.0, 1.0]])
def _set_tickup(self):
self._transform = Affine2D().scale(1.0, 1.0)
self._snap_threshold = 1.0
self._filled = False
self._path = self._tickvert_path
def _set_tickdown(self):
self._transform = Affine2D().scale(1.0, -1.0)
self._snap_threshold = 1.0
self._filled = False
self._path = self._tickvert_path
_plus_path = Path([[-1.0, 0.0], [1.0, 0.0],
[0.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _set_plus(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 1.0
self._filled = False
self._path = self._plus_path
_tri_path = Path([[0.0, 0.0], [0.0, -1.0],
[0.0, 0.0], [0.8, 0.5],
[0.0, 0.0], [-0.8, 0.5]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _set_tri_down(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 5.0
self._filled = False
self._path = self._tri_path
def _set_tri_up(self):
self._transform = Affine2D().scale(0.5).rotate_deg(180)
self._snap_threshold = 5.0
self._filled = False
self._path = self._tri_path
def _set_tri_left(self):
self._transform = Affine2D().scale(0.5).rotate_deg(270)
self._snap_threshold = 5.0
self._filled = False
self._path = self._tri_path
def _set_tri_right(self):
self._transform = Affine2D().scale(0.5).rotate_deg(90)
self._snap_threshold = 5.0
self._filled = False
self._path = self._tri_path
_caret_path = Path([[-1.0, 1.5], [0.0, 0.0], [1.0, 1.5]])
def _set_caretdown(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 3.0
self._filled = False
self._path = self._caret_path
self._joinstyle = 'miter'
def _set_caretup(self):
self._transform = Affine2D().scale(0.5).rotate_deg(180)
self._snap_threshold = 3.0
self._filled = False
self._path = self._caret_path
self._joinstyle = 'miter'
def _set_caretleft(self):
self._transform = Affine2D().scale(0.5).rotate_deg(270)
self._snap_threshold = 3.0
self._filled = False
self._path = self._caret_path
self._joinstyle = 'miter'
def _set_caretright(self):
self._transform = Affine2D().scale(0.5).rotate_deg(90)
self._snap_threshold = 3.0
self._filled = False
self._path = self._caret_path
self._joinstyle = 'miter'
_x_path = Path([[-1.0, -1.0], [1.0, 1.0],
[-1.0, 1.0], [1.0, -1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _set_x(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 3.0
self._filled = False
self._path = self._x_path
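# --- Illustrative sketch (added; not part of the upstream matplotlib file) ---
# The module docstring above lists several ways to specify a marker: a named
# marker, a (numsides, style, angle) tuple, or a raw list of vertices.  The
# helper below shows how each form is resolved by MarkerStyle into a Path.
# It only uses the public accessors defined in this file; the helper name and
# the example specs are illustrative additions and are never called here.
def _demo_marker_specs():
    example_specs = [
        'o',                                   # named marker: circle
        (5, 1, 0.0),                           # 5-pointed star, no rotation
        [(0.0, 0.0), (1.0, 0.0), (0.5, 1.0)],  # raw vertices (triangle)
    ]
    for spec in example_specs:
        style = MarkerStyle(spec, fillstyle='full')
        path = style.get_path()
        print(spec, '->', len(path.vertices), 'vertices,',
              'filled' if style.is_filled() else 'unfilled')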
| lgpl-3.0 |
lcharleux/compmod | doc/sandbox/awa-pascale/compart_classes.py | 1 | 5764 | from abapy import materials
from abapy.misc import load
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import pandas as pd
import pickle, copy, platform, compmod, os
from scipy import optimize, interpolate
def Tensile_Test(settings):
args = settings.copy()
# MATERIALS CREATION
Ne = settings['Nx'] * settings['Ny']
if settings['is_3D']: Ne *= settings['Nz']
if settings['compart']:
E = settings["E"] * np.ones(Ne) # Young's modulus
nu = settings["nu"] * np.ones(Ne) # Poisson's ratio
sy_mean = settings["sy_mean"] * np.ones(Ne)
sigma_sat = settings["sigma_sat"] * np.ones(Ne)
n_hol = settings["n_hol"] * np.ones(Ne)
n_bil = settings["n_bil"] * np.ones(Ne)
sy = compmod.distributions.Rayleigh(settings["sy_mean"]).rvs(Ne)
labels = ['mat_{0}'.format(i+1) for i in xrange(len(sy_mean))]
if args['material_type'] == "Bilinear":
args['material'] = [materials.Bilinear(labels = labels[i],
E = E[i], nu = nu[i], Ssat = sigma_sat[i],
n=n_bil[i], sy = sy[i]) for i in xrange(Ne)]
if args['material_type'] == "Hollomon":
args['material'] = [materials.Hollomon(labels = labels[i],
E = E[i], nu = nu[i], n=n_hol[i],
sy = sy[i]) for i in xrange(Ne)]
else:
labels = 'SAMPLE_MAT'
if args['material_type'] == "Bilinear":
args['material'] = materials.Bilinear(labels = labels,
E = settings["E"],
nu = settings["nu"],
sy = settings["sy_mean"],
Ssat = settings["sigma_sat"],
n = settings["n_bil"])
if args['material_type'] == "Hollomon":
args['material'] = materials.Hollomon(labels = labels,
E = settings["E"],
nu = settings["nu"],
sy = settings["sy_mean"],
n = settings["n_hol"])
m = compmod.models.CuboidTest(**args)
m.MakeInp()
m.Run()
m.MakePostProc()
m.RunPostProc()
m.LoadResults()
# Plotting results
if m.outputs['completed']:
# History Outputs
disp = np.array(m.outputs['history']['disp'].values()[0].data[0])
force = np.array(np.array(m.outputs['history']['force'].values()).sum().data[0])
volume = np.array(np.array(m.outputs['history']['volume'].values()).sum().data[0])
length = settings['ly'] + disp
surface = volume / length
logstrain = np.log10(1. + disp / settings['ly'])
linstrain = disp/ settings['ly']
strain = linstrain
stress = force / surface
output = {}
output["force"] = force
output["disp"] = disp
output["stress"] = stress
output["strain"] = strain
output["volume"] = volume
output["length"] = length
df = pd.DataFrame(output)
df.to_csv("{0}{1}.csv".format(settings["workdir"], settings["label"]), index = False)
df.to_excel("{0}{1}.xls".format(settings["workdir"], settings["label"]), index = False)
inputs = pd.DataFrame(settings, index = [0])
inputs.transpose().to_csv("{0}{1}_inputs.csv".format(settings["workdir"], settings["label"]))
class Optimize(object):
def __init__(self, settings):
settings = settings.copy()
settings['compart'] = True # True: compartmentalized, False: homogeneous
settings["material_type"] = "Bilinear" # "Bilinear" or "Hollomon"
self.settings = settings
self.inputs = []
self.sim_id = 1
exp = pd.read_csv(settings["expdir"] + settings["experiment"],
delim_whitespace = True)
exp.stress *= settings['exp_stress_factor']
exp.strain *= settings['exp_strain_factor']
# STRAIN GRID FOR LEAST SQUARE OPTIMIZATION
self.strain_grid = np.linspace(
max(settings["eps_lim_min"], exp.strain.min()),
min(settings["eps_lim_max"], exp.strain.max()),
100) # Strain grid
self.sigma_exp_grid = interpolate.interp1d(exp.strain, exp.stress)(self.strain_grid)
def Cost_Function(self, X):
settings = self.settings.copy()
settings["sy_mean"] = X[0] # [Pa] (only for bilinear)
settings["n_bil"] = X[1] # [Pa] Bilinear hardening
settings["sigma_sat"] = X[2] # [Pa]
# SIMULATION LABEL RENAMING
settings["label"] += "_{0}".format(self.sim_id)
Tensile_Test(settings)
sim = pd.read_csv(settings["workdir"] + settings["label"] + ".csv")
strain_grid = self.strain_grid
sigma_sim_grid = interpolate.interp1d(sim.strain, sim.stress)(strain_grid)
err = ((sigma_sim_grid - self.sigma_exp_grid)**2).sum()/len(strain_grid)
self.inputs.append([self.sim_id] + list(X))
self.sim_id +=1
return err
def run(self):
# EXPERIMENTAL DATA
settings = self.settings
# OPTIMIZATION START POINT
X0 = np.array([settings["sy_mean"], settings["n_bil"], settings['sigma_sat']])
sol_compart = optimize.minimize(self.Cost_Function, X0,
method = "Nelder-Mead",
options = {"maxfev": settings["max_number_of_simulations"],
"maxiter": settings["max_number_of_iterations"]})
self.sol = sol_compart
inputs = np.array(self.inputs).transpose()
df = pd.DataFrame({"sim_id": inputs[0],
"sy_mean":inputs[1],
"n_bil": inputs[2],
"sigma_sat":inputs[3],})
df.to_csv("{0}{1}_opti_results.csv".format(settings["workdir"], settings["label"]), index = False)
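# --- Hedged usage sketch (added; not part of the original script) ---
# The dictionary below only lists the settings keys that are read explicitly
# by Tensile_Test and Optimize above.  Every value and path is a placeholder,
# and compmod.models.CuboidTest may require further keys (mesh/solver options)
# that are not visible in this file, so treat this purely as an illustration.
if __name__ == "__main__":
    example_settings = {
        "Nx": 10, "Ny": 10, "Nz": 10, "is_3D": False,
        "E": 210.e3, "nu": 0.3,
        "sy_mean": 200., "sigma_sat": 400., "n_hol": 0.1, "n_bil": 1000.,
        "ly": 1., "workdir": "workdir/", "label": "tensile_demo",
        "expdir": "experiments/", "experiment": "exp_data.txt",
        "exp_stress_factor": 1., "exp_strain_factor": 1.,
        "eps_lim_min": 0., "eps_lim_max": 0.1,
        "max_number_of_simulations": 20, "max_number_of_iterations": 20,
    }
    Optimize(example_settings).run()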
| gpl-2.0 |
jreback/pandas | pandas/tests/frame/apply/test_apply_relabeling.py | 4 | 3679 | import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
class TestDataFrameNamedAggregate:
def test_agg_relabel(self):
# GH 26513
df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]})
# simplest case with one column, one func
result = df.agg(foo=("B", "sum"))
expected = pd.DataFrame({"B": [10]}, index=pd.Index(["foo"]))
tm.assert_frame_equal(result, expected)
# test on same column with different methods
result = df.agg(foo=("B", "sum"), bar=("B", "min"))
expected = pd.DataFrame({"B": [10, 1]}, index=pd.Index(["foo", "bar"]))
tm.assert_frame_equal(result, expected)
def test_agg_relabel_multi_columns_multi_methods(self):
# GH 26513, test on multiple columns with multiple methods
df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]})
result = df.agg(
foo=("A", "sum"),
bar=("B", "mean"),
cat=("A", "min"),
dat=("B", "max"),
f=("A", "max"),
g=("C", "min"),
)
expected = pd.DataFrame(
{
"A": [6.0, np.nan, 1.0, np.nan, 2.0, np.nan],
"B": [np.nan, 2.5, np.nan, 4.0, np.nan, np.nan],
"C": [np.nan, np.nan, np.nan, np.nan, np.nan, 3.0],
},
index=pd.Index(["foo", "bar", "cat", "dat", "f", "g"]),
)
tm.assert_frame_equal(result, expected)
def test_agg_relabel_partial_functions(self):
# GH 26513, test on partial, functools or more complex cases
df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]})
result = df.agg(foo=("A", np.mean), bar=("A", "mean"), cat=("A", min))
expected = pd.DataFrame(
{"A": [1.5, 1.5, 1.0]}, index=pd.Index(["foo", "bar", "cat"])
)
tm.assert_frame_equal(result, expected)
result = df.agg(
foo=("A", min),
bar=("A", np.min),
cat=("B", max),
dat=("C", "min"),
f=("B", np.sum),
kk=("B", lambda x: min(x)),
)
expected = pd.DataFrame(
{
"A": [1.0, 1.0, np.nan, np.nan, np.nan, np.nan],
"B": [np.nan, np.nan, 4.0, np.nan, 10.0, 1.0],
"C": [np.nan, np.nan, np.nan, 3.0, np.nan, np.nan],
},
index=pd.Index(["foo", "bar", "cat", "dat", "f", "kk"]),
)
tm.assert_frame_equal(result, expected)
def test_agg_namedtuple(self):
# GH 26513
df = pd.DataFrame({"A": [0, 1], "B": [1, 2]})
result = df.agg(
foo=pd.NamedAgg("B", "sum"),
bar=pd.NamedAgg("B", min),
cat=pd.NamedAgg(column="B", aggfunc="count"),
fft=pd.NamedAgg("B", aggfunc="max"),
)
expected = pd.DataFrame(
{"B": [3, 1, 2, 2]}, index=pd.Index(["foo", "bar", "cat", "fft"])
)
tm.assert_frame_equal(result, expected)
result = df.agg(
foo=pd.NamedAgg("A", "min"),
bar=pd.NamedAgg(column="B", aggfunc="max"),
cat=pd.NamedAgg(column="A", aggfunc="max"),
)
expected = pd.DataFrame(
{"A": [0.0, np.nan, 1.0], "B": [np.nan, 2.0, np.nan]},
index=pd.Index(["foo", "bar", "cat"]),
)
tm.assert_frame_equal(result, expected)
def test_agg_raises(self):
# GH 26513
df = pd.DataFrame({"A": [0, 1], "B": [1, 2]})
msg = "Must provide"
with pytest.raises(TypeError, match=msg):
df.agg()
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/neural_network/tests/test_rbm.py | 17 | 6222 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
"""BernoulliRBM should work on small sparse matrices."""
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
"""
Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
from the same input
"""
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# you need that many iterations
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
"""
Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
the same input even when the input is sparse, and test against non-sparse
"""
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
"""Check if we don't get NaNs sampling the full digits dataset."""
rng = np.random.RandomState(42)
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=rng)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
def test_score_samples():
"""Test score_samples (pseudo-likelihood) method."""
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples(np.arange(1000) * 100)
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
"""
Make sure RBM works with sparse input when verbose=True
"""
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
jdoherty7/Adaptive_Interpolation | tests/vis.py | 1 | 1263 | import matplotlib.pyplot as plt
import numpy as np
D = [4, 6, 9, 13]
Gnd = np.array([ 3.6 , 3.11, 3.51, 0.93])
Mnd = np.array([10.81, 10.58, 10.52, 3.16])
Gcd = np.array([3.6, 3.47, 3.51, 1.74])
Mcd = np.array([7.84, 8.2, 7.96, 4.1])
plt.figure()
plt.title("Performance Total")
plt.subplot(211)
plt.ylabel("GFLOPS/s")
plt.plot(D, Gnd, c='b', label="store interval")
plt.plot(D, Gcd, c='r', label="calculate interval")
plt.subplot(212)
plt.ylabel("Memory Bandwidth (GB/s)")
plt.plot(D, Mnd, c='b', label="store interval")
plt.plot(D, Mcd, c='r', label="calculate interval")
plt.legend()
plt.show()
plt.figure()
plt.title("Performance Percent")
#plt.subplot(211)
#plt.ylabel("GFLOPS/s")
plt.ylabel("Percent of peak performance")
plt.plot(D, Gnd/35.2, c='b', label="store interval, GFLOPS")
plt.plot(D, Gcd/35.2, c='r', label="calculate interval, GFLOPS")
#plt.subplot(212)
#plt.ylabel("Memory Bandwidth (GB/s)")
plt.plot(D, Mnd/18.73, marker="o", c='b', label="store interval, MB")
plt.plot(D, Mcd/18.73, marker="o", c='r', label="calculate interval, MB")
#plt.plot(D, Gnd/35.2 + Mnd/18.73, marker="s", c='b', label="total store interval")
#plt.plot(D, Gcd/35.2 + Mcd/18.73, marker="s", c='r', label="total calculate interval")
plt.legend()
plt.show() | mit |
alberto-antonietti/nest-simulator | pynest/examples/clopath_synapse_spike_pairing.py | 12 | 5804 | # -*- coding: utf-8 -*-
#
# clopath_synapse_spike_pairing.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Clopath Rule: Spike pairing experiment
----------------------------------------
This script simulates one ``aeif_psc_delta_clopath`` neuron that is connected with
a Clopath connection [1]_. The synapse receives pairs of a pre- and a postsynaptic
spikes that are separated by either 10 ms (pre before post) or -10 ms (post
before pre). The change of the synaptic weight is measured after five of such
pairs. This experiment is repeated five times with different rates of the
sequence of the spike pairs: 10Hz, 20Hz, 30Hz, 40Hz, and 50Hz.
References
~~~~~~~~~~~
.. [1] Clopath C, Büsing L, Vasilaki E, Gerstner W (2010). Connectivity reflects coding:
a model of voltage-based STDP with homeostasis.
Nature Neuroscience 13:3, 344--352
"""
import numpy as np
import matplotlib.pyplot as plt
import nest
##############################################################################
# First we specify the neuron parameters. To enable voltage dependent
# prefactor ``A_LTD(u_bar_bar)`` add ``A_LTD_const: False`` to the dictionary.
nrn_params = {'V_m': -70.6,
'E_L': -70.6,
'C_m': 281.0,
'theta_minus': -70.6,
'theta_plus': -45.3,
'A_LTD': 14.0e-5,
'A_LTP': 8.0e-5,
'tau_minus': 10.0,
'tau_plus': 7.0,
'delay_u_bars': 4.0,
'a': 4.0,
'b': 0.0805,
'V_reset': -70.6 + 21.0,
'V_clamp': 33.0,
't_clamp': 2.0,
't_ref': 0.0,
}
##############################################################################
# Hardcoded spike times of presynaptic spike generator
spike_times_pre = [
# Presynaptic spike before the postsynaptic
[20., 120., 220., 320., 420.],
[20., 70., 120., 170., 220.],
[20., 53.3, 86.7, 120., 153.3],
[20., 45., 70., 95., 120.],
[20., 40., 60., 80., 100.],
# Presynaptic spike after the postsynaptic
[120., 220., 320., 420., 520., 620.],
[70., 120., 170., 220., 270., 320.],
[53.3, 86.6, 120., 153.3, 186.6, 220.],
[45., 70., 95., 120., 145., 170.],
[40., 60., 80., 100., 120., 140.]]
##############################################################################
# Hardcoded spike times of postsynaptic spike generator
spike_times_post = [
[10., 110., 210., 310., 410.],
[10., 60., 110., 160., 210.],
[10., 43.3, 76.7, 110., 143.3],
[10., 35., 60., 85., 110.],
[10., 30., 50., 70., 90.],
[130., 230., 330., 430., 530., 630.],
[80., 130., 180., 230., 280., 330.],
[63.3, 96.6, 130., 163.3, 196.6, 230.],
[55., 80., 105., 130., 155., 180.],
[50., 70., 90., 110., 130., 150.]]
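##############################################################################
# Hedged illustration (added; not part of the original NEST example): the
# hard-coded lists above are consistent, up to rounding, with spike pairs that
# are offset by 10 ms and repeated at the pairing frequency rho.  The helper
# below reconstructs such pairings; it is an assumption about how the lists
# were generated and is not used by the simulation itself.
def pairing_times(rho_hz, n_pairs=5, offset_ms=10.0, t0_ms=10.0):
    period_ms = 1000.0 / rho_hz
    post = [t0_ms + i * period_ms for i in range(n_pairs)]
    pre = [t + offset_ms for t in post]
    return pre, post
# e.g. pairing_times(20.0) gives pre = [20., 70., 120., 170., 220.] and
# post = [10., 60., 110., 160., 210.], matching the 20 Hz entries above.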
init_w = 0.5
syn_weights = []
resolution = 0.1
##############################################################################
# Loop over pairs of spike trains
for (s_t_pre, s_t_post) in zip(spike_times_pre, spike_times_post):
nest.ResetKernel()
nest.SetKernelStatus({"resolution": resolution})
# Create one neuron
nrn = nest.Create("aeif_psc_delta_clopath", 1, nrn_params)
# We need a parrot neuron since spike generators can only
# be connected with static connections
prrt_nrn = nest.Create("parrot_neuron", 1)
# Create and connect spike generators
spike_gen_pre = nest.Create("spike_generator", 1, {
"spike_times": s_t_pre})
nest.Connect(spike_gen_pre, prrt_nrn,
syn_spec={"delay": resolution})
spike_gen_post = nest.Create("spike_generator", 1, {
"spike_times": s_t_post})
nest.Connect(spike_gen_post, nrn, syn_spec={
"delay": resolution, "weight": 80.0})
# Create weight recorder
wr = nest.Create('weight_recorder', 1)
# Create Clopath connection with weight recorder
nest.CopyModel("clopath_synapse", "clopath_synapse_rec",
{"weight_recorder": wr})
syn_dict = {"synapse_model": "clopath_synapse_rec",
"weight": init_w, "delay": resolution}
nest.Connect(prrt_nrn, nrn, syn_spec=syn_dict)
# Simulation
simulation_time = (10.0 + max(s_t_pre[-1], s_t_post[-1]))
nest.Simulate(simulation_time)
# Extract and save synaptic weights
weights = wr.get("events", "weights")
syn_weights.append(weights[-1])
syn_weights = np.array(syn_weights)
# scaling of the weights so that they are comparable to [1]
syn_weights = 100.0*15.0*(syn_weights - init_w)/init_w + 100.0
# Plot results
fig1, axA = plt.subplots(1, sharex=False)
axA.plot([10., 20., 30., 40., 50.], syn_weights[5:], color='b', lw=2.5, ls='-',
label="pre-post pairing")
axA.plot([10., 20., 30., 40., 50.], syn_weights[:5], color='g', lw=2.5, ls='-',
label="post-pre pairing")
axA.set_ylabel("normalized weight change")
axA.set_xlabel("rho (Hz)")
axA.legend()
axA.set_title("synaptic weight")
plt.show()
| gpl-2.0 |
andrewcbennett/iris | lib/iris/tests/test_pandas.py | 1 | 17642 | # (C) British Crown Copyright 2013 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import copy
import datetime
import unittest
import cf_units
import matplotlib.units
import netcdftime
import numpy as np
# Importing pandas has the side-effect of messing with the formatters
# used by matplotlib for handling dates.
default_units_registry = copy.copy(matplotlib.units.registry)
try:
import pandas
except ImportError:
# Disable all these tests if pandas is not installed.
pandas = None
matplotlib.units.registry = default_units_registry
skip_pandas = unittest.skipIf(pandas is None,
'Test(s) require "pandas", '
'which is not available.')
if pandas is not None:
from iris.coords import DimCoord
from iris.cube import Cube
import iris.pandas
import cf_units
@skip_pandas
class TestAsSeries(tests.IrisTest):
"""Test conversion of 1D cubes to Pandas using as_series()"""
def test_no_dim_coord(self):
cube = Cube(np.array([0, 1, 2, 3, 4]), long_name="foo")
series = iris.pandas.as_series(cube)
self.assertArrayEqual(series, cube.data)
self.assertString(
str(series),
tests.get_result_path(('pandas', 'as_series',
'no_dim_coord.txt')))
def test_simple(self):
cube = Cube(np.array([0, 1, 2, 3, 4.4]), long_name="foo")
dim_coord = DimCoord([5, 6, 7, 8, 9], long_name="bar")
cube.add_dim_coord(dim_coord, 0)
series = iris.pandas.as_series(cube)
self.assertArrayEqual(series, cube.data)
self.assertString(
str(series),
tests.get_result_path(('pandas', 'as_series', 'simple.txt')))
def test_masked(self):
data = np.ma.MaskedArray([0, 1, 2, 3, 4.4], mask=[0, 1, 0, 1, 0])
cube = Cube(data, long_name="foo")
series = iris.pandas.as_series(cube)
self.assertArrayEqual(series, cube.data.astype('f').filled(np.nan))
self.assertString(
str(series),
tests.get_result_path(('pandas', 'as_series', 'masked.txt')))
def test_time_gregorian(self):
cube = Cube(np.array([0, 1, 2, 3, 4]), long_name="ts")
time_coord = DimCoord([0, 100.1, 200.2, 300.3, 400.4],
long_name="time",
units="days since 2000-01-01 00:00")
cube.add_dim_coord(time_coord, 0)
series = iris.pandas.as_series(cube)
self.assertArrayEqual(series, cube.data)
self.assertString(
str(series),
tests.get_result_path(('pandas', 'as_series',
'time_gregorian.txt')))
def test_time_360(self):
cube = Cube(np.array([0, 1, 2, 3, 4]), long_name="ts")
time_unit = cf_units.Unit("days since 2000-01-01 00:00",
calendar=cf_units.CALENDAR_360_DAY)
time_coord = DimCoord([0, 100.1, 200.2, 300.3, 400.4],
long_name="time", units=time_unit)
cube.add_dim_coord(time_coord, 0)
series = iris.pandas.as_series(cube)
self.assertArrayEqual(series, cube.data)
self.assertString(
str(series),
tests.get_result_path(('pandas', 'as_series',
'time_360.txt')))
def test_copy_true(self):
cube = Cube(np.array([0, 1, 2, 3, 4]), long_name="foo")
series = iris.pandas.as_series(cube)
series[0] = 99
self.assertEqual(cube.data[0], 0)
def test_copy_int32_false(self):
cube = Cube(np.array([0, 1, 2, 3, 4], dtype=np.int32), long_name="foo")
series = iris.pandas.as_series(cube, copy=False)
series[0] = 99
self.assertEqual(cube.data[0], 99)
def test_copy_int64_false(self):
cube = Cube(np.array([0, 1, 2, 3, 4], dtype=np.int32), long_name="foo")
series = iris.pandas.as_series(cube, copy=False)
series[0] = 99
self.assertEqual(cube.data[0], 99)
def test_copy_float_false(self):
cube = Cube(np.array([0, 1, 2, 3.3, 4]), long_name="foo")
series = iris.pandas.as_series(cube, copy=False)
series[0] = 99
self.assertEqual(cube.data[0], 99)
def test_copy_masked_true(self):
data = np.ma.MaskedArray([0, 1, 2, 3, 4], mask=[0, 1, 0, 1, 0])
cube = Cube(data, long_name="foo")
series = iris.pandas.as_series(cube)
series[0] = 99
self.assertEqual(cube.data[0], 0)
def test_copy_masked_false(self):
data = np.ma.MaskedArray([0, 1, 2, 3, 4], mask=[0, 1, 0, 1, 0])
cube = Cube(data, long_name="foo")
with self.assertRaises(ValueError):
series = iris.pandas.as_series(cube, copy=False)
@skip_pandas
class TestAsDataFrame(tests.IrisTest):
"""Test conversion of 2D cubes to Pandas using as_data_frame()"""
def test_no_dim_coords(self):
cube = Cube(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),
long_name="foo")
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertString(
str(data_frame),
tests.get_result_path(('pandas', 'as_dataframe',
'no_dim_coords.txt')))
def test_no_x_coord(self):
cube = Cube(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),
long_name="foo")
y_coord = DimCoord([10, 11], long_name="bar")
cube.add_dim_coord(y_coord, 0)
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertString(
str(data_frame),
tests.get_result_path(('pandas', 'as_dataframe',
'no_x_coord.txt')))
def test_no_y_coord(self):
cube = Cube(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),
long_name="foo")
x_coord = DimCoord([10, 11, 12, 13, 14], long_name="bar")
cube.add_dim_coord(x_coord, 1)
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertString(
str(data_frame),
tests.get_result_path(('pandas', 'as_dataframe',
'no_y_coord.txt')))
def test_simple(self):
cube = Cube(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),
long_name="foo")
x_coord = DimCoord([10, 11, 12, 13, 14], long_name="bar")
y_coord = DimCoord([15, 16], long_name="milk")
cube.add_dim_coord(x_coord, 1)
cube.add_dim_coord(y_coord, 0)
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertString(
str(data_frame),
tests.get_result_path(('pandas', 'as_dataframe',
'simple.txt')))
def test_masked(self):
data = np.ma.MaskedArray([[0, 1, 2, 3, 4.4], [5, 6, 7, 8, 9]],
mask=[[0, 1, 0, 1, 0], [1, 0, 1, 0, 1]])
cube = Cube(data, long_name="foo")
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data.astype('f').filled(np.nan))
self.assertString(
str(data_frame),
tests.get_result_path(('pandas', 'as_dataframe',
'masked.txt')))
def test_time_gregorian(self):
cube = Cube(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),
long_name="ts")
day_offsets = [0, 100.1, 200.2, 300.3, 400.4]
time_coord = DimCoord(day_offsets, long_name="time",
units="days since 2000-01-01 00:00")
cube.add_dim_coord(time_coord, 1)
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
nanoseconds_per_day = 24 * 60 * 60 * 1000000000
days_to_2000 = 365 * 30 + 7
# pandas Timestamp class cannot handle floats in pandas <v0.12
timestamps = [pandas.Timestamp(int(nanoseconds_per_day *
(days_to_2000 + day_offset)))
for day_offset in day_offsets]
self.assertTrue(all(data_frame.columns == timestamps))
self.assertTrue(all(data_frame.index == [0, 1]))
def test_time_360(self):
cube = Cube(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),
long_name="ts")
time_unit = cf_units.Unit("days since 2000-01-01 00:00",
calendar=cf_units.CALENDAR_360_DAY)
time_coord = DimCoord([100.1, 200.2], long_name="time",
units=time_unit)
cube.add_dim_coord(time_coord, 0)
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertString(
str(data_frame),
tests.get_result_path(('pandas', 'as_dataframe',
'time_360.txt')))
def test_copy_true(self):
cube = Cube(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),
long_name="foo")
data_frame = iris.pandas.as_data_frame(cube)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 0)
def test_copy_int32_false(self):
cube = Cube(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
dtype=np.int32), long_name="foo")
data_frame = iris.pandas.as_data_frame(cube, copy=False)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 99)
def test_copy_int64_false(self):
cube = Cube(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
dtype=np.int64), long_name="foo")
data_frame = iris.pandas.as_data_frame(cube, copy=False)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 99)
def test_copy_float_false(self):
cube = Cube(np.array([[0, 1, 2, 3, 4.4], [5, 6, 7, 8, 9]]),
long_name="foo")
data_frame = iris.pandas.as_data_frame(cube, copy=False)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 99)
def test_copy_masked_true(self):
data = np.ma.MaskedArray([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
mask=[[0, 1, 0, 1, 0], [1, 0, 1, 0, 1]])
cube = Cube(data, long_name="foo")
data_frame = iris.pandas.as_data_frame(cube)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 0)
def test_copy_masked_false(self):
data = np.ma.MaskedArray([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
mask=[[0, 1, 0, 1, 0], [1, 0, 1, 0, 1]])
cube = Cube(data, long_name="foo")
with self.assertRaises(ValueError):
data_frame = iris.pandas.as_data_frame(cube, copy=False)
@skip_pandas
class TestSeriesAsCube(tests.IrisTest):
def test_series_simple(self):
series = pandas.Series([0, 1, 2, 3, 4], index=[5, 6, 7, 8, 9])
self.assertCML(
iris.pandas.as_cube(series),
tests.get_result_path(('pandas', 'as_cube', 'series_simple.cml')))
def test_series_object(self):
class Thing(object):
def __repr__(self):
return "A Thing"
series = pandas.Series(
[0, 1, 2, 3, 4],
index=[Thing(), Thing(), Thing(), Thing(), Thing()])
self.assertCML(
iris.pandas.as_cube(series),
tests.get_result_path(('pandas', 'as_cube', 'series_object.cml')))
def test_series_masked(self):
series = pandas.Series([0, float('nan'), 2, np.nan, 4],
index=[5, 6, 7, 8, 9])
self.assertCML(
iris.pandas.as_cube(series),
tests.get_result_path(('pandas', 'as_cube', 'series_masked.cml')))
def test_series_datetime_gregorian(self):
series = pandas.Series(
[0, 1, 2, 3, 4],
index=[datetime.datetime(2001, 1, 1, 1, 1, 1),
datetime.datetime(2002, 2, 2, 2, 2, 2),
datetime.datetime(2003, 3, 3, 3, 3, 3),
datetime.datetime(2004, 4, 4, 4, 4, 4),
datetime.datetime(2005, 5, 5, 5, 5, 5)])
self.assertCML(
iris.pandas.as_cube(series),
tests.get_result_path(('pandas', 'as_cube',
'series_datetime_gregorian.cml')))
def test_series_netcdftime_360(self):
series = pandas.Series(
[0, 1, 2, 3, 4],
index=[netcdftime.datetime(2001, 1, 1, 1, 1, 1),
netcdftime.datetime(2002, 2, 2, 2, 2, 2),
netcdftime.datetime(2003, 3, 3, 3, 3, 3),
netcdftime.datetime(2004, 4, 4, 4, 4, 4),
netcdftime.datetime(2005, 5, 5, 5, 5, 5)])
self.assertCML(
iris.pandas.as_cube(series,
calendars={0: cf_units.CALENDAR_360_DAY}),
tests.get_result_path(('pandas', 'as_cube',
'series_netcdfimte_360.cml')))
def test_copy_true(self):
series = pandas.Series([0, 1, 2, 3, 4], index=[5, 6, 7, 8, 9])
cube = iris.pandas.as_cube(series)
cube.data[0] = 99
self.assertEqual(series[5], 0)
def test_copy_false(self):
series = pandas.Series([0, 1, 2, 3, 4], index=[5, 6, 7, 8, 9])
cube = iris.pandas.as_cube(series, copy=False)
cube.data[0] = 99
self.assertEqual(series[5], 99)
@skip_pandas
class TestDataFrameAsCube(tests.IrisTest):
def test_data_frame_simple(self):
data_frame = pandas.DataFrame([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]],
index=[10, 11],
columns=[12, 13, 14, 15, 16])
self.assertCML(
iris.pandas.as_cube(data_frame),
tests.get_result_path(('pandas', 'as_cube',
'data_frame_simple.cml')))
def test_data_frame_nonotonic(self):
data_frame = pandas.DataFrame([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]],
index=[10, 10],
columns=[12, 12, 14, 15, 16])
self.assertCML(
iris.pandas.as_cube(data_frame),
tests.get_result_path(('pandas', 'as_cube',
'data_frame_nonotonic.cml')))
def test_data_frame_masked(self):
data_frame = pandas.DataFrame([[0, float('nan'), 2, 3, 4],
[5, 6, 7, np.nan, 9]],
index=[10, 11],
columns=[12, 13, 14, 15, 16])
self.assertCML(
iris.pandas.as_cube(data_frame),
tests.get_result_path(('pandas', 'as_cube',
'data_frame_masked.cml')))
def test_data_frame_netcdftime_360(self):
data_frame = pandas.DataFrame(
[[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]],
index=[netcdftime.datetime(2001, 1, 1, 1, 1, 1),
netcdftime.datetime(2002, 2, 2, 2, 2, 2)],
columns=[10, 11, 12, 13, 14])
self.assertCML(
iris.pandas.as_cube(
data_frame,
calendars={0: cf_units.CALENDAR_360_DAY}),
tests.get_result_path(('pandas', 'as_cube',
'data_frame_netcdftime_360.cml')))
def test_data_frame_datetime_gregorian(self):
data_frame = pandas.DataFrame(
[[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]],
index=[datetime.datetime(2001, 1, 1, 1, 1, 1),
datetime.datetime(2002, 2, 2, 2, 2, 2)],
columns=[10, 11, 12, 13, 14])
self.assertCML(
iris.pandas.as_cube(data_frame),
tests.get_result_path(('pandas', 'as_cube',
'data_frame_datetime_gregorian.cml')))
def test_copy_true(self):
data_frame = pandas.DataFrame([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
cube = iris.pandas.as_cube(data_frame)
cube.data[0, 0] = 99
self.assertEqual(data_frame[0][0], 0)
def test_copy_false(self):
data_frame = pandas.DataFrame([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
cube = iris.pandas.as_cube(data_frame, copy=False)
cube.data[0, 0] = 99
self.assertEqual(data_frame[0][0], 99)
if __name__ == "__main__":
tests.main()
| gpl-3.0 |
flightcom/freqtrade | freqtrade/tests/strategy/test_default_strategy.py | 1 | 1256 | import json
import pytest
from pandas import DataFrame
from freqtrade.strategy.default_strategy import DefaultStrategy, class_name
from freqtrade.analyze import parse_ticker_dataframe
@pytest.fixture
def result():
with open('freqtrade/tests/testdata/BTC_ETH-1.json') as data_file:
return parse_ticker_dataframe(json.load(data_file))
def test_default_strategy_class_name():
assert class_name == DefaultStrategy.__name__
def test_default_strategy_structure():
assert hasattr(DefaultStrategy, 'minimal_roi')
assert hasattr(DefaultStrategy, 'stoploss')
assert hasattr(DefaultStrategy, 'ticker_interval')
assert hasattr(DefaultStrategy, 'populate_indicators')
assert hasattr(DefaultStrategy, 'populate_buy_trend')
assert hasattr(DefaultStrategy, 'populate_sell_trend')
def test_default_strategy(result):
strategy = DefaultStrategy()
assert type(strategy.minimal_roi) is dict
assert type(strategy.stoploss) is float
assert type(strategy.ticker_interval) is int
indicators = strategy.populate_indicators(result)
assert type(indicators) is DataFrame
assert type(strategy.populate_buy_trend(indicators)) is DataFrame
assert type(strategy.populate_sell_trend(indicators)) is DataFrame
| gpl-3.0 |
OpenSoccerManager/opensoccermanager | uigtk/evaluation.py | 1 | 1745 | #!/usr/bin/env python3
# This file is part of OpenSoccerManager.
#
# OpenSoccerManager is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# OpenSoccerManager is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# OpenSoccerManager. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk
from matplotlib.figure import Figure
from matplotlib.backends.backend_gtk3cairo import FigureCanvasGTK3Cairo as FigureCanvas
import uigtk.widgets
class Evaluation(uigtk.widgets.Grid):
__name__ = "evaluation"
def __init__(self):
uigtk.widgets.Grid.__init__(self)
figure = Figure()
axis = figure.add_subplot(1, 1, 1)
axis.set_xlim(0, 46)
axis.set_xlabel("Week")
axis.set_ylim(0, 100)
axis.set_ylabel("Percentage Rating")
values = [0] * 46
line, = axis.plot(values, label='Chairman')
line, = axis.plot(values, label='Staff')
line, = axis.plot(values, label='Fans')
line, = axis.plot(values, label='Finances')
line, = axis.plot(values, label='Media')
axis.legend()
figurecanvas = FigureCanvas(figure)
figurecanvas.set_hexpand(True)
figurecanvas.set_vexpand(True)
self.add(figurecanvas)
def run(self):
self.show_all()
| gpl-3.0 |
yutiansut/QUANTAXIS | QUANTAXIS/QASU/trans_gm.py | 2 | 5663 | """
Convert locally stored GoldMiner (gm) minute data into the QA format.
Users who already have the GoldMiner data can run the conversion themselves.
"""
import datetime
import concurrent.futures
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import pandas as pd
import pymongo
import os
import QUANTAXIS as QA
from QUANTAXIS.QAFetch.QATdx import QA_fetch_get_stock_list
from QUANTAXIS.QAUtil import (
DATABASE, QA_util_date_stamp, QA_util_get_real_date, QA_util_log_info,
QA_util_time_stamp, QA_util_to_json_from_pandas, trade_date_sse)
def QA_SU_trans_stock_min(client=DATABASE, ui_log=None, ui_progress=None,
data_path: str = "D:\\gm\\", type_="1min"):
"""
Import local GoldMiner (gm) data into the QA database.
:param client:
:param ui_log:
:param ui_progress:
:param data_path: directory containing the GoldMiner data; file names are expected to look like "SHSE.600000.csv"
"""
code_list = list(map(lambda x: x.split(".")[1], os.listdir(data_path)))
coll = client.stock_min
coll.create_index([
("code", pymongo.ASCENDING),
("time_stamp", pymongo.ASCENDING),
("date_stamp", pymongo.ASCENDING),
])
err = []
def __transform_gm_to_qa(file_path: str = None, end_time: str = None, type_="1min"):
"""
Load the corresponding csv file and normalize its format.
1. The GoldMiner data format below is assumed:
amount bob close eob frequency high low open position pre_close symbol volume
0 2522972.0 2018-08-16 09:30:00+08:00 9.84 2018-08-16 09:31:00+08:00 60s 9.87 9.84 9.87 0 0.0 SHSE.600000 255900
1 3419453.0 2018-08-16 09:31:00+08:00 9.89 2018-08-16 09:32:00+08:00 60s 9.90 9.84 9.86 0 0.0 SHSE.600000 346400
...
2. The result is matched to the format returned by QUANTAXIS.QAFetch.QATdx.QA_fetch_get_stock_min; see that source for processing details:
open close high low vol amount ...
datetime
2018-12-03 09:31:00 10.99 10.90 10.99 10.90 2.211700e+06 2.425626e+07 ...
"""
if file_path is None:
raise ValueError("file_path must be provided")
df_local = pd.read_csv(file_path)
# rename columns to the QA schema and drop unused fields
df_local = df_local.rename(columns={
"eob": "datetime",
"volume": "vol",
"symbol": "code"
}).drop(["bob", "frequency", "position", "pre_close"], axis=1)
# normalize code, datetime and date formats
df_local["code"] = df_local["code"].map(str).str.slice(5, )
df_local["datetime"] = pd.to_datetime(df_local["datetime"].map(str).str.slice(
0, 19), utc=False)
df_local["date"] = df_local.datetime.map(str).str.slice(0, 10)
df_local = df_local.set_index("datetime", drop=False)
df_local["date_stamp"] = df_local["date"].apply(
lambda x: QA_util_date_stamp(x))
df_local["time_stamp"] = (
df_local["datetime"].map(str).apply(lambda x: QA_util_time_stamp(x)))
df_local["type"] = type_
df_local = df_local.loc[slice(None, end_time)]
df_local["datetime"] = df_local["datetime"].map(str)
df_local["type"] = type_
return df_local[[
"open",
"close",
"high",
"low",
"vol",
"amount",
"datetime",
"code",
"date",
"date_stamp",
"time_stamp",
"type",
]]
def __saving_work(code, coll):
QA_util_log_info(
"##JOB03 Now Saving STOCK_MIN ==== {}".format(code), ui_log=ui_log)
try:
col_filter = {"code": code, "type": type_}
ref_ = coll.find(col_filter)
end_time = ref_[0]['datetime'] # earliest minute-bar timestamp already stored locally
filename = "SHSE."+code + \
".csv" if code[0] == '6' else "SZSE."+code+".csv"
__data = __transform_gm_to_qa(
data_path+filename, end_time, type_) # pass end_time to avoid inserting duplicate data
QA_util_log_info(
"##JOB03.{} Now Saving {} from {} to {} == {}".format(
type_,
code,
__data['datetime'].iloc[0],
__data['datetime'].iloc[-1],
type_,
),
ui_log=ui_log,
)
if len(__data) > 1:
coll.insert_many(
QA_util_to_json_from_pandas(__data)[1::])
except Exception as e:
QA_util_log_info(e, ui_log=ui_log)
err.append(code)
QA_util_log_info(err, ui_log=ui_log)
executor = ThreadPoolExecutor(max_workers=4)
res = {
executor.submit(__saving_work, code_list[i_], coll)
for i_ in range(len(code_list))
}
count = 0
for i_ in concurrent.futures.as_completed(res):
strProgress = "TRANSFORM PROGRESS {} ".format(
str(float(count / len(code_list) * 100))[0:4] + "%")
intProgress = int(count / len(code_list) * 10000.0)
count = count + 1
if len(err) < 1:
QA_util_log_info("SUCCESS", ui_log=ui_log)
else:
QA_util_log_info(" ERROR CODE \n ", ui_log=ui_log)
QA_util_log_info(err, ui_log=ui_log)
if len(err) < 1:
QA_util_log_info("SUCCESS", ui_log=ui_log)
else:
QA_util_log_info(" ERROR CODE \n ", ui_log=ui_log)
QA_util_log_info(err, ui_log=ui_log)
if __name__ == "__main__":
QA_SU_trans_stock_min()
| mit |
benhamner/GEFlightQuest | PythonModule/geflight/benchmark/only_estimated_arrival_benchmark.py | 3 | 2146 | from dateutil.parser import parse
from geflight.transform import flighthistoryevents, utilities as tu
from geflight.benchmark import utilities as bu
from geflight.benchmark import process_test_set_scaffold
import os
import pandas as pd
def get_estimated_arrival(row, arrival_type, midnight_time):
if row["estimated_%s_arrival" % arrival_type] != "MISSING":
return row["estimated_%s_arrival" % arrival_type]
return midnight_time
def process_day(day):
day.df_test_flight_history["estimated_runway_arrival"] = "MISSING"
day.df_test_flight_history["estimated_gate_arrival"] = "MISSING"
df_fhe = pd.read_csv(os.path.join(day.test_day_path, "FlightHistory",
"flighthistoryevents.csv"),
converters={"date_time_recorded": tu.parse_datetime_format6})
df_fhe = df_fhe.sort("date_time_recorded")
for i, row in df_fhe.iterrows():
f_id = row["flight_history_id"]
if f_id not in day.df_test_flight_history.index:
continue
if type(row["data_updated"]) != str:
continue
offset = day.df_test_flight_history["arrival_airport_timezone_offset"][f_id]
if offset>0:
offset_str = "+" + str(offset)
else:
offset_str = str(offset)
gate_str = flighthistoryevents.get_estimated_gate_arrival_string(row["data_updated"])
if gate_str:
day.df_test_flight_history["estimated_gate_arrival"][f_id] = parse(gate_str+offset_str)
runway_str = flighthistoryevents.get_estimated_runway_arrival_string(row["data_updated"])
if runway_str:
day.df_test_flight_history["estimated_runway_arrival"][f_id] = parse(runway_str+offset_str)
for i, row in day.df_test_flight_history.iterrows():
day.df_predictions["actual_runway_arrival"][i] = get_estimated_arrival(row, "runway", day.midnight_time)
day.df_predictions["actual_gate_arrival"][i] = get_estimated_arrival(row, "gate", day.midnight_time)
return day.df_predictions
if __name__ == "__main__":
process_test_set_scaffold.process_test_set(process_day,
"only_estimated_arrival_benchmark.csv")
| bsd-2-clause |
scalable-networks/gnuradio-3.7.2.1 | gr-filter/examples/interpolate.py | 58 | 8816 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 100000 # number of samples to use
self._fs = 2000 # initial sampling rate
self._interp = 5 # Interpolation rate for PFB interpolator
self._ainterp = 5.5 # Resampling rate for the PFB arbitrary resampler
# Frequencies of the signals we construct
freq1 = 100
freq2 = 200
# Create a set of taps for the PFB interpolator
# This is based on the post-interpolation sample rate
self._taps = filter.firdes.low_pass_2(self._interp,
self._interp*self._fs,
freq2+50, 50,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Create a set of taps for the PFB arbitrary resampler
# The filter size is the number of filters in the filterbank; 32 will give very low side-lobes,
# and larger numbers will reduce these even farther
# The taps in this filter are based on a sampling rate of the filter size since it acts
# internally as an interpolator.
flt_size = 32
self._taps2 = filter.firdes.low_pass_2(flt_size,
flt_size*self._fs,
freq2+50, 150,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._interp))
print "Number of taps: ", len(self._taps)
print "Number of filters: ", self._interp
print "Taps per channel: ", tpc
# Create a couple of signals at different frequencies
self.signal1 = analog.sig_source_c(self._fs, analog.GR_SIN_WAVE, freq1, 0.5)
self.signal2 = analog.sig_source_c(self._fs, analog.GR_SIN_WAVE, freq2, 0.5)
self.signal = blocks.add_cc()
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
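        # blocks.head caps the capture at _N samples so tb.run() terminates on its own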
# Construct the PFB interpolator filter
self.pfb = filter.pfb.interpolator_ccf(self._interp, self._taps)
# Construct the PFB arbitrary resampler filter
self.pfb_ar = filter.pfb.arb_resampler_ccf(self._ainterp, self._taps2, flt_size)
self.snk_i = blocks.vector_sink_c()
#self.pfb_ar.pfb.print_taps()
#self.pfb.pfb.print_taps()
# Connect the blocks
self.connect(self.signal1, self.head, (self.signal,0))
self.connect(self.signal2, (self.signal,1))
self.connect(self.signal, self.pfb)
self.connect(self.signal, self.pfb_ar)
self.connect(self.signal, self.snk_i)
# Create the sink for the interpolated signals
self.snk1 = blocks.vector_sink_c()
self.snk2 = blocks.vector_sink_c()
self.connect(self.pfb, self.snk1)
self.connect(self.pfb_ar, self.snk2)
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
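        # Ns/Ne select the sample slice to analyze; each PSD uses an fftlen-point FFT with a Blackman window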
# Plot input signal
fs = tb._fs
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-2.5, 2.5])
sp1_t.set_title("Input Signal", weight="bold")
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot output of PFB interpolator
fs_int = tb._fs*tb._interp
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk1.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_int/2.0, fs_int/2.0, fs_int/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_int = 1.0/fs_int
Tmax = len(d)*Ts_int
t_o = scipy.arange(0, Tmax, Ts_int)
x_o1 = scipy.array(d)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o1.real, "b-o")
#p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
# Plot output of PFB arbitrary resampler
fs_aint = tb._fs * tb._ainterp
sp3_f = fig3.add_subplot(2, 1, 1)
d = tb.snk2.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_aint/2.0, fs_aint/2.0, fs_aint/float(X_o.size))
p3_f = sp3_f.plot(f_o, X_o, "b")
sp3_f.set_xlim([min(f_o), max(f_o)+1])
sp3_f.set_ylim([-200.0, 50.0])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Frequency (Hz)")
sp3_f.set_ylabel("Power (dBW)")
Ts_aint = 1.0/fs_aint
Tmax = len(d)*Ts_aint
t_o = scipy.arange(0, Tmax, Ts_aint)
x_o2 = scipy.array(d)
sp3_f = fig3.add_subplot(2, 1, 2)
p3_f = sp3_f.plot(t_o, x_o2.real, "b-o")
p3_f = sp3_f.plot(t_o, x_o1.real, "m-o")
#p3_f = sp3_f.plot(t_o, x_o2.imag, "r-o")
sp3_f.set_ylim([-2.5, 2.5])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Time (s)")
sp3_f.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
gevero/deap | examples/ga/kursawefct.py | 12 | 2948 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import array
import logging
import random
import numpy
from deap import algorithms
from deap import base
from deap import benchmarks
from deap import creator
from deap import tools
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessMin)
toolbox = base.Toolbox()
# Attribute generator
toolbox.register("attr_float", random.uniform, -5, 5)
# Structure initializers
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, 3)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
def checkBounds(min, max):
def decorator(func):
        def wrapper(*args, **kargs):
offspring = func(*args, **kargs)
for child in offspring:
for i in range(len(child)):
if child[i] > max:
child[i] = max
elif child[i] < min:
child[i] = min
return offspring
        return wrapper
return decorator
toolbox.register("evaluate", benchmarks.kursawe)
toolbox.register("mate", tools.cxBlend, alpha=1.5)
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=3, indpb=0.3)
toolbox.register("select", tools.selNSGA2)
toolbox.decorate("mate", checkBounds(-5, 5))
toolbox.decorate("mutate", checkBounds(-5, 5))
def main():
random.seed(64)
MU, LAMBDA = 50, 100
pop = toolbox.population(n=MU)
hof = tools.ParetoFront()
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean, axis=0)
stats.register("std", numpy.std, axis=0)
stats.register("min", numpy.min, axis=0)
stats.register("max", numpy.max, axis=0)
algorithms.eaMuPlusLambda(pop, toolbox, mu=MU, lambda_=LAMBDA,
cxpb=0.5, mutpb=0.2, ngen=150,
stats=stats, halloffame=hof)
return pop, stats, hof
if __name__ == "__main__":
pop, stats, hof = main()
# import matplotlib.pyplot as plt
# import numpy
#
# front = numpy.array([ind.fitness.values for ind in pop])
# plt.scatter(front[:,0], front[:,1], c="b")
# plt.axis("tight")
# plt.show()
| lgpl-3.0 |