| repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values) |
|---|---|---|---|---|---|
decvalts/cartopy | docs/source/sphinxext/plot_directive.py | 4 | 28779 | # This file is lifted from the change proposed in
# https://github.com/matplotlib/matplotlib/pull/6213.
# License: matplotlib BSD-3.
"""
A directive for including a Matplotlib plot in a Sphinx document.
By default, in HTML output, `plot` will include a .png file with a
link to a high-res .png and .pdf. In LaTeX output, it will include a
.pdf.
The source code for the plot may be included in one of three ways:
1. **A path to a source file** as the argument to the directive::
.. plot:: path/to/plot.py
When a path to a source file is given, the content of the
directive may optionally contain a caption for the plot::
.. plot:: path/to/plot.py
This is the caption for the plot
Additionally, one may specify the name of a function to call (with
no arguments) immediately after importing the module::
.. plot:: path/to/plot.py plot_function1
2. Included as **inline content** to the directive::
.. plot::
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img = mpimg.imread('_static/stinkbug.png')
imgplot = plt.imshow(img)
3. Using **doctest** syntax::
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
Options
-------
The ``plot`` directive supports the following options:
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. The default can be changed
using the `plot_include_source` variable in conf.py
encoding : str
If this source file is in a non-UTF8 or non-ASCII encoding,
the encoding must be specified using the `:encoding:` option.
The encoding will not be inferred using the ``-*- coding -*-``
metacomment.
context : bool or str
If provided, the code will be run in the context of all
previous plot directives for which the `:context:` option was
specified. This only applies to inline code plot directives,
not those run from files. If the ``:context: reset`` option is
specified, the context is reset for this and future plots, and
previous figures are closed prior to running the code.
``:context: close-figs`` keeps the context but closes previous figures
before running the code.
nofigs : bool
If specified, the code block will be run, but no figures will
be inserted. This is usually useful with the ``:context:``
option.
Additionally, this directive supports all of the options of the
`image` directive, except for `target` (since plot will add its own
target). These include `alt`, `height`, `width`, `scale`, `align` and
`class`.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_html_show_source_link
Whether to show a link to the source in HTML.
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory to which ``plot::`` file names are relative. (If None
or empty, file names are relative to the directory where the file
containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen. When passing from
the command line through sphinx-build, the list should be passed as
suffix:dpi,suffix:dpi, ....
plot_html_show_formats
Whether to show links to the files in HTML.
plot_rcparams
A dictionary containing any non-standard rcParams that should
be applied before each plot.
plot_apply_rcparams
By default, rcParams are applied when `context` option is not used in
a plot directive. This configuration option overrides this behavior
and applies rcParams before each plot.
plot_working_directory
By default, the working directory will be changed to the directory of
the example, so the code can get at its data files, if any. Also its
path will be added to `sys.path` so it can import any helper modules
sitting beside it. This configuration option can be used to specify
a central directory (also added to `sys.path`) where data files and
helper modules for all code are located.
plot_template
Provide a customized template for preparing restructured text.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import sys, os, shutil, io, re, textwrap
from os.path import relpath
import traceback
import warnings
if not six.PY3:
import cStringIO
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.images import Image
align = Image.align
import sphinx
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[^0-9]', x)[0])
for x in sphinx_version[:2]])
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
import matplotlib
import matplotlib.cbook as cbook
try:
with warnings.catch_warnings(record=True):
warnings.simplefilter("error", UserWarning)
matplotlib.use('Agg')
except UserWarning:
import matplotlib.pyplot as plt
plt.switch_backend("Agg")
else:
import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers
__version__ = 2
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_context(arg):
if arg in [None, 'reset', 'close-figs']:
return arg
raise ValueError("argument should be None or 'reset' or 'close-figs'")
def _option_format(arg):
return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
def mark_plot_labels(app, document):
"""
To make plots referenceable, we need to move the reference from
the "htmlonly" (or "latexonly") node to the actual figure node
itself.
"""
for name, explicit in six.iteritems(document.nametypes):
if not explicit:
continue
labelid = document.nameids[name]
if labelid is None:
continue
node = document.ids[labelid]
if node.tagname in ('html_only', 'latex_only'):
for n in node:
if n.tagname == 'figure':
sectname = name
for c in n:
if c.tagname == 'caption':
sectname = c.astext()
break
node['ids'].remove(labelid)
node['names'].remove(name)
n['ids'].append(labelid)
n['names'].append(name)
document.settings.env.labels[name] = \
document.settings.env.docname, labelid, sectname
break
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
'context': _option_context,
'nofigs': directives.flag,
'encoding': directives.encoding
}
app.add_directive('plot', plot_directive, True, (0, 2, False), **options)
app.add_config_value('plot_pre_code', None, True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_html_show_source_link', True, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_config_value('plot_rcparams', {}, True)
app.add_config_value('plot_apply_rcparams', False, True)
app.add_config_value('plot_working_directory', None, True)
app.add_config_value('plot_template', None, True)
app.connect(str('doctree-read'), mark_plot_labels)
#------------------------------------------------------------------------------
# Doctest handling
#------------------------------------------------------------------------------
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
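# Rough illustration of the split above (hypothetical input, not part of the
# original file): a script with two figures separated by plt.show() yields two
# code pieces, each later rendered as its own figure block.
#
#     split_code_at_show("plt.plot(x)\nplt.show()\nplt.plot(y)\n")
#     # -> ['plt.plot(x)\nplt.show()', 'plt.plot(y)\n']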
def remove_coding(text):
"""
Remove the coding comment, which six.exec_ doesn't like.
"""
sub_re = re.compile(r"^#\s*-\*-\s*coding:\s*.*-\*-$", flags=re.MULTILINE)
return sub_re.sub("", text)
#------------------------------------------------------------------------------
# Template
#------------------------------------------------------------------------------
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{{ caption }}
{% endfor %}
{{ only_latex }}
{% for img in images %}
{% if 'pdf' in img.formats -%}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endif -%}
{% endfor %}
{{ only_texinfo }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% endfor %}
"""
exception_template = """
.. htmlonly::
[`source code <%(linkdir)s/%(basename)s.py>`__]
Exception occurred rendering plot.
"""
# the context of the plot for all directives specified with the
# :context: option
plot_context = dict()
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def out_of_date(original, derived):
"""
Return True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived) or
(os.path.exists(original) and
os.stat(derived).st_mtime < os.stat(original).st_mtime))
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None, function_name=None):
"""
Import a Python module from a path, and run the function given by
name, if function_name is not None.
"""
# Change the working directory to the directory of the example, so
# it can get at its data files, if any. Add its path to sys.path
# so it can import any helper modules sitting beside it.
if six.PY2:
pwd = os.getcwdu()
else:
pwd = os.getcwd()
old_sys_path = list(sys.path)
if setup.config.plot_working_directory is not None:
try:
os.chdir(setup.config.plot_working_directory)
except OSError as err:
raise OSError(str(err) + '\n`plot_working_directory` option in '
'Sphinx configuration file must be a valid '
'directory path')
except TypeError as err:
raise TypeError(str(err) + '\n`plot_working_directory` option in '
'Sphinx configuration file must be a string or '
'None')
sys.path.insert(0, setup.config.plot_working_directory)
elif code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
# Redirect stdout
stdout = sys.stdout
if six.PY3:
sys.stdout = io.StringIO()
else:
sys.stdout = cStringIO.StringIO()
# Assign a do-nothing print function to the namespace. There
# doesn't seem to be any other way to provide a way to (not) print
# that works correctly across Python 2 and 3.
def _dummy_print(*arg, **kwarg):
pass
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
if setup.config.plot_pre_code is None:
six.exec_(six.text_type("import numpy as np\n" +
"from matplotlib import pyplot as plt\n"), ns)
else:
six.exec_(six.text_type(setup.config.plot_pre_code), ns)
ns['print'] = _dummy_print
if "__main__" in code:
six.exec_("__name__ = '__main__'", ns)
code = remove_coding(code)
six.exec_(code, ns)
if function_name is not None:
six.exec_(function_name + "()", ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
def clear_state(plot_rcparams, close=True):
if close:
plt.close('all')
matplotlib.rc_file_defaults()
matplotlib.rcParams.update(plot_rcparams)
def render_figures(code, code_path, output_dir, output_base, context,
function_name, config, context_reset=False,
close_figs=False):
"""
Run a pyplot script and save the low and high res PNGs and a PDF
in *output_dir*.
Save the images under *output_dir* with file names derived from
*output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
formats = []
plot_formats = config.plot_formats
if isinstance(plot_formats, six.string_types):
# String Sphinx < 1.3, Split on , to mimic
# Sphinx 1.3 and later. Sphinx 1.3 always
# returns a list.
plot_formats = plot_formats.split(',')
for fmt in plot_formats:
if isinstance(fmt, six.string_types):
if ':' in fmt:
suffix,dpi = fmt.split(':')
formats.append((str(suffix), int(dpi)))
else:
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
n_shows = -1
for code_piece in code_pieces:
if len(code_pieces) > 1:
if 'plt.show()' in code_piece:
n_shows += 1
else:
# We don't want to inspect whether an image exists for a code
# piece without a show.
continue
images = []
for j in xrange(1000):
if len(code_pieces) > 1:
img = ImageFile('%s_%02d_%02d' % (output_base, n_shows, j), output_dir)
else:
img = ImageFile('%s_%02d' % (output_base, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# We didn't find the files, so build them
results = []
if context:
ns = plot_context
else:
ns = {}
if context_reset:
clear_state(config.plot_rcparams)
plot_context.clear()
close_figs = not context or close_figs
n_shows = -1
for code_piece in code_pieces:
if len(code_pieces) > 1 and 'plt.show()' in code_piece:
n_shows += 1
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close_figs)
elif close_figs:
plt.close('all')
run_code(code_piece, code_path, ns, function_name)
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
elif len(code_pieces) == 1:
img = ImageFile("%s_%02d" % (output_base, j), output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, n_shows, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except Exception as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
results.append((code_piece, images))
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close=not context)
return results
def run(arguments, content, options, state_machine, state, lineno):
# The user may provide a filename *or* Python code content, but not both
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
nofigs = 'nofigs' in options
options.setdefault('include-source', config.plot_include_source)
keep_context = 'context' in options
context_opt = None if not keep_context else options['context']
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if len(arguments):
if not config.plot_basedir:
source_file_name = os.path.join(setup.app.builder.srcdir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
# If there is content, it will be passed as a caption.
caption = '\n'.join(content)
# If the optional function name is provided, use it
if len(arguments) == 2:
function_name = arguments[1]
else:
function_name = None
with io.open(source_file_name, 'r', encoding='utf-8') as fd:
code = fd.read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
function_name = None
caption = ''
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
# get rid of .. in paths, also changes pathsep
# see note in Python docs for warning about symbolic links on Windows.
# need to compare source and dest paths at end
build_dir = os.path.normpath(build_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir) # no problem here for me, but just use built-ins
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
try:
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
except ValueError:
# on Windows, relpath raises ValueError when path and start are on
# different mounts/drives
build_dir_link = build_dir
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = render_figures(code,
source_file_name,
build_dir,
output_base,
keep_context,
function_name,
config,
context_reset=context_opt == 'reset',
close_figs=context_opt == 'close-figs')
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
source_file_name, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# Properly indent the caption
caption = '\n'.join(' ' + line.strip()
for line in caption.split('\n'))
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
if nofigs:
images = []
opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
only_texinfo = ".. only:: texinfo"
# Not-None src_link signals the need for a source link in the generated
# html
if j == 0 and config.plot_html_show_source_link:
src_link = source_link
else:
src_link = None
result = format_template(
config.plot_template or TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
only_texinfo=only_texinfo,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats and not nofigs,
caption=caption)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory, if necessary
if not os.path.exists(dest_dir):
cbook.mkdirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
destimg = os.path.join(dest_dir, os.path.basename(fn))
if fn != destimg:
shutil.copyfile(fn, destimg)
# copy script (if necessary)
target_name = os.path.join(dest_dir, output_base + source_ext)
with io.open(target_name, 'w', encoding="utf-8") as f:
if source_file_name == rst_file:
code_escaped = unescape_doctest(code)
else:
code_escaped = code
f.write(code_escaped)
return errors
| gpl-3.0 |
WolfBerwouts/opengrid | opengrid/library/houseprint/site.py | 2 | 3909 | __author__ = 'Jan Pecinovsky'
import pandas as pd
"""
A Site is a physical entity (a house, apartment, school, or other building).
It may contain multiple devices and sensors.
The Site contains most of the metadata, e.g. the number of inhabitants, the size
of the building, the location, etc.
"""
class Site(object):
def __init__(self, hp=None, key=None, size=None, inhabitants=None,
postcode=None, construction_year=None, k_level=None,
e_level=None, epc_cert=None, tmpos=None):
self.hp = hp # backref to parent
self.key = key
self.size = size
self.inhabitants = inhabitants
self.postcode = postcode
self.construction_year = construction_year
self.k_level = k_level
self.e_level = e_level
self.epc_cert = epc_cert
self.devices = []
self._tmpos = tmpos
@property
def tmpos(self):
if self._tmpos is not None:
return self._tmpos
elif self.hp.tmpos is not None:
return self.hp.tmpos
else:
raise AttributeError('TMPO session not defined')
@property
def sensors(self):
s = []
for device in self.devices:
for sensor in device.sensors:
s.append(sensor)
return s
def __repr__(self):
return """
Site
Key: {}
{} devices
{} sensors
""".format(self.key,
len(self.devices),
len(self.sensors)
)
def get_sensors(self, sensortype = None):
"""
Return a list with all sensors in this site
Parameters
----------
sensortype: gas, water, electricity: optional
Returns
-------
list of Sensors
"""
return [sensor for sensor in self.sensors if sensor.type == sensortype or sensortype is None]
def get_data(self, sensortype=None, head=None, tail=None, diff='default', resample='min', unit='default'):
"""
Return a Pandas Dataframe with the joined data for all sensors in this device
Parameters
----------
sensors : list of Sensor objects
If None, use sensortype to make a selection
sensortype : string (optional)
gas, water, electricity. If None, and Sensors = None,
all available sensors in the houseprint are fetched
head, tail: timestamps,
diff : bool or 'default'
If True, the original data will be differentiated
If 'default', the sensor will decide: if it has the attribute
cumulative==True, the data will be differentiated.
resample : str (default='min')
Sampling rate, if any. Use 'raw' if no resampling.
unit : str , default='default'
String representation of the target unit, eg m**3/h, kW, ...
Returns
-------
Pandas DataFrame
"""
sensors = self.get_sensors(sensortype)
series = [sensor.get_data(head=head, tail=tail, diff=diff, resample=resample, unit=unit) for sensor in sensors]
# workaround for https://github.com/pandas-dev/pandas/issues/12985
series = [s for s in series if not s.empty]
if series:
df = pd.concat(series, axis=1)
else:
df = pd.DataFrame()
# Add unit as string to each series in the df. This is not persistent: the attribute unit will get
# lost when doing operations with df, but at least it can be checked once.
for s in series:
try:
df[s.name].unit = s.unit
except:
pass
return df
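# Illustrative usage of get_data (a sketch with a hypothetical sensor type and
# resampling choice; not part of the original module). Assumes the site already
# has devices/sensors attached, e.g. via a parent Houseprint:
#
#     df = site.get_data(sensortype='electricity', resample='min', unit='kW')
#     df.plot()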
def add_device(self, device):
"""
Parameters
----------
device : Device
"""
device.site = self
self.devices.append(device) | apache-2.0 |
oaklandanalytics/cutting_board | scripts/shared.py | 1 | 2826 | import pandas as pd
import numpy as np
import geopandas as gpd
from shapely import wkt
# someday this will be in geopandas
# setting the crs shouldn't be in here, but all my cases use it
def read_geocsv(*args, **kwargs):
df = pd.read_csv(*args, **kwargs)
df["geometry"] = [wkt.loads(s) for s in df["geometry"]]
gdf = gpd.GeoDataFrame(df)
gdf.crs = {'init': 'epsg:4326'}
return gdf
gpd.read_geocsv = read_geocsv
# make a link to look at google maps at a lat-lng
def feature_to_maps_link(row):
centroid = row.centroid
return "http://www.google.com/maps/place/%f,%f" % (centroid.y, centroid.x)
# geopandas plot two layers in relation to each other
def two_layer_map(top_layer, bottom_layer, column=None):
ax = bottom_layer.plot(figsize=(10, 8), column=column,
legend=(column is not None))
return top_layer.plot(ax=ax, color='pink', alpha=0.5, edgecolor="black")
# we're in 4326, so we need to convert the crs to meters and return the area
def compute_area(gdf):
gdf.crs = {'init': 'epsg:4326'}
return gdf.to_crs(epsg=3395).area
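# Rough usage sketch (hypothetical file name, not part of the original script):
# read_geocsv loads lon/lat (EPSG:4326) geometry, and compute_area reports area
# after reprojecting to EPSG:3395, i.e. in that projection's metre-based units.
#
#     parcels = gpd.read_geocsv('parcels.csv')
#     parcels['calc_area'] = compute_area(parcels)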
# compute the percent area contained in this shape from the shapes in the df
def compute_pct_area(df, total_area):
df["calc_area"] = compute_area(df).values
df["pct_area"] = df["calc_area"] / total_area
return df
# returns true when shape1 is more than 50% overlapping shape2
def more_than_half_inside(shape1, shape2):
overlap = gpd.overlay(
gpd.GeoDataFrame([shape1]),
gpd.GeoDataFrame([shape2]), how="intersection")
if len(overlap) == 0:
return False
overlap_area = compute_area(overlap).values[0]
poly1_area = compute_area(gpd.GeoDataFrame([shape1])).values[0]
return overlap_area / poly1_area > .5
def compute_overlap_areas(overlaps, overlapees):
'''
After a spatial join is done, this computes the actual area of the overlap.
overlaps is the result of the spatial join (which contains geometry for the
overlaper) overlapees is the geometry of the right side of the join
the "index_right" column of overlaps should be the index of overlapees
'''
total_overlaps = len(overlaps)
cnt = 0
overlap_area = []
for index, overlap in overlaps.iterrows():
overlapee = overlapees.loc[overlap.index_right]
try:
overlap_poly = gpd.overlay(
gpd.GeoDataFrame([overlap]),
gpd.GeoDataFrame([overlapee]), how="intersection")
except:
overlap_area.append(np.nan)
print("Failed:", index)
continue
if len(overlap_poly) == 0:
overlap_area.append(0)
continue
overlap_area.append(compute_area(overlap_poly).values[0])
return pd.Series(overlap_area, index=overlaps.index)
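# Illustrative call (hypothetical GeoDataFrames, not part of the original
# script): after a geopandas spatial join, pass the joined frame and the
# right-hand layer to get the true overlap area per joined row.
#
#     joined = gpd.sjoin(buildings, parcels, how='inner', op='intersects')
#     joined['overlap_area'] = compute_overlap_areas(joined, parcels)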
| bsd-3-clause |
ChengeLi/VehicleTracking | foreground_blob.py | 1 | 6824 | # find blobs in the incPCP foreground mask and code different blobs using different colors
import os
import sys
import cv2
import pdb
import pickle
import numpy as np
import glob as glob
import matplotlib.pyplot as plt
from scipy.sparse import csr_matrix
import h5py
import scipy.ndimage as ndimg
from scipy.io import loadmat
from DataPathclass import *
DataPathobj = DataPath(dataSource,VideoIndex)
from parameterClass import *
Parameterobj = parameter(dataSource,VideoIndex)
def blobImg2blobmatrix(maskgray):
# maskgray = cv2.cvtColor(mask,cv2.COLOR_BGR2GRAY)
# ret,thresholded = cv2.threshold(maskgray,127,255,0)
(blobLabelMatrix, numFgPixel) = ndimg.measurements.label(maskgray)
# BlobCenters = np.array(ndimg.measurements.center_of_mass(thresholded,blobLabelMatrix,range(1,numFgPixel+1)))
BlobCenters = np.array(ndimg.measurements.center_of_mass(maskgray,blobLabelMatrix,range(1,numFgPixel+1)))
# blobCenter_X_Matrix = np.zeros_like(blobLabelMatrix)
# blobCenter_Y_Matrix = np.zeros_like(blobLabelMatrix)
# for ii in range(numFgPixel):
# blobCenter_X_Matrix[blobCenterMatrix==ii]=BlobCenters[ii][0];
# blobCenter_Y_Matrix[blobCenterMatrix==ii]=BlobCenters[ii][1];
# pdb.set_trace()
return blobLabelMatrix, BlobCenters
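# Quick sanity sketch for the labelling above (hypothetical toy mask, not part
# of the original script): a single 2x2 block of foreground pixels gives one
# label and a centre of mass at (1.5, 1.5).
#
#     toy = np.zeros((5, 5), dtype=np.uint8)
#     toy[1:3, 1:3] = 1
#     labels, centers = blobImg2blobmatrix(toy)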
def readData(userPCA):
if userPCA:
maskfiles = sorted(glob.glob(DataPathobj.blobPath + '*.mat'))
if len(glob.glob(DataPathobj.blobPath + '*.p'))==2*len(glob.glob(DataPathobj.blobPath + '*.mat')):
print "already processed"
return None, None
else:
maskfiles = sorted(glob.glob(DataPathobj.blobPath + '*running_bgsub_mask_tensor*.p'))
"""==============================================================================="""
"""change the offset!!"""
"""==============================================================================="""
offset = 0
maskfiles = maskfiles[offset:]
return maskfiles,offset
def readVideo(cap,subSampRate):
"""bug in cap.set, not accurate"""
# cap.set ( cv2.cv.CV_CAP_PROP_POS_FRAMES , max(0,position))
# status, frame = cap.read()
status, frame = cap.read()
for ii in range(subSampRate-1):
status, frameskip = cap.read()
return frame
if __name__ == '__main__':
userPCA = True
maskfiles, offset = readData(userPCA)
if maskfiles is None and offset is None:
print 'exit!!!'
sys.exit()
"""this frame count is not the same with what Matlab detected! bug in opencv"""
# nframe = np.int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
# fps = int(np.round(DataPathobj.cap.get(cv2.cv.CV_CAP_PROP_FPS)))
# assert fps==30, "fps=%d" %fps
fps = 30
existingFiles = sorted(glob.glob(DataPathobj.blobPath+'*.p'))
existingFileNames = []
for jj in range(len(existingFiles)):
existingFileNames.append(int(existingFiles[jj][-5:-2]))
# for matidx, matfile in enumerate(maskfiles):
for matidx in range(len(maskfiles)):
if matidx in existingFileNames:
print "alredy processed ", str(matidx)
continue
matfile = maskfiles[matidx]
if userPCA:
# try: #for matfile <-v7.3
# mask_tensor = loadmat(matfile)
# except: #for matfile -v7.3
h5pymatfile = h5py.File(matfile,'r').items()[0]
# variableName = h5pymatfile[0]
variableData = h5pymatfile[1]
mask_tensor = variableData.value
else:
mask_tensor_sparse = pickle.load(open(matfile,'rb'))
mask_tensor = []
for ii in range(len(mask_tensor_sparse)):
mask_tensor.append(csr_matrix(mask_tensor_sparse[ii]).toarray())
mask_tensor = np.array(mask_tensor)
print 'mask_tensor.shape',mask_tensor.shape
trunclen = Parameterobj.trunclen
subSampRate = int(fps/Parameterobj.targetFPS)
subSampRate_matlab = int(30/Parameterobj.targetFPS) ##IT'S JUST 6
blobLabelMtxList = []
blobCenterList = []
Nrows = DataPathobj.cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
Ncols = DataPathobj.cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
"""in the incPCP code, mask file is saved as a mtx with width=fps/5*600"""
# MaskMatfileShape=int((fps/5)*trunclen)
MaskMatfileShape=int((30/5)*trunclen)
frame_idx = 0
global_frame_idx = int(mask_tensor.shape[0]*(offset+matidx)+frame_idx*subSampRate_matlab)
while frame_idx*subSampRate_matlab <mask_tensor.shape[0]:
print "frame_idx: ", frame_idx
print "global_frame_idx: ", global_frame_idx
# ori_img = cv2.imread(ori_list[(frame_idx*subSampRate)/choice_Interval])
# mask = cv2.imread(mask_list[(frame_idx*subSampRate)/choice_Interval])
if userPCA:
ImgSlice = (mask_tensor[frame_idx*subSampRate_matlab,:].reshape((Ncols,Nrows))).transpose() #Careful!! Warning! It's transposed!
else:
ImgSlice = mask_tensor[frame_idx*subSampRate_matlab,:].reshape((Nrows,Ncols))
maskgray = ImgSlice
# """see the foreground blob image"""
# plt.imshow(np.uint8(ImgSlice),cmap = 'gray')
# plt.draw()
# plt.pause(0.0001)
"""visualization"""
# frame = readVideo(DataPathobj.cap,subSampRate)
# frame2 = frame
# # # frame2[:,:,1] = frame[:,:,1]+(maskgray*100)
# # # foreground = (np.array(frame2[:,:,1])*[np.array(maskgray==1)])[0,:,:]
# maskedFrame = (frame[:,:,1]*maskgray)
# maskedFrame[maskedFrame!=0]=255
# maskedFrame_inv = frame[:,:,1]*(1-maskgray)
# frame2[:,:,1] = maskedFrame+maskedFrame_inv
# # cv2.imwrite(DataPathobj.blobPath +str(frame_idx*subSampRate+subSampRate*trunclen*matidx).zfill(7)+'.jpg',frame2)
# # cv2.imshow('frame2', frame2)
# # cv2.waitKey(0)
# plt.imshow(frame2[:,:,::-1])
# plt.draw()
# plt.pause(0.001)
"""use ndimage.measurements"""
blobLabelMatrix, BlobCenters = blobImg2blobmatrix(maskgray)
sparse_slice = csr_matrix(blobLabelMatrix)
blobLabelMtxList.append(sparse_slice)
blobCenterList.append(BlobCenters)
frame_idx = frame_idx+1
global_frame_idx = int(mask_tensor.shape[0]*(offset+matidx)+frame_idx*subSampRate)
#end of while loop
# if global_frame_idx<1800:
# continue
if ((frame_idx>0) and (np.mod(frame_idx,trunclen)==0)) or (frame_idx*subSampRate_matlab>=mask_tensor.shape[0]):
print "Save the blob index tensor into a pickle file:"
# savename = os.path.join(DataPathobj.blobPath,'blobLabelList'+str(matidx+1+offset).zfill(3)+'.p')
# index = global_frame_idx/(trunclen*subSampRate)
index = offset+matidx
print 'index',index
savename = os.path.join(DataPathobj.blobPath,'blobLabelList'+str(index).zfill(3)+'.p')
pickle.dump(blobLabelMtxList, open( savename, "wb" ))
blobLabelMtxList = []
print "Save the blob centers..."
# savename = os.path.join(DataPathobj.blobPath,'blobCenterList'+str(matidx+1+offset).zfill(3)+'.p')
savename = os.path.join(DataPathobj.blobPath,'blobCenterList'+str(index).zfill(3)+'.p')
pickle.dump(blobCenterList, open( savename, "wb" ))
blobCenterList = []
"""directly dumping to pickle not allowed, memory error! """
| mit |
artmusic0/theano-learning.part02 | Myfile_run-py_releasev5_SFMAKE/run_v5.0_predict.py | 7 | 2332 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 28 02:31:03 2015
@author: winpython
"""
import os
import sys, getopt
import time
import numpy as np
from cnn_training_computation import fit, predict
import pickle, cPickle
from matplotlib.pyplot import imshow
import matplotlib.pyplot as plt
from PIL import Image
def run():
print '... loading data'
test_set = np.zeros((200,147456),dtype=np.float32)
pil_im = Image.open( "P_20160109_190757.jpg" ).convert('L')
pil_im = pil_im.resize((512, 288), Image.BILINEAR )
pil_im = np.array(pil_im)
fig = plt.figure()
plotwindow = fig.add_subplot()
plt.imshow(pil_im, cmap='gray')
plt.show()
note = 0
for j in range(288):
for k in range(512):
test_set[0][note]= ((255 - pil_im[j][k])/225.)
note += 1
"""
# read the data, labels
data = np.genfromtxt("data/mnist_train.data")
print ". .",
test_data = np.genfromtxt("data/mnist_test.data")
print ". .",
valid_data = np.genfromtxt("data/mnist_valid.data")
labels = np.genfromtxt("data/mnist_train.solution")
"""
print ". . finished reading"
"""
# DO argmax
labels = np.argmax(labels, axis=1)
print labels
# normalization
amean = np.mean(data)
data = data - amean
astd = np.std(data)
data = data / astd
# normalise using coefficients from training data
test_data = (test_data - amean) / astd
valid_data = (valid_data - amean) / astd
"""
#fit(data, labels)
print "開始預測..."
rv = predict(test_set)
# UNDO argmax and save results x 2
r = rv
N = len(r)
res = np.zeros((N, 20))
for i in range(N):
res[i][r[i]] = 1
print "=================================================================="
print " "
print " "
print "predict Tag : [ 0 >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> 19 ]"
print " "
print " ---------------------------------------------------- "
print " "
print "predict Value:",
print res[0]
print res[1]
print " "
print " "
print "=================================================================="
#np.savetxt("test.predict", res, fmt='%i')
print "finished predicting."
if __name__ == '__main__':
run() | gpl-3.0 |
adazey/Muzez | libs/nltk/tbl/demo.py | 1 | 15081 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: Transformation-based learning
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Marcus Uneson <[email protected]>
# based on previous (nltk2) version by
# Christopher Maloof, Edward Loper, Steven Bird
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, absolute_import, division
import os
import pickle
import random
import time
from nltk.corpus import treebank
from nltk.tbl import error_list, Template
from nltk.tag.brill import Word, Pos
from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
def demo():
"""
Run a demo with defaults. See source comments for details,
or docstrings of any of the more specific demo_* functions.
"""
postag()
def demo_repr_rule_format():
"""
Exemplify repr(Rule) (see also str(Rule) and Rule.format("verbose"))
"""
postag(ruleformat="repr")
def demo_str_rule_format():
"""
Exemplify repr(Rule) (see also str(Rule) and Rule.format("verbose"))
"""
postag(ruleformat="str")
def demo_verbose_rule_format():
"""
Exemplify Rule.format("verbose")
"""
postag(ruleformat="verbose")
def demo_multiposition_feature():
"""
The feature/s of a template takes a list of positions
relative to the current word where the feature should be
looked for, conceptually joined by logical OR. For instance,
Pos([-1, 1]), given a value V, will hold whenever V is found
one step to the left and/or one step to the right.
For contiguous ranges, a 2-arg form giving inclusive end
points can also be used: Pos(-3, -1) is the same as the arg
below.
"""
postag(templates=[Template(Pos([-3,-2,-1]))])
def demo_multifeature_template():
"""
Templates can have more than a single feature.
"""
postag(templates=[Template(Word([0]), Pos([-2,-1]))])
def demo_template_statistics():
"""
Show aggregate statistics per template. Little used templates are
candidates for deletion, much used templates may possibly be refined.
Deleting unused templates is mostly about saving time and/or space:
training is basically O(T) in the number of templates T
(also in terms of memory usage, which often will be the limiting factor).
"""
postag(incremental_stats=True, template_stats=True)
def demo_generated_templates():
"""
Template.expand and Feature.expand are class methods facilitating
generating large amounts of templates. See their documentation for
details.
Note: training with 500 templates can easily fill all available
memory even on relatively small corpora
"""
wordtpls = Word.expand([-1,0,1], [1,2], excludezero=False)
tagtpls = Pos.expand([-2,-1,0,1], [1,2], excludezero=True)
templates = list(Template.expand([wordtpls, tagtpls], combinations=(1,3)))
print("Generated {0} templates for transformation-based learning".format(len(templates)))
postag(templates=templates, incremental_stats=True, template_stats=True)
def demo_learning_curve():
"""
Plot a learning curve -- the contribution to tagging accuracy of
the individual rules.
Note: requires matplotlib
"""
postag(incremental_stats=True, separate_baseline_data=True, learning_curve_output="learningcurve.png")
def demo_error_analysis():
"""
Writes a file with context for each erroneous word after tagging testing data
"""
postag(error_output="errors.txt")
def demo_serialize_tagger():
"""
Serializes the learned tagger to a file in pickle format; reloads it
and validates the process.
"""
postag(serialize_output="tagger.pcl")
def demo_high_accuracy_rules():
"""
Discard rules with low accuracy. This may hurt performance a bit,
but will often produce rules which are more interesting read to a human.
"""
postag(num_sents=3000, min_acc=0.96, min_score=10)
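# Hypothetical ad-hoc invocation (a sketch, not one of the bundled demos):
# postag() also accepts custom templates and corpus limits directly.
#
#     postag(templates=[Template(Pos([-1]), Word([0]))],
#            num_sents=500, max_rules=50, min_score=5)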
def postag(
templates=None,
tagged_data=None,
num_sents=1000,
max_rules=300,
min_score=3,
min_acc=None,
train=0.8,
trace=3,
randomize=False,
ruleformat="str",
incremental_stats=False,
template_stats=False,
error_output=None,
serialize_output=None,
learning_curve_output=None,
learning_curve_take=300,
baseline_backoff_tagger=None,
separate_baseline_data=False,
cache_baseline_tagger=None):
"""
Brill Tagger Demonstration
:param templates: the transformation templates to be used in rule learning
:type templates: list of Template
:param tagged_data: the corpus of tagged sentences to use (defaults to the treebank corpus)
:type tagged_data: list of tagged sentences
:param num_sents: how many sentences of training and testing data to use
:type num_sents: C{int}
:param max_rules: maximum number of rule instances to create
:type max_rules: C{int}
:param min_score: the minimum score for a rule in order for it to be considered
:type min_score: C{int}
:param min_acc: the minimum accuracy for a rule in order for it to be considered
:type min_acc: C{float}
:param train: the fraction of the the corpus to be used for training (1=all)
:type train: C{float}
:param trace: the level of diagnostic tracing output to produce (0-4)
:type trace: C{int}
:param randomize: whether the training data should be a random subset of the corpus
:type randomize: C{bool}
:param ruleformat: rule output format, one of "str", "repr", "verbose"
:type ruleformat: C{str}
:param incremental_stats: if true, will tag incrementally and collect stats for each rule (rather slow)
:type incremental_stats: C{bool}
:param template_stats: if true, will print per-template statistics collected in training and (optionally) testing
:type template_stats: C{bool}
:param error_output: the file where errors will be saved
:type error_output: C{string}
:param serialize_output: the file where the learned tbl tagger will be saved
:type serialize_output: C{string}
:param learning_curve_output: filename of plot of learning curve(s) (train and also test, if available)
:type learning_curve_output: C{string}
:param learning_curve_take: how many rules plotted
:type learning_curve_take: C{int}
:param baseline_backoff_tagger: the backoff tagger used by the unigram baseline tagger
:type baseline_backoff_tagger: tagger
:param separate_baseline_data: use a fraction of the training data exclusively for training baseline
:type separate_baseline_data: C{bool}
:param cache_baseline_tagger: cache baseline tagger to this file (only interesting as a temporary workaround to get
deterministic output from the baseline unigram tagger between python versions)
:type cache_baseline_tagger: C{string}
Note on separate_baseline_data: if False, the training data is reused both for the baseline and the
rule learner. This is fast and fine for a demo, but is likely to generalize worse on unseen data.
Also cannot be sensibly used for learning curves on training data (the baseline will be artificially high).
"""
# defaults
baseline_backoff_tagger = baseline_backoff_tagger or REGEXP_TAGGER
if templates is None:
from nltk.tag.brill import describe_template_sets, brill24
# some pre-built template sets taken from typical systems or publications are
# available. Print a list with describe_template_sets()
# for instance:
templates = brill24()
(training_data, baseline_data, gold_data, testing_data) = \
_demo_prepare_data(tagged_data, train, num_sents, randomize, separate_baseline_data)
# creating (or reloading from cache) a baseline tagger (unigram tagger)
# this is just a mechanism for getting deterministic output from the baseline between
# python versions
if cache_baseline_tagger:
if not os.path.exists(cache_baseline_tagger):
baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger)
with open(cache_baseline_tagger, 'w') as print_rules:
pickle.dump(baseline_tagger, print_rules)
print("Trained baseline tagger, pickled it to {0}".format(cache_baseline_tagger))
with open(cache_baseline_tagger, "r") as print_rules:
baseline_tagger= pickle.load(print_rules)
print("Reloaded pickled tagger from {0}".format(cache_baseline_tagger))
else:
baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger)
print("Trained baseline tagger")
if gold_data:
print(" Accuracy on test set: {0:0.4f}".format(baseline_tagger.evaluate(gold_data)))
# creating a Brill tagger
tbrill = time.time()
trainer = BrillTaggerTrainer(baseline_tagger, templates, trace, ruleformat=ruleformat)
print("Training tbl tagger...")
brill_tagger = trainer.train(training_data, max_rules, min_score, min_acc)
print("Trained tbl tagger in {0:0.2f} seconds".format(time.time() - tbrill))
if gold_data:
print(" Accuracy on test set: %.4f" % brill_tagger.evaluate(gold_data))
# printing the learned rules, if learned silently
if trace == 1:
print("\nLearned rules: ")
for (ruleno, rule) in enumerate(brill_tagger.rules(),1):
print("{0:4d} {1:s}".format(ruleno, rule.format(ruleformat)))
# printing template statistics (optionally including comparison with the training data)
# note: if not separate_baseline_data, then baseline accuracy will be artificially high
if incremental_stats:
print("Incrementally tagging the test data, collecting individual rule statistics")
(taggedtest, teststats) = brill_tagger.batch_tag_incremental(testing_data, gold_data)
print(" Rule statistics collected")
if not separate_baseline_data:
print("WARNING: train_stats asked for separate_baseline_data=True; the baseline "
"will be artificially high")
trainstats = brill_tagger.train_stats()
if template_stats:
brill_tagger.print_template_statistics(teststats)
if learning_curve_output:
_demo_plot(learning_curve_output, teststats, trainstats, take=learning_curve_take)
print("Wrote plot of learning curve to {0}".format(learning_curve_output))
else:
print("Tagging the test data")
taggedtest = brill_tagger.tag_sents(testing_data)
if template_stats:
brill_tagger.print_template_statistics()
# writing error analysis to file
if error_output is not None:
with open(error_output, 'w') as f:
f.write('Errors for Brill Tagger %r\n\n' % serialize_output)
f.write(u'\n'.join(error_list(gold_data, taggedtest)).encode('utf-8') + '\n')
print("Wrote tagger errors including context to {0}".format(error_output))
# serializing the tagger to a pickle file and reloading (just to see it works)
if serialize_output is not None:
taggedtest = brill_tagger.tag_sents(testing_data)
with open(serialize_output, 'w') as print_rules:
pickle.dump(brill_tagger, print_rules)
print("Wrote pickled tagger to {0}".format(serialize_output))
with open(serialize_output, "r") as print_rules:
brill_tagger_reloaded = pickle.load(print_rules)
print("Reloaded pickled tagger from {0}".format(serialize_output))
taggedtest_reloaded = brill_tagger.tag_sents(testing_data)
if taggedtest == taggedtest_reloaded:
print("Reloaded tagger tried on test set, results identical")
else:
print("PROBLEM: Reloaded tagger gave different results on test set")
def _demo_prepare_data(tagged_data, train, num_sents, randomize, separate_baseline_data):
# train is the proportion of data used in training; the rest is reserved
# for testing.
if tagged_data is None:
print("Loading tagged data from treebank... ")
tagged_data = treebank.tagged_sents()
if num_sents is None or len(tagged_data) <= num_sents:
num_sents = len(tagged_data)
if randomize:
random.seed(len(tagged_data))
random.shuffle(tagged_data)
cutoff = int(num_sents * train)
training_data = tagged_data[:cutoff]
gold_data = tagged_data[cutoff:num_sents]
testing_data = [[t[0] for t in sent] for sent in gold_data]
if not separate_baseline_data:
baseline_data = training_data
else:
bl_cutoff = len(training_data) // 3
(baseline_data, training_data) = (training_data[:bl_cutoff], training_data[bl_cutoff:])
(trainseqs, traintokens) = corpus_size(training_data)
(testseqs, testtokens) = corpus_size(testing_data)
(bltrainseqs, bltraintokens) = corpus_size(baseline_data)
print("Read testing data ({0:d} sents/{1:d} wds)".format(testseqs, testtokens))
print("Read training data ({0:d} sents/{1:d} wds)".format(trainseqs, traintokens))
print("Read baseline data ({0:d} sents/{1:d} wds) {2:s}".format(
bltrainseqs, bltraintokens, "" if separate_baseline_data else "[reused the training set]"))
return (training_data, baseline_data, gold_data, testing_data)
def _demo_plot(learning_curve_output, teststats, trainstats=None, take=None):
testcurve = [teststats['initialerrors']]
for rulescore in teststats['rulescores']:
testcurve.append(testcurve[-1] - rulescore)
testcurve = [1 - x/teststats['tokencount'] for x in testcurve[:take]]
traincurve = [trainstats['initialerrors']]
for rulescore in trainstats['rulescores']:
traincurve.append(traincurve[-1] - rulescore)
traincurve = [1 - x/trainstats['tokencount'] for x in traincurve[:take]]
import matplotlib.pyplot as plt
r = list(range(len(testcurve)))
plt.plot(r, testcurve, r, traincurve)
plt.axis([None, None, None, 1.0])
plt.savefig(learning_curve_output)
NN_CD_TAGGER = RegexpTagger(
[(r'^-?[0-9]+(.[0-9]+)?$', 'CD'),
(r'.*', 'NN')])
REGEXP_TAGGER = RegexpTagger(
[(r'^-?[0-9]+(.[0-9]+)?$', 'CD'), # cardinal numbers
(r'(The|the|A|a|An|an)$', 'AT'), # articles
(r'.*able$', 'JJ'), # adjectives
(r'.*ness$', 'NN'), # nouns formed from adjectives
(r'.*ly$', 'RB'), # adverbs
(r'.*s$', 'NNS'), # plural nouns
(r'.*ing$', 'VBG'), # gerunds
(r'.*ed$', 'VBD'), # past tense verbs
(r'.*', 'NN') # nouns (default)
])
def corpus_size(seqs):
return (len(seqs), sum(len(x) for x in seqs))
if __name__ == '__main__':
demo_learning_curve()
| gpl-3.0 |
fabioz/Pydev | plugins/org.python.pydev.core/pysrc/_pydev_bundle/pydev_console_utils.py | 2 | 23983 | import os
import sys
import traceback
from _pydev_bundle.pydev_imports import xmlrpclib, _queue, Exec
from _pydev_bundle._pydev_calltip_util import get_description
from _pydevd_bundle import pydevd_vars
from _pydevd_bundle import pydevd_xml
from _pydevd_bundle.pydevd_constants import (IS_JYTHON, dict_iter_items, NEXT_VALUE_SEPARATOR, get_global_debugger,
silence_warnings_decorator)
from contextlib import contextmanager
from _pydev_bundle import pydev_log
from _pydevd_bundle.pydevd_utils import interrupt_main_thread
try:
import cStringIO as StringIO # may not always be available @UnusedImport
except:
try:
import StringIO # @Reimport
except:
import io as StringIO
# =======================================================================================================================
# BaseStdIn
# =======================================================================================================================
class BaseStdIn:
def __init__(self, original_stdin=sys.stdin, *args, **kwargs):
try:
self.encoding = sys.stdin.encoding
except:
# Not sure if it's available in all Python versions...
pass
self.original_stdin = original_stdin
try:
self.errors = sys.stdin.errors # Who knew? sys streams have an errors attribute!
except:
# Not sure if it's available in all Python versions...
pass
def readline(self, *args, **kwargs):
# sys.stderr.write('Cannot readline out of the console evaluation\n') -- don't show anything
# This could happen if the user had done input('enter number'). <-- upon entering this, that message would appear,
# which is not something we want.
return '\n'
def write(self, *args, **kwargs):
pass # not available StdIn (but it can be expected to be in the stream interface)
def flush(self, *args, **kwargs):
pass # not available StdIn (but it can be expected to be in the stream interface)
def read(self, *args, **kwargs):
# in the interactive interpreter, a read and a readline are the same.
return self.readline()
def close(self, *args, **kwargs):
pass # expected in StdIn
def __iter__(self):
# BaseStdIn would not be considered as Iterable in Python 3 without explicit `__iter__` implementation
return self.original_stdin.__iter__()
def __getattr__(self, item):
# it's called if the attribute wasn't found
if hasattr(self.original_stdin, item):
return getattr(self.original_stdin, item)
raise AttributeError("%s has no attribute %s" % (self.original_stdin, item))
# =======================================================================================================================
# StdIn
# =======================================================================================================================
class StdIn(BaseStdIn):
'''
Object to be added to stdin (to emulate it as non-blocking while the next line arrives)
'''
def __init__(self, interpreter, host, client_port, original_stdin=sys.stdin):
BaseStdIn.__init__(self, original_stdin)
self.interpreter = interpreter
self.client_port = client_port
self.host = host
def readline(self, *args, **kwargs):
# Ok, callback into the client to get the new input
try:
server = xmlrpclib.Server('http://%s:%s' % (self.host, self.client_port))
requested_input = server.RequestInput()
if not requested_input:
return '\n' # Yes, a readline must return something (otherwise we can get an EOFError on the input() call).
else:
# readline should end with '\n' (not doing so makes IPython 5 remove the last *valid* character).
requested_input += '\n'
return requested_input
except KeyboardInterrupt:
raise # Let KeyboardInterrupt go through -- #PyDev-816: Interrupting infinite loop in the Interactive Console
except:
return '\n'
def close(self, *args, **kwargs):
pass # expected in StdIn
#=======================================================================================================================
# DebugConsoleStdIn
#=======================================================================================================================
class DebugConsoleStdIn(BaseStdIn):
'''
Object to be added to stdin (to emulate it as non-blocking while the next line arrives)
'''
def __init__(self, py_db, original_stdin):
'''
:param py_db:
If None, get_global_debugger() is used.
'''
BaseStdIn.__init__(self, original_stdin)
self._py_db = py_db
self._in_notification = 0
def __send_input_requested_message(self, is_started):
try:
py_db = self._py_db
if py_db is None:
py_db = get_global_debugger()
if py_db is None:
return
cmd = py_db.cmd_factory.make_input_requested_message(is_started)
py_db.writer.add_command(cmd)
except Exception:
pydev_log.exception()
@contextmanager
def notify_input_requested(self):
self._in_notification += 1
if self._in_notification == 1:
self.__send_input_requested_message(True)
try:
yield
finally:
self._in_notification -= 1
if self._in_notification == 0:
self.__send_input_requested_message(False)
def readline(self, *args, **kwargs):
with self.notify_input_requested():
return self.original_stdin.readline(*args, **kwargs)
def read(self, *args, **kwargs):
with self.notify_input_requested():
return self.original_stdin.read(*args, **kwargs)
class CodeFragment:
def __init__(self, text, is_single_line=True):
self.text = text
self.is_single_line = is_single_line
def append(self, code_fragment):
self.text = self.text + "\n" + code_fragment.text
if not code_fragment.is_single_line:
self.is_single_line = False
# =======================================================================================================================
# BaseInterpreterInterface
# =======================================================================================================================
class BaseInterpreterInterface:
def __init__(self, mainThread, connect_status_queue=None):
self.mainThread = mainThread
self.interruptable = False
self.exec_queue = _queue.Queue(0)
self.buffer = None
self.banner_shown = False
self.connect_status_queue = connect_status_queue
self.mpl_modules_for_patching = {}
self.init_mpl_modules_for_patching()
def build_banner(self):
return 'print({0})\n'.format(repr(self.get_greeting_msg()))
def get_greeting_msg(self):
return 'PyDev console: starting.\n'
def init_mpl_modules_for_patching(self):
from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot
self.mpl_modules_for_patching = {
"matplotlib": lambda: activate_matplotlib(self.enableGui),
"matplotlib.pyplot": activate_pyplot,
"pylab": activate_pylab
}
def need_more_for_code(self, source):
# PyDev-502: PyDev 3.9 F2 doesn't support backslash continuations
# Strangely even the IPython console is_complete said it was complete
# even with a continuation char at the end.
if source.endswith('\\'):
return True
if hasattr(self.interpreter, 'is_complete'):
return not self.interpreter.is_complete(source)
try:
# At this point, it should always be single.
# If we don't do this, things as:
#
# for i in range(10): print(i)
#
# (in a single line) don't work.
# Note that it won't give an error and code will be None (so, it'll
# use execMultipleLines in the next call in this case).
symbol = 'single'
code = self.interpreter.compile(source, '<input>', symbol)
except (OverflowError, SyntaxError, ValueError):
# Case 1
return False
if code is None:
# Case 2
return True
# Case 3
return False
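        # The three "Case" comments above follow the codeop/compile_command
        # contract: a SyntaxError/OverflowError/ValueError means the input is
        # invalid but complete (Case 1), compile() returning None means more
        # lines are required (Case 2), and a code object means the statement
        # is ready to execute (Case 3).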
def need_more(self, code_fragment):
if self.buffer is None:
self.buffer = code_fragment
else:
self.buffer.append(code_fragment)
return self.need_more_for_code(self.buffer.text)
def create_std_in(self, debugger=None, original_std_in=None):
if debugger is None:
return StdIn(self, self.host, self.client_port, original_stdin=original_std_in)
else:
return DebugConsoleStdIn(py_db=debugger, original_stdin=original_std_in)
def add_exec(self, code_fragment, debugger=None):
# In case sys.excepthook called, use original excepthook #PyDev-877: Debug console freezes with Python 3.5+
# (showtraceback does it on python 3.5 onwards)
sys.excepthook = sys.__excepthook__
try:
original_in = sys.stdin
try:
help = None
if 'pydoc' in sys.modules:
pydoc = sys.modules['pydoc'] # Don't import it if it still is not there.
if hasattr(pydoc, 'help'):
                        # You never know how the API may change, so let's code defensively here
help = pydoc.help
if not hasattr(help, 'input'):
help = None
except:
# Just ignore any error here
pass
more = False
try:
sys.stdin = self.create_std_in(debugger, original_in)
try:
if help is not None:
# This will enable the help() function to work.
try:
try:
help.input = sys.stdin
except AttributeError:
help._input = sys.stdin
except:
help = None
if not self._input_error_printed:
self._input_error_printed = True
sys.stderr.write('\nError when trying to update pydoc.help.input\n')
sys.stderr.write('(help() may not work -- please report this as a bug in the pydev bugtracker).\n\n')
traceback.print_exc()
try:
self.start_exec()
if hasattr(self, 'debugger'):
self.debugger.enable_tracing()
more = self.do_add_exec(code_fragment)
if hasattr(self, 'debugger'):
self.debugger.disable_tracing()
self.finish_exec(more)
finally:
if help is not None:
try:
try:
help.input = original_in
except AttributeError:
help._input = original_in
except:
pass
finally:
sys.stdin = original_in
except SystemExit:
raise
except:
traceback.print_exc()
finally:
sys.__excepthook__ = sys.excepthook
return more
def do_add_exec(self, codeFragment):
'''
Subclasses should override.
@return: more (True if more input is needed to complete the statement and False if the statement is complete).
'''
raise NotImplementedError()
def get_namespace(self):
'''
Subclasses should override.
@return: dict with namespace.
'''
raise NotImplementedError()
def __resolve_reference__(self, text):
"""
:type text: str
"""
obj = None
if '.' not in text:
try:
obj = self.get_namespace()[text]
except KeyError:
pass
if obj is None:
try:
obj = self.get_namespace()['__builtins__'][text]
except:
pass
if obj is None:
try:
obj = getattr(self.get_namespace()['__builtins__'], text, None)
except:
pass
else:
try:
last_dot = text.rindex('.')
parent_context = text[0:last_dot]
res = pydevd_vars.eval_in_context(parent_context, self.get_namespace(), self.get_namespace())
obj = getattr(res, text[last_dot + 1:])
except:
pass
return obj
def getDescription(self, text):
try:
obj = self.__resolve_reference__(text)
if obj is None:
return ''
return get_description(obj)
except:
return ''
def do_exec_code(self, code, is_single_line):
try:
code_fragment = CodeFragment(code, is_single_line)
more = self.need_more(code_fragment)
if not more:
code_fragment = self.buffer
self.buffer = None
self.exec_queue.put(code_fragment)
return more
except:
traceback.print_exc()
return False
def execLine(self, line):
return self.do_exec_code(line, True)
def execMultipleLines(self, lines):
if IS_JYTHON:
more = False
for line in lines.split('\n'):
more = self.do_exec_code(line, True)
return more
else:
return self.do_exec_code(lines, False)
def interrupt(self):
self.buffer = None # Also clear the buffer when it's interrupted.
try:
if self.interruptable:
# Fix for #PyDev-500: Console interrupt can't interrupt on sleep
interrupt_main_thread(self.mainThread)
self.finish_exec(False)
return True
except:
traceback.print_exc()
return False
def close(self):
sys.exit(0)
def start_exec(self):
self.interruptable = True
def get_server(self):
if getattr(self, 'host', None) is not None:
return xmlrpclib.Server('http://%s:%s' % (self.host, self.client_port))
else:
return None
server = property(get_server)
def ShowConsole(self):
server = self.get_server()
if server is not None:
server.ShowConsole()
def finish_exec(self, more):
self.interruptable = False
server = self.get_server()
if server is not None:
return server.NotifyFinished(more)
else:
return True
def getFrame(self):
xml = StringIO.StringIO()
hidden_ns = self.get_ipython_hidden_vars_dict()
xml.write("<xml>")
xml.write(pydevd_xml.frame_vars_to_xml(self.get_namespace(), hidden_ns))
xml.write("</xml>")
return xml.getvalue()
@silence_warnings_decorator
def getVariable(self, attributes):
xml = StringIO.StringIO()
xml.write("<xml>")
val_dict = pydevd_vars.resolve_compound_var_object_fields(self.get_namespace(), attributes)
if val_dict is None:
val_dict = {}
for k, val in dict_iter_items(val_dict):
val = val_dict[k]
evaluate_full_value = pydevd_xml.should_evaluate_full_value(val)
xml.write(pydevd_vars.var_to_xml(val, k, evaluate_full_value=evaluate_full_value))
xml.write("</xml>")
return xml.getvalue()
def getArray(self, attr, roffset, coffset, rows, cols, format):
name = attr.split("\t")[-1]
array = pydevd_vars.eval_in_context(name, self.get_namespace(), self.get_namespace())
return pydevd_vars.table_like_struct_to_xml(array, name, roffset, coffset, rows, cols, format)
def evaluate(self, expression):
xml = StringIO.StringIO()
xml.write("<xml>")
result = pydevd_vars.eval_in_context(expression, self.get_namespace(), self.get_namespace())
xml.write(pydevd_vars.var_to_xml(result, expression))
xml.write("</xml>")
return xml.getvalue()
@silence_warnings_decorator
def loadFullValue(self, seq, scope_attrs):
"""
Evaluate full value for async Console variables in a separate thread and send results to IDE side
:param seq: id of command
:param scope_attrs: a sequence of variables with their attributes separated by NEXT_VALUE_SEPARATOR
        (i.e.: obj\tattr1\tattr2NEXT_VALUE_SEPARATORobj2\tattr1\tattr2)
:return:
"""
frame_variables = self.get_namespace()
var_objects = []
vars = scope_attrs.split(NEXT_VALUE_SEPARATOR)
for var_attrs in vars:
if '\t' in var_attrs:
name, attrs = var_attrs.split('\t', 1)
else:
name = var_attrs
attrs = None
if name in frame_variables:
var_object = pydevd_vars.resolve_var_object(frame_variables[name], attrs)
var_objects.append((var_object, name))
else:
var_object = pydevd_vars.eval_in_context(name, frame_variables, frame_variables)
var_objects.append((var_object, name))
from _pydevd_bundle.pydevd_comm import GetValueAsyncThreadConsole
py_db = getattr(self, 'debugger', None)
if py_db is None:
py_db = get_global_debugger()
if py_db is None:
from pydevd import PyDB
py_db = PyDB()
t = GetValueAsyncThreadConsole(py_db, self.get_server(), seq, var_objects)
t.start()
def changeVariable(self, attr, value):
def do_change_variable():
Exec('%s=%s' % (attr, value), self.get_namespace(), self.get_namespace())
# Important: it has to be really enabled in the main thread, so, schedule
# it to run in the main thread.
self.exec_queue.put(do_change_variable)
def connectToDebugger(self, debuggerPort, debugger_options=None):
'''
Used to show console with variables connection.
Mainly, monkey-patches things in the debugger structure so that the debugger protocol works.
'''
if debugger_options is None:
debugger_options = {}
env_key = "PYDEVD_EXTRA_ENVS"
if env_key in debugger_options:
for (env_name, value) in dict_iter_items(debugger_options[env_key]):
existing_value = os.environ.get(env_name, None)
if existing_value:
os.environ[env_name] = "%s%c%s" % (existing_value, os.path.pathsep, value)
else:
os.environ[env_name] = value
if env_name == "PYTHONPATH":
sys.path.append(value)
del debugger_options[env_key]
def do_connect_to_debugger():
try:
# Try to import the packages needed to attach the debugger
import pydevd
from _pydev_imps._pydev_saved_modules import threading
except:
# This happens on Jython embedded in host eclipse
traceback.print_exc()
sys.stderr.write('pydevd is not available, cannot connect\n')
from _pydevd_bundle.pydevd_constants import set_thread_id
from _pydev_bundle import pydev_localhost
set_thread_id(threading.currentThread(), "console_main")
VIRTUAL_FRAME_ID = "1" # matches PyStackFrameConsole.java
VIRTUAL_CONSOLE_ID = "console_main" # matches PyThreadConsole.java
f = FakeFrame()
f.f_back = None
            f.f_globals = {}  # As globals=locals here, let's simply leave it empty (and save a bit of network traffic).
f.f_locals = self.get_namespace()
self.debugger = pydevd.PyDB()
self.debugger.add_fake_frame(thread_id=VIRTUAL_CONSOLE_ID, frame_id=VIRTUAL_FRAME_ID, frame=f)
try:
pydevd.apply_debugger_options(debugger_options)
self.debugger.connect(pydev_localhost.get_localhost(), debuggerPort)
self.debugger.prepare_to_run()
self.debugger.disable_tracing()
except:
traceback.print_exc()
sys.stderr.write('Failed to connect to target debugger.\n')
# Register to process commands when idle
self.debugrunning = False
try:
import pydevconsole
pydevconsole.set_debug_hook(self.debugger.process_internal_commands)
except:
traceback.print_exc()
sys.stderr.write('Version of Python does not support debuggable Interactive Console.\n')
# Important: it has to be really enabled in the main thread, so, schedule
# it to run in the main thread.
self.exec_queue.put(do_connect_to_debugger)
return ('connect complete',)
def handshake(self):
if self.connect_status_queue is not None:
self.connect_status_queue.put(True)
return "PyCharm"
def get_connect_status_queue(self):
return self.connect_status_queue
def hello(self, input_str):
# Don't care what the input string is
return ("Hello eclipse",)
def enableGui(self, guiname):
''' Enable the GUI specified in guiname (see inputhook for list).
As with IPython, enabling multiple GUIs isn't an error, but
only the last one's main loop runs and it may not work
'''
def do_enable_gui():
from _pydev_bundle.pydev_versioncheck import versionok_for_gui
if versionok_for_gui():
try:
from pydev_ipython.inputhook import enable_gui
enable_gui(guiname)
except:
sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname)
traceback.print_exc()
elif guiname not in ['none', '', None]:
# Only print a warning if the guiname was going to do something
sys.stderr.write("PyDev console: Python version does not support GUI event loop integration for '%s'\n" % guiname)
# Return value does not matter, so return back what was sent
return guiname
# Important: it has to be really enabled in the main thread, so, schedule
# it to run in the main thread.
self.exec_queue.put(do_enable_gui)
def get_ipython_hidden_vars_dict(self):
return None
# =======================================================================================================================
# FakeFrame
# =======================================================================================================================
class FakeFrame:
'''
Used to show console with variables connection.
A class to be used as a mock of a frame.
'''
| epl-1.0 |
osh/gr-eventsim | apps/plot_results_save2.py | 1 | 1648 | #!/usr/bin/env python
import json, sys, pprint, re
import matplotlib.pyplot as plt
import numpy as np
metrics = ["eps_final"]
fn = sys.argv[1]
print "loading %s"%(fn)
records = json.loads(open(fn,"r").read())
# Compute and extract numbers from output strings
for i in range(0,len(records)):
e = filter(lambda y: not y == None, map(lambda x: re.search('events\W+= (\d+) \((\d+\.\d+)\/sec', x), records[i]['output']))
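    # The regular expression above is assumed to match progress lines of the
    # form "events       = 12345 (678.90/sec" in each record's captured
    # output: group(1) is the cumulative event count and group(2) the
    # events-per-second rate.  Only the last match per record is kept below.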
print len(e)
if(len(e) >= 1):
records[i]['e_final'] = float(e[-1].group(1))
records[i]['eps_final'] = float(e[-1].group(2))
records = filter(lambda x: x.has_key("eps_final"), records);
nsinks = sorted(list(set(map(lambda x: x["nsinks"], records))))
nthreads = sorted(list(set(map(lambda x: x["nthreads"], records))))
names = set(map(lambda x: x["d"], records))
handles = []
for metric in metrics:
plt.figure()
for name in names:
for ns in nsinks:
perf = [];
lbl = "Nsinks = %d"%(ns)
for nt in nthreads:
p = np.mean( map(lambda x: x[metric], filter( lambda y: y["d"]==name and y["nthreads"]==nt and y["nsinks"]==ns, records) ) )
perf.append(p)
print len(nthreads), len(perf)
h, = plt.plot(nthreads, perf, label=lbl)
print nthreads,perf
handles.append(h)
plt.xlabel("Number of Threads");
plt.ylabel("Events Per Second");
plt.title("Event Performance vs. Number of Threads vs. Number of Sinks")
ax = plt.subplot(111)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, loc="lower left")
#plt.legend(handles=handles)
plt.show()
| gpl-3.0 |
ishanic/scikit-learn | sklearn/cluster/bicluster.py | 211 | 19443 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
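# A small dense sketch of what ``_scale_normalize`` computes (comment only,
# values illustrative): for
#
#     X = np.array([[1., 3.],
#                   [2., 4.]])
#     an, r, c = _scale_normalize(X)
#
# each entry satisfies an[i, j] == X[i, j] / (sqrt(X[i, :].sum()) *
# sqrt(X[:, j].sum())), with ``r`` and ``c`` holding the row and column
# scaling factors.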
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            dist = norm(X_scaled.data - X_new.data)  # compare successive iterates, as in the dense branch
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
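# In other words, with L = log(X) this returns the doubly centred matrix
# L[i, j] - mean(L[i, :]) - mean(L[:, j]) + mean(L), which is the
# log-interactions transform of Kluger's scheme.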
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
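# Minimal usage sketch for SpectralCoclustering (kept as a comment so nothing
# runs at import time; ``make_biclusters`` from ``sklearn.datasets`` is
# assumed to be available):
#
#     from sklearn.datasets import make_biclusters
#     X, rows, cols = make_biclusters((30, 30), n_clusters=3, random_state=0)
#     model = SpectralCoclustering(n_clusters=3, random_state=0)
#     model.fit(X)
#     model.row_labels_       # one cluster label per row of X
#     model.column_labels_    # one cluster label per column of X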
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
                raise ValueError("Incorrect parameter n_clusters has value:"
                                 " {}. It should either be a single integer"
                                 " or an iterable with two integers:"
                                 " (n_row_clusters, n_column_clusters)"
                                 .format(self.n_clusters))
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
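if __name__ == '__main__':
    # A small self-contained demonstration sketch (not part of the library
    # API): build checkerboard-structured data and recover its row and column
    # partitions.  ``make_checkerboard`` is assumed to be importable from
    # ``sklearn.datasets`` in this checkout.
    from sklearn.datasets import make_checkerboard
    data, data_rows, data_cols = make_checkerboard(
        shape=(300, 300), n_clusters=(4, 3), noise=10, random_state=0)
    demo_model = SpectralBiclustering(n_clusters=(4, 3), method='log',
                                      random_state=0)
    demo_model.fit(data)
    print(np.unique(demo_model.row_labels_))     # expect 4 distinct row labels
    print(np.unique(demo_model.column_labels_))  # expect 3 distinct column labels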
| bsd-3-clause |
Asiant/trump | trump/extensions/symbol_aggs.py | 1 | 8205 | # -*- coding: utf-8 -*-
###############################################################################
#
# PyLint tests that will never be applied for this file.
#
# Unused variables, these functions are organized so that they can be called
# from a string at runtime, from it's name being stored
# in an SQLAlchemy object attribute.
# pylint: disable-msg=W0612
###############################################################################
#
# PyLint tests that will be eventually fixed.
#
# Unused argument, the functionality, once it's implemented, will use
# the argument.
# pylint: disable-msg=W0613
"""
This module creates the functions that get used in symbol aggregation
There are row-based, and column-based, function builders, just to stay
organized.
"""
import pandas as pd
nan = pd.np.nan
def sorted_feed_cols(df):
"""
takes a dataframe's columns that would be of the form:
['feed003', 'failsafe_feed999', 'override_feed000', 'feed001', 'feed002']
and returns:
['override_feed000', 'feed001', 'feed002', 'feed003', 'failsafe_feed999']
"""
cols = df.columns
ind = [int(c.split("feed")[1]) for c in cols]
cols = zip(ind,cols)
cols.sort()
cols = [c[1] for c in cols]
return cols
def _row_wise_priority(adf):
adf = adf.dropna()
if len(adf) > 0:
return adf.values[0]
else:
return nan
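# e.g. for a row whose values are [NaN, NaN, 3.2, 5.0] the first non-null
# entry (3.2, i.e. the highest-priority feed with data) wins; a row that is
# entirely NaN stays NaN.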
class ApplyRow(object):
"""
Mixer used to identify row-based logic methods for
Trump's Feed aggregation step.
All these functions, should take in a dataframe of multiple columns,
and return a DataFrame with a single column, or a Series.
"""
@staticmethod
def priority_fill(adf):
"""
Looks at each row, and chooses the value from the highest priority
(lowest #) feed, one row at a time.
"""
# the logic to apply overrides, values from certain feeds,
# or the failsafes, is needed for high-level functions
# in this same file.
# so "priority_fill" just wraps this, for organization
# purposes.
return _row_wise_priority(adf)
@staticmethod
def mean_fill(adf):
""" Looks at each row, and calculates the mean. Honours
the Trump override/failsafe logic. """
ordpt = adf.values[0]
if not pd.isnull(ordpt):
return ordpt
fdmn = adf.iloc[1:-1].mean()
if not pd.isnull(fdmn):
return fdmn
flspt = adf.values[-1]
if not pd.isnull(flspt):
return flspt
return nan
@staticmethod
def median_fill(adf):
""" Looks at each row, and chooses the median. Honours
the Trump override/failsafe logic. """
ordpt = adf.values[0]
if not pd.isnull(ordpt):
return ordpt
fdmn = adf.iloc[1:-1].median()
if not pd.isnull(fdmn):
return fdmn
flspt = adf.values[-1]
if not pd.isnull(flspt):
return flspt
return nan
@staticmethod
def custom(adf):
"""
A custom Apply-Row Aggregator can be defined,
as any function which accepts a Series, and returns
any number-like object, which will get
assigned to the Dataframe's 'final' column in
using the pandas .apply, function.
"""
return [0] * len(adf)
class ChooseCol(object):
"""
Builds a dictionary of column-based logic to be applied by
Trump's aggregation step.
All these functions, should take in a dataframe of multiple columns,
and return a DataFrame with a single column, or a Series.
"""
@staticmethod
def most_populated(adf):
"""
Looks at each column, using the one with the most values
Honours the Trump override/failsafe logic. """
# just look at the feeds, ignore overrides and failsafes:
feeds_only = adf[adf.columns[1:-1]]
# find the most populated feed
cnt_df = feeds_only.count()
cnt = cnt_df.max()
selected_feeds = cnt_df[cnt_df == cnt]
# if there aren't any feeds, the first feed will work...
if len(selected_feeds) == 0:
pre_final = adf['feed001'] # if they are all empty
# they should all be
# equally empty
else:
#if there's one or more, take the highest priority one
pre_final = adf[selected_feeds.index[0]]
# create the final, applying the override and failsafe logic...
final_df = pd.concat([adf.override_feed000,
pre_final,
adf.failsafe_feed999], axis=1)
final_df = final_df.apply(_row_wise_priority, axis=1)
return final_df
@staticmethod
def most_recent(adf):
"""
Looks at each column, and chooses the feed with the most recent data
point. Honours the Trump override/failsafe logic. """
# just look at the feeds, ignore overrides and failsafes:
feeds_only = adf[adf.columns[1:-1]]
# find the feeds with the most recent data...
feeds_with_data = feeds_only.dropna(how='all')
selected_feeds = feeds_with_data.T.dropna().index
# if there aren't any feeds, the first feed will work...
if len(selected_feeds) == 0:
            pre_final = adf['feed001'] # if they are all empty
# they should all be
# equally empty
else:
#if there's one or more, take the highest priority one
pre_final = adf[selected_feeds[0]]
# create the final, applying the override and failsafe logic...
final_df = pd.concat([adf.override_feed000,
pre_final,
adf.failsafe_feed999], axis=1)
final_df = final_df.apply(_row_wise_priority, axis=1)
return final_df
@staticmethod
def custom(adf):
"""
A custom Choose-Column Aggregator can be defined,
as any function which accepts a dataframe, and returns
any Series-like object, which will get
assigned to the Dataframe's 'final' column.
"""
return [0] * len(adf)
class FeedAggregator(ApplyRow, ChooseCol):
def __init__(self,method):
try:
self.meth = getattr(self, method)
except:
            raise ValueError("{} is not an aggregator method".format(method))
self.methname = method
def aggregate(self,df):
if self.methname in ApplyRow.__dict__:
return df.apply(self.meth, axis=1)
elif self.methname in ChooseCol.__dict__:
return self.meth(df)
else:
            raise NotImplementedError("No aggregation logic is defined for "
                                      "method '{}'".format(self.methname))
if __name__ == '__main__':
def make_fake_feed_data(l=10):
dr = pd.date_range(start='2015-01-10', periods=l, freq='D')
data = pd.np.random.rand(l)
return pd.Series(data,dr)
ors = make_fake_feed_data(1).shift(1,freq='D')
s1 = make_fake_feed_data(10)
s2 = make_fake_feed_data(5)
s3 = make_fake_feed_data(7)
fls = make_fake_feed_data(1).shift(8,freq='D')
s1.iloc[6] = pd.np.nan
s1.iloc[8] = pd.np.nan
cols = ['override_'] + [''] * 3
cols = [c + "feed{0:03d}".format(i) for i, c in enumerate(cols)]
cols = cols + ['failsafe_feed999']
df = pd.concat([ors, s1, s2, s3, fls], axis=1)
df.columns = cols
df['final'] = FeedAggregator('most_populated').aggregate(df)
print df
#assert df['final'].iloc[1] == df['override_feed000'].iloc[1]
#assert df['final'].iloc[-1] == df['feed001'].iloc[-1]
#assert df['final'].iloc[-2] == df['failsafe_feed999'].iloc[-2]
#assert df['final'].iloc[-4] == df['feed003'].iloc[-4]
| bsd-3-clause |
billy-inn/scikit-learn | sklearn/externals/joblib/parallel.py | 86 | 35087 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
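# Sketch of what a BatchedCalls instance holds and returns (illustrative
# values only): wrapping two ``delayed``-style tuples
#
#     batch = BatchedCalls([(abs, (-1,), {}), (pow, (2, 3), {})])
#     len(batch)    # 2
#     batch()       # [1, 8]
#
# lets the pool ship one picklable callable per batch instead of one per task.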
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
if issubclass(e_type, TransportableException):
raise
else:
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
    - when used in conjunction with `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
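# For instance, ``delayed(sqrt)(9)`` simply returns the tuple
# ``(sqrt, (9,), {})``; Parallel later unpacks such tuples and calls
# ``sqrt(9)`` inside a worker.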
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
          output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
        Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
        default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
    max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called a 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto', temp_folder=None,
max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = None
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it
# unless they choose to use the context manager API with a with block.
self._pool = None
self._output = None
self._jobs = list()
self._managed_pool = False
# This lock is used coordinate the main thread of this process with
# the async callback thread of our the pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_pool = True
self._initialize_pool()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_pool()
self._managed_pool = False
def _effective_n_jobs(self):
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
return n_jobs
def _initialize_pool(self):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self._effective_n_jobs()
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs == 1:
# Sequential mode: do not use a pool instance to avoid any
# useless dispatching overhead
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
elif threading.current_thread().name != 'MainThread':
                # Prevent posix fork inside non-main posix threads
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
else:
already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
return n_jobs
def _terminate_pool(self):
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
                # processing of a batch of tasks is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
                # Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
                # while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
# This is heuristic code to print a message only 'verbose' times
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
# we empty it and Python lists are not thread-safe by default, hence
# the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
# Stop dispatching any new job in the async callback thread
self._aborting = True
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
# Kill remaining running processes without waiting for
# the results as we will raise the exception we got back
# to the caller instead of returning any result.
with self._lock:
self._terminate_pool()
if self._managed_pool:
# In case we had to terminate a managed pool, let
# us start a new one to ensure that subsequent calls
# to __call__ on the same Parallel instance will get
# a working pool as they expect.
self._initialize_pool()
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_pool:
n_jobs = self._initialize_pool()
else:
n_jobs = self._effective_n_jobs()
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
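# pre_dispatch may be given as an expression such as '2*n_jobs';
# evaluate it here with n_jobs in scope to obtain an integer count.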
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
self._iterating = True
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all" or n_jobs == 1:
# The iterable was consumed all at once by the above while loop.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_pool:
self._terminate_pool()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| bsd-3-clause |
AnasGhrab/scikit-learn | sklearn/kernel_ridge.py | 155 | 6545 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
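# Predictions are the kernel values between X and the training data, weighted by the dual coefficients.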
return np.dot(K, self.dual_coef_)
| bsd-3-clause |
NMTHydro/Recharge | utils/ascii_file_plotter_vers_2.py | 1 | 7569 | # ===============================================================================
# Copyright 2018 gabe-parrish
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= standard library imports ========================
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# ============= local library imports ===========================
def parse_file(path, first_col, num_cols):
"""
:param path: path to the input ascii file
:param first_col: header string of the first column, used to skip header lines
:param num_cols: number of data columns expected in each row
:return: dict mapping each column number (as a string) to a list of float values
"""
# initialize dicts and lists....
ascii_unformatted_dict = {}
good_lines = []
ascii_dict = {}
with open(path, 'r') as readfile:
for line in readfile:
# print "line ->{}".format(line)
# kill the spaces, make a list, check by the length of the list.
line_lst = line.split(" ")
# print "line list -> {}".format(line_lst)
#kill spaces
space_free = list(filter(lambda a: a != '', line_lst))
# print "space free! -> {}".format(space_free)
if len(space_free) == num_cols and space_free[0] != first_col:
# print "spacefree in loop", space_free
goodln = space_free
# Turn the lines into floats...
bestln = [float(i) for i in goodln]
# ascii_unformatted_dict['good_lines'].append(bestln)
good_lines.append(bestln)
# print "the full goodlines list of lists \n", good_lines
# put column numbers in a list
rng_list = []
for i in range(num_cols):
# print "eye", i+1
rng_list.append(i+1)
# add empty lists to ascii_dict to append to later...
for i in rng_list:
ascii_dict["{}".format(i)] = []
# use zip() to pull the correct list item from each line of the ascii into the correct column heading set up in...
# ...the ascii_dict
for lst in good_lines:
for h, val in zip(rng_list, lst):
# print "h", h
# print "val", val
ascii_dict["{}".format(h)].append(val)
# print "new ascii dict {}".format(ascii_dict)
return ascii_dict
def plotter(x, y):
"""
:param x: your independent variable (NDVI)
:param y: your dependent variable (ETrF)
:return:
"""
# todo - make little gridlines
# compute the ideal ETrF curve from NDVI: 1.25 * NDVI, capped at 1.0 for NDVI >= 0.8
ndvi = x
ideal_etrf = []
for i in ndvi:
if i >= 0.8:
ideal_etrf.append(1)
elif i < 0.8:
ideal_etrf.append(i * 1.25)
# turn your x and y into numpy arrays
x = np.array(x)
y = np.array(y)
ideal_etrf = np.array(ideal_etrf)
ETrF_vs_NDVI = plt.figure()
aa = ETrF_vs_NDVI.add_subplot(111)
aa.set_title('ETrF vs NDVI', fontweight='bold')
aa.set_xlabel('NDVI', style='italic')
aa.set_ylabel('ETrF', style='italic')
aa.scatter(x, y, facecolors='none', edgecolors='blue')
aa.scatter(x, ideal_etrf, facecolors='none', edgecolors='red')
plt.minorticks_on()
# aa.grid(b=True, which='major', color='k')
aa.grid(b=True, which='minor', color='white')
plt.tight_layout()
# TODO - UNCOMMENT AND CHANGE THE PATH TO SAVE THE FIGURE AS A PDF TO A GIVEN LOCATION.
# plt.savefig(
# "/Volumes/SeagateExpansionDrive/jan_metric_PHX_GR/green_river_stack/stack_output/20150728_ETrF_NDVI_gr.pdf")
plt.show()
def simple_plot(x,y):
""""""
# turn your x and y into numpy arrays
x = np.array(x)
y = np.array(y)
plt.scatter(x, y)
plt.show()
def df_filter(df, good_codes, code_col):
"""
:param df: pandas dataframe
:param good_codes: is a list of ints of desirable codes
:param code_col: is a string that indicates which col of dataframe your codes are in
:return: filtered dataframe w only rows that contain good codes.
"""
# for code in good_codes:
#
# df = df[df[code_col] != code]
#
# print "filtered df {}".format(df)
#
# return df
code = df[code_col]
# print "CODE\n", code
for i in code:
if i not in good_codes:
df = df[df[code_col] != i]
return df
def run_ascii():
"""
JAN START HERE - G
Reads in an ascii file from ERDAS, parses the information and stores it in a pandas DataFrame for filtering and plotting.
:return:
"""
# TODO - CHANGE FOR EVERY NEW ASCII
# Here's the path to a particular ascii file to start out.
path = "/Users/Gabe/Desktop/hard_drive_overflow/lt50330372008128pac01_etrf_ndvi_alb_lst_bqa_nad83.asc"
# Give the parser the string of the first column header so that the parsing can be done correctly
first_col = "X"
# TODO- CHANGE EVERY TIME YOU CHANGE THE NUMBER OF COLUMNS IN THE ASCII
# Give the parser the number of cols
num_cols = 7
# changed in function from the last version......
parsed_dict = parse_file(path, first_col, num_cols)
# output the parsed_dict to a csv file and then proceed to filter the csv file for undesirable values...
# first we turn the dictionary into a dataframe...
# build the list of columns for dataframe conversion
colums = []
for key in parsed_dict.keys():
colums.append(key)
df = pd.DataFrame(parsed_dict, columns=colums)
# TODO - CHANGE THE OUTPUT PATH/filename FOR YOUR SYSTEM FOR THE UNFILTERED FILE
# put the unfiltered dataframe out to a .csv in a location of our choice
filename = "landsat" # what do you want the files name to be?
output_path = "/Users/Gabe/Desktop"
# TODO - CHANGE header = True and index = True if you want headers or indices in the .csv
df.to_csv("{}/unfiltered_{}.csv".format(output_path, filename), header=False, index=False)
# Now, filter the data:
# TODO - FILL this list with codes that you want to KEEP
desirable_codes = [672, 676, 680, 684]
# TODO - CHANGE THE CODE COL IF FOR WHATEVER REASON THE COLUMN WITH THE CODES CHANGES LOCATION IN THE ASCII FILE.
code_col = '7'
df = df_filter(df, desirable_codes, code_col)
print "The Filtered Dataframe {}".format(df)
# TODO - CHANGE header = True and index = True if you want headers or indices in the .csv
df.to_csv("{}/filtered_{}.csv".format(output_path, filename), header=False, index=False)
# TODO - CHANGE IF YOU WANT TO PLOT SOMETHING DIFFERENT
# Note that you will need to know which column in the ascii file your variable was in order to plot it here.
# what two variables do you want to plot?
# in this we're plotting columns three and four...
# Since we did df_filter() on the dataframe all we plot here are values that correspond to desirable codes...
x = df['3']
y = df['4']
# TODO - LEAVE THIS UNCOMMENTED FOR PLOTS OF NDVI AND ETRF
# this plotter function is customized for NDVI vs ETRF plotting
plotter(x, y)
# TODO - UNCOMMENT THESE LINES IF YOU ARENT PLOTTING ETRF AND NDVI AGAINST EACH OTHER
# # for a simple x, y plot use
# simple_plot(x, y)
if __name__ == "__main__":
run_ascii() | apache-2.0 |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/matplotlib/backends/backend_cocoaagg.py | 8 | 10027 | """
backend_cocoaagg.py
A native Cocoa backend via PyObjC in OSX.
Author: Charles Moad ([email protected])
Notes:
- Requires PyObjC (currently testing v1.3.7)
- The Tk backend works nicely on OSX. This code
primarily serves as an example of embedding a
matplotlib rendering context into a cocoa app
using a NSImageView.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
import os, sys
try:
import objc
except ImportError:
raise ImportError('The CocoaAgg backend requires PyObjC to be installed!')
from Foundation import *
from AppKit import *
from PyObjCTools import NibClassBuilder, AppHelper
from matplotlib import cbook
cbook.warn_deprecated(
'1.3',
message="The CocoaAgg backend is not a fully-functioning backend. "
"It may be removed in matplotlib 1.4.")
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backend_bases import FigureManagerBase, FigureCanvasBase
from matplotlib.backend_bases import ShowBase
from .backend_agg import FigureCanvasAgg
from matplotlib._pylab_helpers import Gcf
mplBundle = NSBundle.bundleWithPath_(os.path.dirname(__file__))
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasCocoaAgg(figure)
return FigureManagerCocoaAgg(canvas, num)
## Below is the original show() function:
#def show():
# for manager in Gcf.get_all_fig_managers():
# manager.show()
#
## It appears that this backend is unusual in having a separate
## run function invoked for each figure, instead of a single
## mainloop. Presumably there is no blocking at all.
##
## Using the Show class below should cause no difference in
## behavior.
class Show(ShowBase):
def mainloop(self):
pass
show = Show()
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.show()
class FigureCanvasCocoaAgg(FigureCanvasAgg):
def draw(self):
FigureCanvasAgg.draw(self)
def blit(self, bbox):
pass
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
NibClassBuilder.extractClasses('Matplotlib.nib', mplBundle)
class MatplotlibController(NibClassBuilder.AutoBaseClass):
# available outlets:
# NSWindow plotWindow
# PlotView plotView
def awakeFromNib(self):
# Get a reference to the active canvas
NSApp().setDelegate_(self)
self.app = NSApp()
self.canvas = Gcf.get_active().canvas
self.plotView.canvas = self.canvas
self.canvas.plotView = self.plotView
self.plotWindow.setAcceptsMouseMovedEvents_(True)
self.plotWindow.makeKeyAndOrderFront_(self)
self.plotWindow.setDelegate_(self)#.plotView)
self.plotView.setImageFrameStyle_(NSImageFrameGroove)
self.plotView.image_ = NSImage.alloc().initWithSize_((0,0))
self.plotView.setImage_(self.plotView.image_)
# Make imageview first responder for key events
self.plotWindow.makeFirstResponder_(self.plotView)
# Force the first update
self.plotView.windowDidResize_(self)
def windowDidResize_(self, sender):
self.plotView.windowDidResize_(sender)
def windowShouldClose_(self, sender):
#NSApplication.sharedApplication().stop_(self)
self.app.stop_(self)
return objc.YES
def saveFigure_(self, sender):
p = NSSavePanel.savePanel()
if(p.runModal() == NSFileHandlingPanelOKButton):
self.canvas.print_figure(p.filename())
def printFigure_(self, sender):
op = NSPrintOperation.printOperationWithView_(self.plotView)
op.runOperation()
class PlotWindow(NibClassBuilder.AutoBaseClass):
pass
class PlotView(NibClassBuilder.AutoBaseClass):
def updatePlot(self):
w,h = self.canvas.get_width_height()
# Remove all previous images
for i in xrange(self.image_.representations().count()):
self.image_.removeRepresentation_(self.image_.representations().objectAtIndex_(i))
self.image_.setSize_((w,h))
brep = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
(self.canvas.buffer_rgba(),'','','',''), # Image data
w, # width
h, # height
8, # bits per pixel
4, # components per pixel
True, # has alpha?
False, # is planar?
NSCalibratedRGBColorSpace, # color space
w*4, # row bytes
32) # bits per pixel
self.image_.addRepresentation_(brep)
self.setNeedsDisplay_(True)
def windowDidResize_(self, sender):
w,h = self.bounds().size
dpi = self.canvas.figure.dpi
self.canvas.figure.set_size_inches(w / dpi, h / dpi)
self.canvas.draw()
self.updatePlot()
def mouseDown_(self, event):
dblclick = (event.clickCount() == 2)
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
type = event.type()
if (type == NSLeftMouseDown):
button = 1
else:
print('Unknown mouse event type:', type, file=sys.stderr)
button = -1
self.canvas.button_press_event(loc.x, loc.y, button, dblclick=dblclick)
self.updatePlot()
def mouseDragged_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
self.canvas.motion_notify_event(loc.x, loc.y)
self.updatePlot()
def mouseUp_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
type = event.type()
if (type == NSLeftMouseUp):
button = 1
else:
print('Unknown mouse event type:', type, file=sys.stderr)
button = -1
self.canvas.button_release_event(loc.x, loc.y, button)
self.updatePlot()
def keyDown_(self, event):
self.canvas.key_press_event(event.characters())
self.updatePlot()
def keyUp_(self, event):
self.canvas.key_release_event(event.characters())
self.updatePlot()
class MPLBootstrap(NSObject):
# Loads the nib containing the PlotWindow and PlotView
def startWithBundle_(self, bundle):
#NSApplicationLoad()
if not bundle.loadNibFile_externalNameTable_withZone_('Matplotlib.nib', {}, None):
print('Unable to load Matplotlib Cocoa UI!', file=sys.stderr)
sys.exit()
class FigureManagerCocoaAgg(FigureManagerBase):
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
try:
WMEnable('Matplotlib')
except:
# MULTIPLE FIGURES ARE BUGGY!
pass # If there are multiple figures we only need to enable once
#self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
# 'startWithBundle:',
# mplBundle,
# False)
def show(self):
# Load a new PlotWindow
self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
'startWithBundle:',
mplBundle,
False)
NSApplication.sharedApplication().run()
#### Everything below taken from PyObjC examples
#### This is a hack to allow python scripts to access
#### the window manager without running pythonw.
def S(*args):
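# Concatenate Objective-C type-encoding fragments into a single function signature string.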
return ''.join(args)
OSErr = objc._C_SHT
OUTPSN = 'o^{ProcessSerialNumber=LL}'
INPSN = 'n^{ProcessSerialNumber=LL}'
FUNCTIONS=[
# These two are public API
( 'GetCurrentProcess', S(OSErr, OUTPSN) ),
( 'SetFrontProcess', S(OSErr, INPSN) ),
# This is undocumented SPI
( 'CPSSetProcessName', S(OSErr, INPSN, objc._C_CHARPTR) ),
( 'CPSEnableForegroundOperation', S(OSErr, INPSN) ),
]
def WMEnable(name='Python'):
if isinstance(name, six.text_type):
name = name.encode('utf8')
mainBundle = NSBundle.mainBundle()
bPath = os.path.split(os.path.split(os.path.split(sys.executable)[0])[0])[0]
if mainBundle.bundlePath() == bPath:
return True
bndl = NSBundle.bundleWithPath_(objc.pathForFramework('/System/Library/Frameworks/ApplicationServices.framework'))
if bndl is None:
print('ApplicationServices missing', file=sys.stderr)
return False
d = {}
objc.loadBundleFunctions(bndl, d, FUNCTIONS)
for (fn, sig) in FUNCTIONS:
if fn not in d:
print('Missing', fn, file=sys.stderr)
return False
err, psn = d['GetCurrentProcess']()
if err:
print('GetCurrentProcess', (err, psn), file=sys.stderr)
return False
err = d['CPSSetProcessName'](psn, name)
if err:
print('CPSSetProcessName', (err, psn), file=sys.stderr)
return False
err = d['CPSEnableForegroundOperation'](psn)
if err:
#print >>sys.stderr, 'CPSEnableForegroundOperation', (err, psn)
return False
err = d['SetFrontProcess'](psn)
if err:
print('SetFrontProcess', (err, psn), file=sys.stderr)
return False
return True
FigureCanvas = FigureCanvasCocoaAgg
FigureManager = FigureManagerCocoaAgg
| mit |
dwillmer/fastats | tests/linear_algebra/test_qr.py | 2 | 3368 |
from unittest import TestCase
import numpy as np
from numpy.testing import assert_allclose
from pytest import mark
from fastats.core.ast_transforms.convert_to_jit import convert_to_jit
from fastats.linear_algebra import qr, qr_classical_gram_schmidt
from fastats.scaling.scaling import standard
from tests.data.datasets import SKLearnDataSets
qr_jit = convert_to_jit(qr)
qr_classical_gram_schmidt_jit = convert_to_jit(qr_classical_gram_schmidt)
class QRTestMixin:
@staticmethod
def assert_orthonormal(Q):
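# Q has orthonormal columns iff Q.T @ Q equals the identity matrix.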
n = Q.shape[1]
assert_allclose(Q.T @ Q, np.eye(n), atol=1e-10)
@staticmethod
def check_versus_expectations(Q, Q_expected, R, R_expected, A):
assert_allclose(Q, Q_expected)
assert_allclose(R, R_expected)
assert_allclose(Q @ R, A)
def test_ucla_4x3(self):
"""
QR decomposition of a 4x3 matrix, taken from literature directory
'ucla_qr_factorization.pdf':
"""
A = np.array([[-1, -1, 1],
[1, 3, 3],
[-1, -1, 5],
[1, 3, 7]])
Q_expected = np.array([[-0.5, 0.5, -0.5],
[0.5, 0.5, -0.5],
[-0.5, 0.5, 0.5],
[0.5, 0.5, 0.5]])
R_expected = np.array([[2, 4, 2],
[0, 2, 8],
[0, 0, 4]])
Q, R = self.fn(A)
self.check_versus_expectations(Q, Q_expected, R, R_expected, A)
self.assert_orthonormal(Q)
def test_wikipedia_3x3(self):
"""
QR decomposition of a 3x3 matrix, per the following:
https://en.wikipedia.org/wiki/QR_decomposition
"""
A = np.array([[12, -51, 4],
[6, 167, -68],
[-4, 24, -41]])
Q_expected = np.array([[6/7, -69/175, -58/175],
[3/7, 158/175, 6/175],
[-2/7, 6/35, -33/35]])
R_expected = np.array([[14, 21, -14],
[0, 175, -70],
[0, 0, 35]])
Q, R = self.fn(A)
self.check_versus_expectations(Q, Q_expected, R, R_expected, A)
self.assert_orthonormal(Q)
class QRTests(QRTestMixin, TestCase):
def setUp(self):
self.fn = qr
class QRClassicalGSTests(QRTestMixin, TestCase):
def setUp(self):
self.fn = qr_classical_gram_schmidt
class QRJitTests(QRTestMixin, TestCase):
def setUp(self):
self.fn = qr_jit
class QRClassicalGSJitTests(QRTestMixin, TestCase):
def setUp(self):
self.fn = qr_classical_gram_schmidt_jit
def standardise(Q, R):
"""
QR decomposition may not be unique; here we chose to enforce
positive R diagonals to facilitate comparison of solutions
"""
D = np.diag(np.sign(np.diag(R)))
return Q @ D, D @ R
def check_versus_numpy(A, fn):
Q_expected, R_expected = standardise(*np.linalg.qr(A))
Q, R = standardise(*fn(A))
assert_allclose(Q, Q_expected)
assert_allclose(R, R_expected)
@mark.parametrize('dataset', SKLearnDataSets)
def test_sklearn_dataset(dataset):
data = standard(dataset.value)
A = data.T @ data
check_versus_numpy(A, qr_jit)
if __name__ == '__main__':
import pytest
pytest.main([__file__])
| mit |
mihirkelkar/BuildingMachineLearningSystemsWithPython | ch09/fft.py | 24 | 3673 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import sys
import os
import glob
import numpy as np
import scipy
import scipy.io.wavfile
from utils import GENRE_DIR, CHART_DIR
import matplotlib.pyplot as plt
from matplotlib.ticker import EngFormatter
def write_fft(fft_features, fn):
"""
Write the FFT features to separate files to speed up processing.
"""
base_fn, ext = os.path.splitext(fn)
data_fn = base_fn + ".fft"
np.save(data_fn, fft_features)
print("Written "%data_fn)
def create_fft(fn):
sample_rate, X = scipy.io.wavfile.read(fn)
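# Keep the magnitudes of the first 1000 FFT coefficients as the feature vector.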
fft_features = abs(scipy.fft(X)[:1000])
write_fft(fft_features, fn)
def read_fft(genre_list, base_dir=GENRE_DIR):
X = []
y = []
for label, genre in enumerate(genre_list):
genre_dir = os.path.join(base_dir, genre, "*.fft.npy")
file_list = glob.glob(genre_dir)
assert(file_list), genre_dir
for fn in file_list:
fft_features = np.load(fn)
X.append(fft_features[:2000])
y.append(label)
return np.array(X), np.array(y)
def plot_wav_fft(wav_filename, desc=None):
plt.clf()
plt.figure(num=None, figsize=(6, 4))
sample_rate, X = scipy.io.wavfile.read(wav_filename)
spectrum = np.fft.fft(X)
freq = np.fft.fftfreq(len(X), 1.0 / sample_rate)
plt.subplot(211)
num_samples = 200.0
plt.xlim(0, num_samples / sample_rate)
plt.xlabel("time [s]")
plt.title(desc or wav_filename)
plt.plot(np.arange(num_samples) / sample_rate, X[:num_samples])
plt.grid(True)
plt.subplot(212)
plt.xlim(0, 5000)
plt.xlabel("frequency [Hz]")
plt.xticks(np.arange(5) * 1000)
if desc:
desc = desc.strip()
fft_desc = desc[0].lower() + desc[1:]
else:
fft_desc = wav_filename
plt.title("FFT of %s" % fft_desc)
plt.plot(freq, abs(spectrum), linewidth=5)
plt.grid(True)
plt.tight_layout()
rel_filename = os.path.split(wav_filename)[1]
plt.savefig("%s_wav_fft.png" % os.path.splitext(rel_filename)[0],
bbox_inches='tight')
plt.show()
def plot_wav_fft_demo():
plot_wav_fft("sine_a.wav", "400Hz sine wave")
plot_wav_fft("sine_b.wav", "3,000Hz sine wave")
plot_wav_fft("sine_mix.wav", "Mixed sine wave")
def plot_specgram(ax, fn):
sample_rate, X = scipy.io.wavfile.read(fn)
ax.specgram(X, Fs=sample_rate, xextent=(0, 30))
def plot_specgrams(base_dir=CHART_DIR):
"""
Plot a bunch of spectrograms of wav files in different genres
"""
plt.clf()
genres = ["classical", "jazz", "country", "pop", "rock", "metal"]
num_files = 3
f, axes = plt.subplots(len(genres), num_files)
for genre_idx, genre in enumerate(genres):
for idx, fn in enumerate(glob.glob(os.path.join(GENRE_DIR, genre, "*.wav"))):
if idx == num_files:
break
axis = axes[genre_idx, idx]
axis.yaxis.set_major_formatter(EngFormatter())
axis.set_title("%s song %i" % (genre, idx + 1))
plot_specgram(axis, fn)
specgram_file = os.path.join(base_dir, "Spectrogram_Genres.png")
plt.savefig(specgram_file, bbox_inches="tight")
plt.show()
if __name__ == "__main__":
# for fn in glob.glob(os.path.join(sys.argv[1], "*.wav")):
# create_fft(fn)
# plot_decomp()
if len(sys.argv) > 1:
plot_wav_fft(sys.argv[1], desc="some sample song")
else:
plot_wav_fft_demo()
plot_specgrams()
| mit |
rgommers/statsmodels | statsmodels/examples/ex_regressionplots.py | 34 | 4457 | # -*- coding: utf-8 -*-
"""Examples for Regression Plots
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
import statsmodels.graphics.regressionplots as smrp
#example from tut.ols with changes
#fix a seed for these examples
np.random.seed(9876789)
# OLS non-linear curve but linear in parameters
# ---------------------------------------------
nsample = 100
sig = 0.5
x1 = np.linspace(0, 20, nsample)
x2 = 5 + 3* np.random.randn(nsample)
X = np.c_[x1, x2, np.sin(0.5*x1), (x2-5)**2, np.ones(nsample)]
beta = [0.5, 0.5, 1, -0.04, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
#estimate only linear function, misspecified because of non-linear terms
exog0 = sm.add_constant(np.c_[x1, x2], prepend=False)
# plt.figure()
# plt.plot(x1, y, 'o', x1, y_true, 'b-')
res = sm.OLS(y, exog0).fit()
#print res.params
#print res.bse
plot_old = 0 #True
if plot_old:
#current bug predict requires call to model.results
#print res.model.predict
prstd, iv_l, iv_u = wls_prediction_std(res)
plt.plot(x1, res.fittedvalues, 'r-o')
plt.plot(x1, iv_u, 'r--')
plt.plot(x1, iv_l, 'r--')
plt.title('blue: true, red: OLS')
plt.figure()
plt.plot(res.resid, 'o')
plt.title('Residuals')
fig2 = plt.figure()
ax = fig2.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
plt.plot(x1, res.resid, 'o')
ax.set_title('residuals versus exog')# + namestr)
ax = fig2.add_subplot(2,1,2)
plt.plot(x2, res.resid, 'o')
fig3 = plt.figure()
ax = fig3.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
plt.plot(x1, res.fittedvalues, 'o')
ax.set_title('Fitted values versus exog')# + namestr)
ax = fig3.add_subplot(2,1,2)
plt.plot(x2, res.fittedvalues, 'o')
fig4 = plt.figure()
ax = fig4.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
plt.plot(x1, res.fittedvalues + res.resid, 'o')
ax.set_title('Fitted values plus residuals versus exog')# + namestr)
ax = fig4.add_subplot(2,1,2)
plt.plot(x2, res.fittedvalues + res.resid, 'o')
# see http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/partregr.htm
fig5 = plt.figure()
ax = fig5.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
res1a = sm.OLS(y, exog0[:,[0,2]]).fit()
res1b = sm.OLS(x1, exog0[:,[0,2]]).fit()
plt.plot(res1b.resid, res1a.resid, 'o')
res1c = sm.OLS(res1a.resid, res1b.resid).fit()
plt.plot(res1b.resid, res1c.fittedvalues, '-')
ax.set_title('Partial Regression plot')# + namestr)
ax = fig5.add_subplot(2,1,2)
#plt.plot(x2, res.fittedvalues + res.resid, 'o')
res2a = sm.OLS(y, exog0[:,[0,1]]).fit()
res2b = sm.OLS(x2, exog0[:,[0,1]]).fit()
plt.plot(res2b.resid, res2a.resid, 'o')
res2c = sm.OLS(res2a.resid, res2b.resid).fit()
plt.plot(res2b.resid, res2c.fittedvalues, '-')
# see http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/ccpr.htm
fig6 = plt.figure()
ax = fig6.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
x1beta = x1*res.params[1]
x2beta = x2*res.params[2]
plt.plot(x1, x1beta + res.resid, 'o')
plt.plot(x1, x1beta, '-')
ax.set_title('X_i beta_i plus residuals versus exog (CCPR)')# + namestr)
ax = fig6.add_subplot(2,1,2)
plt.plot(x2, x2beta + res.resid, 'o')
plt.plot(x2, x2beta, '-')
#print res.summary()
doplots = 1
if doplots:
fig1 = smrp.plot_fit(res, 0, y_true=None)
smrp.plot_fit(res, 1, y_true=None)
smrp.plot_partregress_grid(res, exog_idx=[0,1])
smrp.plot_regress_exog(res, exog_idx=0)
smrp.plot_ccpr(res, exog_idx=0)
smrp.plot_ccpr_grid(res, exog_idx=[0,1])
from statsmodels.graphics.tests.test_regressionplots import TestPlot
tp = TestPlot()
tp.test_plot_fit()
fig1 = smrp.plot_partregress_grid(res, exog_idx=[0,1])
#add lowess
ax = fig1.axes[0]
y0 = ax.get_lines()[0]._y
x0 = ax.get_lines()[0]._x
lres = sm.nonparametric.lowess(y0, x0, frac=0.2)
ax.plot(lres[:,0], lres[:,1], 'r', lw=1.5)
ax = fig1.axes[1]
y0 = ax.get_lines()[0]._y
x0 = ax.get_lines()[0]._x
lres = sm.nonparametric.lowess(y0, x0, frac=0.2)
ax.plot(lres[:,0], lres[:,1], 'r', lw=1.5)
#plt.show()
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/matplotlib/texmanager.py | 8 | 25543 | """
This module supports embedded TeX expressions in matplotlib via dvipng
and dvips for the raster and postscript backends. The tex and
dvipng/dvips information is cached in ~/.matplotlib/tex.cache for reuse between
sessions
Requirements:
* latex
* \*Agg backends: dvipng
* PS backend: latex w/ psfrag, dvips, and Ghostscript 8.51
(older versions do not work properly)
Backends:
* \*Agg
* PS
* PDF
For raster output, you can get RGBA numpy arrays from TeX expressions
as follows::
texmanager = TexManager()
s = ('\\TeX\\ is Number '
'$\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\pi}}{2^n}$!')
Z = self.texmanager.get_rgba(s, size=12, dpi=80, rgb=(1,0,0))
To enable tex rendering of all text in your matplotlib figure, set
text.usetex in your matplotlibrc file or include these two lines in
your script::
from matplotlib import rc
rc('text', usetex=True)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import copy
import glob
import os
import shutil
import sys
import warnings
from hashlib import md5
import distutils.version
import numpy as np
import matplotlib as mpl
from matplotlib import rcParams
from matplotlib._png import read_png
from matplotlib.cbook import mkdirs
from matplotlib.compat.subprocess import Popen, PIPE, STDOUT
import matplotlib.dviread as dviread
import re
DEBUG = False
if sys.platform.startswith('win'):
cmd_split = '&'
else:
cmd_split = ';'
def dvipng_hack_alpha():
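"""Return True if the installed dvipng is older than 1.6, in which case the alpha-channel workaround in get_grey is needed."""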
try:
p = Popen(['dvipng', '-version'], stdin=PIPE, stdout=PIPE,
stderr=STDOUT, close_fds=(sys.platform != 'win32'))
stdout, stderr = p.communicate()
except OSError:
mpl.verbose.report('No dvipng was found', 'helpful')
return False
lines = stdout.decode(sys.getdefaultencoding()).split('\n')
for line in lines:
if line.startswith('dvipng '):
version = line.split()[-1]
mpl.verbose.report('Found dvipng version %s' % version,
'helpful')
version = distutils.version.LooseVersion(version)
return version < distutils.version.LooseVersion('1.6')
mpl.verbose.report('Unexpected response from dvipng -version', 'helpful')
return False
class TexManager(object):
"""
Convert strings to dvi files using TeX, caching the results to a
working dir
"""
oldpath = mpl.get_home()
if oldpath is None:
oldpath = mpl.get_data_path()
oldcache = os.path.join(oldpath, '.tex.cache')
cachedir = mpl.get_cachedir()
if cachedir is not None:
texcache = os.path.join(cachedir, 'tex.cache')
else:
# Should only happen in a restricted environment (such as Google App
# Engine). Deal with this gracefully by not creating a cache directory.
texcache = None
if os.path.exists(oldcache):
if texcache is not None:
try:
shutil.move(oldcache, texcache)
except IOError as e:
warnings.warn('File could not be renamed: %s' % e)
else:
warnings.warn("""\
Found a TeX cache dir in the deprecated location "%s".
Moving it to the new default location "%s".""" % (oldcache, texcache))
else:
warnings.warn("""\
Could not rename old TeX cache dir "%s": a suitable configuration
directory could not be found.""" % oldcache)
if texcache is not None:
mkdirs(texcache)
_dvipng_hack_alpha = None
#_dvipng_hack_alpha = dvipng_hack_alpha()
# mappable cache of
rgba_arrayd = {}
grey_arrayd = {}
postscriptd = {}
pscnt = 0
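# Default (LaTeX font name, preamble command) pairs for each generic font family.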
serif = ('cmr', '')
sans_serif = ('cmss', '')
monospace = ('cmtt', '')
cursive = ('pzc', '\\usepackage{chancery}')
font_family = 'serif'
font_families = ('serif', 'sans-serif', 'cursive', 'monospace')
font_info = {'new century schoolbook': ('pnc',
r'\renewcommand{\rmdefault}{pnc}'),
'bookman': ('pbk', r'\renewcommand{\rmdefault}{pbk}'),
'times': ('ptm', '\\usepackage{mathptmx}'),
'palatino': ('ppl', '\\usepackage{mathpazo}'),
'zapf chancery': ('pzc', '\\usepackage{chancery}'),
'cursive': ('pzc', '\\usepackage{chancery}'),
'charter': ('pch', '\\usepackage{charter}'),
'serif': ('cmr', ''),
'sans-serif': ('cmss', ''),
'helvetica': ('phv', '\\usepackage{helvet}'),
'avant garde': ('pag', '\\usepackage{avant}'),
'courier': ('pcr', '\\usepackage{courier}'),
'monospace': ('cmtt', ''),
'computer modern roman': ('cmr', ''),
'computer modern sans serif': ('cmss', ''),
'computer modern typewriter': ('cmtt', '')}
_rc_cache = None
_rc_cache_keys = (('text.latex.preamble', ) +
tuple(['font.' + n for n in ('family', ) +
font_families]))
def __init__(self):
if self.texcache is None:
raise RuntimeError(
('Cannot create TexManager, as there is no cache directory '
'available'))
mkdirs(self.texcache)
ff = rcParams['font.family']
if len(ff) == 1 and ff[0].lower() in self.font_families:
self.font_family = ff[0].lower()
elif isinstance(ff, six.string_types) and ff.lower() in self.font_families:
self.font_family = ff.lower()
else:
mpl.verbose.report(
'font.family must be one of (%s) when text.usetex is True. '
'serif will be used by default.' %
', '.join(self.font_families),
'helpful')
self.font_family = 'serif'
fontconfig = [self.font_family]
for font_family, font_family_attr in [(ff, ff.replace('-', '_'))
for ff in self.font_families]:
for font in rcParams['font.' + font_family]:
if font.lower() in self.font_info:
setattr(self, font_family_attr,
self.font_info[font.lower()])
if DEBUG:
print('family: %s, font: %s, info: %s' %
(font_family, font,
self.font_info[font.lower()]))
break
else:
if DEBUG:
print('%s font is not compatible with usetex' % font)
else:
mpl.verbose.report('No LaTeX-compatible font found for the '
'%s font family in rcParams. Using '
'default.' % ff, 'helpful')
setattr(self, font_family_attr, self.font_info[font_family])
fontconfig.append(getattr(self, font_family_attr)[0])
# Add a hash of the latex preamble to self._fontconfig so that the
# correct png is selected for strings rendered with same font and dpi
# even if the latex preamble changes within the session
preamble_bytes = six.text_type(self.get_custom_preamble()).encode('utf-8')
fontconfig.append(md5(preamble_bytes).hexdigest())
self._fontconfig = ''.join(fontconfig)
# The following packages and commands need to be included in the latex
# file's preamble:
cmd = [self.serif[1], self.sans_serif[1], self.monospace[1]]
if self.font_family == 'cursive':
cmd.append(self.cursive[1])
while '\\usepackage{type1cm}' in cmd:
cmd.remove('\\usepackage{type1cm}')
cmd = '\n'.join(cmd)
self._font_preamble = '\n'.join(['\\usepackage{type1cm}', cmd,
'\\usepackage{textcomp}'])
def get_basefile(self, tex, fontsize, dpi=None):
"""
returns a filename based on a hash of the string, fontsize, and dpi
"""
s = ''.join([tex, self.get_font_config(), '%f' % fontsize,
self.get_custom_preamble(), str(dpi or '')])
# make sure hash is consistent for all strings, regardless of encoding:
bytes = six.text_type(s).encode('utf-8')
return os.path.join(self.texcache, md5(bytes).hexdigest())
def get_font_config(self):
"""Reinitializes self if relevant rcParams on have changed."""
if self._rc_cache is None:
self._rc_cache = dict([(k, None) for k in self._rc_cache_keys])
changed = [par for par in self._rc_cache_keys
if rcParams[par] != self._rc_cache[par]]
if changed:
if DEBUG:
print('DEBUG following keys changed:', changed)
for k in changed:
if DEBUG:
print('DEBUG %-20s: %-10s -> %-10s' %
(k, self._rc_cache[k], rcParams[k]))
# deepcopy may not be necessary, but feels more future-proof
self._rc_cache[k] = copy.deepcopy(rcParams[k])
if DEBUG:
print('DEBUG RE-INIT\nold fontconfig:', self._fontconfig)
self.__init__()
if DEBUG:
print('DEBUG fontconfig:', self._fontconfig)
return self._fontconfig
def get_font_preamble(self):
"""
returns a string containing font configuration for the tex preamble
"""
return self._font_preamble
def get_custom_preamble(self):
"""returns a string containing user additions to the tex preamble"""
return '\n'.join(rcParams['text.latex.preamble'])
def _get_shell_cmd(self, *args):
"""
On windows, changing directories can be complicated by the presence of
multiple drives. get_shell_cmd deals with this issue.
"""
if sys.platform == 'win32':
command = ['%s' % os.path.splitdrive(self.texcache)[0]]
else:
command = []
command.extend(args)
return ' && '.join(command)
def make_tex(self, tex, fontsize):
"""
Generate a tex file to render the tex string at a specific font size
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
texfile = '%s.tex' % basefile
custom_preamble = self.get_custom_preamble()
fontcmd = {'sans-serif': r'{\sffamily %s}',
'monospace': r'{\ttfamily %s}'}.get(self.font_family,
r'{\rmfamily %s}')
tex = fontcmd % tex
if rcParams['text.latex.unicode']:
unicode_preamble = """\\usepackage{ucs}
\\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = """\\documentclass{article}
%s
%s
%s
\\usepackage[papersize={72in,72in},body={70in,70in},margin={1in,1in}]{geometry}
\\pagestyle{empty}
\\begin{document}
\\fontsize{%f}{%f}%s
\\end{document}
""" % (self._font_preamble, unicode_preamble, custom_preamble,
fontsize, fontsize * 1.25, tex)
with open(texfile, 'wb') as fh:
if rcParams['text.latex.unicode']:
fh.write(s.encode('utf8'))
else:
try:
fh.write(s.encode('ascii'))
except UnicodeEncodeError as err:
mpl.verbose.report("You are using unicode and latex, but "
"have not enabled the matplotlib "
"'text.latex.unicode' rcParam.",
'helpful')
raise
return texfile
_re_vbox = re.compile(
r"MatplotlibBox:\(([\d.]+)pt\+([\d.]+)pt\)x([\d.]+)pt")
def make_tex_preview(self, tex, fontsize):
"""
Generate a tex file to render the tex string at a specific
font size. It uses preview.sty to determine the dimensions
(width, height, descent) of the output.
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
texfile = '%s.tex' % basefile
custom_preamble = self.get_custom_preamble()
fontcmd = {'sans-serif': r'{\sffamily %s}',
'monospace': r'{\ttfamily %s}'}.get(self.font_family,
r'{\rmfamily %s}')
tex = fontcmd % tex
if rcParams['text.latex.unicode']:
unicode_preamble = """\\usepackage{ucs}
\\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
# newbox, setbox, immediate, etc. are used to find the box
# extent of the rendered text.
s = """\\documentclass{article}
%s
%s
%s
\\usepackage[active,showbox,tightpage]{preview}
\\usepackage[papersize={72in,72in},body={70in,70in},margin={1in,1in}]{geometry}
%% we override the default showbox as it is treated as an error and makes
%% the exit status not zero
\\def\\showbox#1{\\immediate\\write16{MatplotlibBox:(\\the\\ht#1+\\the\\dp#1)x\\the\\wd#1}}
\\begin{document}
\\begin{preview}
{\\fontsize{%f}{%f}%s}
\\end{preview}
\\end{document}
""" % (self._font_preamble, unicode_preamble, custom_preamble,
fontsize, fontsize * 1.25, tex)
with open(texfile, 'wb') as fh:
if rcParams['text.latex.unicode']:
fh.write(s.encode('utf8'))
else:
try:
fh.write(s.encode('ascii'))
except UnicodeEncodeError as err:
mpl.verbose.report("You are using unicode and latex, but "
"have not enabled the matplotlib "
"'text.latex.unicode' rcParam.",
'helpful')
raise
return texfile
def make_dvi(self, tex, fontsize):
"""
generates a dvi file containing latex's layout of tex string
returns the file name
"""
if rcParams['text.latex.preview']:
return self.make_dvi_preview(tex, fontsize)
basefile = self.get_basefile(tex, fontsize)
dvifile = '%s.dvi' % basefile
if DEBUG or not os.path.exists(dvifile):
texfile = self.make_tex(tex, fontsize)
outfile = basefile + '.output'
command = self._get_shell_cmd(
'cd "%s"' % self.texcache,
'latex -interaction=nonstopmode %s > "%s"' %
(os.path.split(texfile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
with open(outfile) as fh:
report = fh.read()
except IOError:
report = 'No latex error report available.'
try:
os.stat(dvifile)
exists = True
except OSError:
exists = False
if exit_status or not exists:
raise RuntimeError(
('LaTeX was not able to process the following '
'string:\n%s\nHere is the full report generated by '
'LaTeX: \n\n' % repr(tex.encode('unicode_escape')) +
report))
else:
mpl.verbose.report(report, 'debug')
for fname in glob.glob(basefile + '*'):
if fname.endswith('dvi'):
pass
elif fname.endswith('tex'):
pass
else:
try:
os.remove(fname)
except OSError:
pass
return dvifile
def make_dvi_preview(self, tex, fontsize):
"""
generates a dvi file containing latex's layout of tex
string. It calls the make_tex_preview() method and stores the size
information (width, height, descent) in a separate file.
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
dvifile = '%s.dvi' % basefile
baselinefile = '%s.baseline' % basefile
if (DEBUG or not os.path.exists(dvifile) or
not os.path.exists(baselinefile)):
texfile = self.make_tex_preview(tex, fontsize)
outfile = basefile + '.output'
command = self._get_shell_cmd(
'cd "%s"' % self.texcache,
'latex -interaction=nonstopmode %s > "%s"' %
(os.path.split(texfile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
with open(outfile) as fh:
report = fh.read()
except IOError:
report = 'No latex error report available.'
if exit_status:
raise RuntimeError(
('LaTeX was not able to process the following '
'string:\n%s\nHere is the full report generated by '
'LaTeX: \n\n' % repr(tex)) + report)
else:
mpl.verbose.report(report, 'debug')
# find the box extent information in the latex output
# file and store them in ".baseline" file
m = TexManager._re_vbox.search(report)
with open(basefile + '.baseline', "w") as fh:
fh.write(" ".join(m.groups()))
for fname in glob.glob(basefile + '*'):
if fname.endswith('dvi'):
pass
elif fname.endswith('tex'):
pass
elif fname.endswith('baseline'):
pass
else:
try:
os.remove(fname)
except OSError:
pass
return dvifile
def make_png(self, tex, fontsize, dpi):
"""
generates a png file containing latex's rendering of tex string
returns the filename
"""
basefile = self.get_basefile(tex, fontsize, dpi)
pngfile = '%s.png' % basefile
# see get_rgba for a discussion of the background
if DEBUG or not os.path.exists(pngfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile + '.output'
command = self._get_shell_cmd(
'cd "%s"' % self.texcache,
'dvipng -bg Transparent -D %s -T tight -o "%s" "%s" > "%s"' %
(dpi, os.path.split(pngfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
with open(outfile) as fh:
report = fh.read()
except IOError:
report = 'No dvipng error report available.'
if exit_status:
raise RuntimeError(
'dvipng was not able to process the following '
'file:\n%s\nHere is the full report generated by '
'dvipng: \n\n' % dvifile + report)
else:
mpl.verbose.report(report, 'debug')
try:
os.remove(outfile)
except OSError:
pass
return pngfile
def make_ps(self, tex, fontsize):
"""
generates a postscript file containing latex's rendering of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
psfile = '%s.epsf' % basefile
if DEBUG or not os.path.exists(psfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile + '.output'
command = self._get_shell_cmd(
'cd "%s"' % self.texcache,
'dvips -q -E -o "%s" "%s" > "%s"' %
(os.path.split(psfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
with open(outfile) as fh:
if exit_status:
raise RuntimeError(
'dvips was not able to process the following '
'file:\n%s\nHere is the full report generated by '
'dvips: \n\n' % dvifile + fh.read())
else:
mpl.verbose.report(fh.read(), 'debug')
os.remove(outfile)
return psfile
def get_ps_bbox(self, tex, fontsize):
"""
returns a list containing the postscript bounding box for latex's
rendering of the tex string
"""
psfile = self.make_ps(tex, fontsize)
with open(psfile) as ps:
for line in ps:
if line.startswith('%%BoundingBox:'):
return [int(val) for val in line.split()[1:]]
raise RuntimeError('Could not parse %s' % psfile)
def get_grey(self, tex, fontsize=None, dpi=None):
"""returns the alpha channel"""
key = tex, self.get_font_config(), fontsize, dpi
alpha = self.grey_arrayd.get(key)
if alpha is None:
pngfile = self.make_png(tex, fontsize, dpi)
X = read_png(os.path.join(self.texcache, pngfile))
if rcParams['text.dvipnghack'] is not None:
hack = rcParams['text.dvipnghack']
else:
if TexManager._dvipng_hack_alpha is None:
TexManager._dvipng_hack_alpha = dvipng_hack_alpha()
hack = TexManager._dvipng_hack_alpha
if hack:
# hack the alpha channel
# dvipng assumed a constant background, whereas we want to
# overlay these rasters with antialiasing over arbitrary
# backgrounds that may have other figure elements under them.
# When you set dvipng -bg Transparent, it actually makes the
# alpha channel 1 and does the background compositing and
# antialiasing itself and puts the blended data in the rgb
# channels. So what we do is extract the alpha information
# from the red channel, which is a blend of the default dvipng
# background (white) and foreground (black). So the amount of
# red (or green or blue for that matter since white and black
# blend to a grayscale) is the alpha intensity. Once we
# extract the correct alpha information, we assign it to the
# alpha channel properly and let the users pick their rgb. In
# this way, we can overlay tex strings on arbitrary
# backgrounds with antialiasing
#
# red = alpha*red_foreground + (1-alpha)*red_background
#
# Since the foreground is black (0) and the background is
# white (1) this reduces to red = 1-alpha or alpha = 1-red
#alpha = npy.sqrt(1-X[:,:,0]) # should this be sqrt here?
alpha = 1 - X[:, :, 0]
else:
alpha = X[:, :, -1]
self.grey_arrayd[key] = alpha
return alpha
def get_rgba(self, tex, fontsize=None, dpi=None, rgb=(0, 0, 0)):
"""
Returns latex's rendering of the tex string as an rgba array
"""
if not fontsize:
fontsize = rcParams['font.size']
if not dpi:
dpi = rcParams['savefig.dpi']
r, g, b = rgb
key = tex, self.get_font_config(), fontsize, dpi, tuple(rgb)
Z = self.rgba_arrayd.get(key)
if Z is None:
alpha = self.get_grey(tex, fontsize, dpi)
Z = np.zeros((alpha.shape[0], alpha.shape[1], 4), np.float)
Z[:, :, 0] = r
Z[:, :, 1] = g
Z[:, :, 2] = b
Z[:, :, 3] = alpha
self.rgba_arrayd[key] = Z
return Z
def get_text_width_height_descent(self, tex, fontsize, renderer=None):
"""
return width, height and descent of the text.
"""
if tex.strip() == '':
return 0, 0, 0
if renderer:
dpi_fraction = renderer.points_to_pixels(1.)
else:
dpi_fraction = 1.
if rcParams['text.latex.preview']:
# use preview.sty
basefile = self.get_basefile(tex, fontsize)
baselinefile = '%s.baseline' % basefile
if DEBUG or not os.path.exists(baselinefile):
dvifile = self.make_dvi_preview(tex, fontsize)
with open(baselinefile) as fh:
l = fh.read().split()
height, depth, width = [float(l1) * dpi_fraction for l1 in l]
return width, height + depth, depth
else:
# use dviread. It sometimes returns a wrong descent.
dvifile = self.make_dvi(tex, fontsize)
dvi = dviread.Dvi(dvifile, 72 * dpi_fraction)
try:
page = next(iter(dvi))
finally:
dvi.close()
# A total height (including the descent) needs to be returned.
return page.width, page.height + page.descent, page.descent
| mit |
HeraclesHX/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
liukaijv/XlsxWriter | examples/pandas_chart.py | 9 | 1049 | ##############################################################################
#
# An example of converting a Pandas dataframe to an xlsx file with a chart
# using Pandas and XlsxWriter.
#
# Copyright 2013-2015, John McNamara, [email protected]
#
import pandas as pd
# Create a Pandas dataframe from some data.
df = pd.DataFrame({'Data': [10, 20, 30, 20, 15, 30, 45]})
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter('pandas_chart.xlsx', engine='xlsxwriter')
# Convert the dataframe to an XlsxWriter Excel object.
df.to_excel(writer, sheet_name='Sheet1')
# Get the xlsxwriter workbook and worksheet objects.
workbook = writer.book
worksheet = writer.sheets['Sheet1']
# Create a chart object.
chart = workbook.add_chart({'type': 'column'})
# Configure the series of the chart from the dataframe data.
chart.add_series({'values': '=Sheet1!$B$2:$B$8'})
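# Note: '=Sheet1!$B$2:$B$8' spans the 7 data values written above; to_excel()
# puts the column header in row 1 and the dataframe index in column A.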
# Insert the chart into the worksheet.
worksheet.insert_chart('D2', chart)
# Close the Pandas Excel writer and output the Excel file.
writer.save()
| bsd-2-clause |
tdhopper/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
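    # The boundary satisfies w[0]*x + w[1]*y + intercept = 0, i.e.
    # y = -(w[0]/w[1])*x - intercept/w[1], which is what is evaluated below.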
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
numenta-archive/htmresearch | htmresearch/frameworks/dimensionality_reduction/proj.py | 6 | 2368 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Utility functions to project high-dimensional vectors (such as SDRs) in 2D"""
import numpy as np
from sklearn.manifold import TSNE, MDS
def project_in_2D(distance_mat, method='mds'):
"""
Project SDRs onto a 2D space using manifold learning algorithms
:param distance_mat: A square matrix with pairwise distances
:param method: Select method from 'mds' and 'tSNE'
:return: an array with dimension (numSDRs, 2). It contains the 2D projections
of each SDR
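  Example (illustrative): for a precomputed (numSDRs, numSDRs) distance
  matrix D, project_in_2D(D, method='tSNE') returns a (numSDRs, 2) embedding.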
"""
seed = np.random.RandomState(seed=3)
if method == 'mds':
mds = MDS(n_components=2, max_iter=3000, eps=1e-9,
random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(distance_mat).embedding_
nmds = MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed,
n_jobs=1, n_init=1)
pos = nmds.fit_transform(distance_mat, init=pos)
elif method == 'tSNE':
tsne = TSNE(n_components=2, init='pca', random_state=0)
pos = tsne.fit_transform(distance_mat)
else:
raise NotImplementedError
return pos
def project_matrix(mat):
tsne = TSNE(n_iter=1000, metric='precomputed', init='random')
return tsne.fit_transform(mat)
def project_vectors(vectors, distance):
tsne = TSNE(metric=distance, n_iter=500, init='pca')
return tsne.fit_transform(vectors)
| agpl-3.0 |
leal26/AeroPy | aeropy/geometry/fitting.py | 2 | 4127 | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import math
from scipy.optimize import minimize, differential_evolution
from scipy.spatial.distance import directed_hausdorff
from multiprocessing import Pool
import time
from aeropy.xfoil_module import output_reader
class fitting():
def __init__(self, **params):
def _callback(*args):
return params.get('callback', None)(self.object, *args)
self.object = params.get('object', np.linspace(10, 50, 9))
self.update = params.get('update', np.linspace(10, 50, 9))
self.x0 = params.get('x0', np.linspace(10, 50, 9))
self.p1 = params.get('p1', np.linspace(10, 50, 9))
self.p2 = params.get('p2', np.linspace(10, 50, 9))
self.p1_name = params.get('p1_name', 'Parameter 1')
self.p2_name = params.get('p2_name', 'Parameter 2')
self.calculate_points = params.get('calculate_points', 0)
self.raw = params.get('raw', 0)
self.callback = _callback
def convergence_study(self, parallel=True):
P1_f, P2_f = self._format_parameters()
if parallel:
p = Pool(len(P1_f))
else:
p = Pool(1)
input = np.vstack([P1_f, P2_f]).T
self.solutions = p.map(self.find, input)
self.error = np.array([self.solutions[i]['fun'] for i in
range(len(self.solutions))])
self.error = self.error.reshape(self.P1.shape)
self.rel_error = self.error/self.error[0][0]
def find(self, param_i=[None, None]):
'''
inputs: [location, XYZ, sy, ny, xshear]'''
p1_i, p2_i = param_i
x0 = self.x0(p1_i, p2_i)
start = time.time()
solution = minimize(self.shape_difference, x0, args=param_i)
end = time.time()
error = solution['fun']
if p1_i is None:
print('error=%f\t time=%f' % (error, end-start))
else:
print('p1=%i\t p2=%i\t error=%f\t time=%f' % (p1_i, p2_i, error,
end-start))
if self.callback is not None:
if p1_i is None:
self.callback()
else:
self.callback(p1_i, p2_i)
return solution
def shape_difference(self, x, param_i):
p1_i, p2_i = param_i
self.update(self.object, x, p1_i, p2_i)
points = self.calculate_points(self.object, self.raw)
if self.raw.ndim == 3:
error = 0
N = 0
for i in range(len(points)):
error += np.linalg.norm(points[i] - self.raw[i])**2
N += len(points[i])
else:
error = np.linalg.norm(points[0] - self.raw)**2
N = len(points)
return(error/N)
def plot_study(self, relative=True):
if relative:
z = self.rel_error
else:
z = self.error
fig, ax = plt.subplots()
cs = ax.contourf(self.P1, self.P2, z, np.linspace(0, 1, 101))
fig.colorbar(cs, ticks=np.linspace(0, 1, 6))
# plt.clim(0, 1)
plt.xlabel(self.p1_name)
plt.ylabel(self.p2_name)
plt.show()
def plot_fit(self):
network = self.calculate_points(self.object, self.raw)
plt.figure()
if network.ndim == 3:
for i in range(len(network)):
plt.scatter(network[i, :, 0], network[i, :, 2], c='b',
label='fit')
plt.scatter(self.raw[i, :, 0], self.raw[i, :, 2], c='r',
label='raw')
else:
plt.scatter(network[:, 0], network[:, 2], c='b', label='fit')
plt.scatter(self.raw[:, 0], self.raw[:, 2], c='r', label='raw')
plt.legend()
plt.show()
def _format_parameters(self, array_type=int):
self.p1 = self.p1.astype(array_type)
self.p2 = self.p2.astype(array_type)
self.P1, self.P2 = np.meshgrid(self.p1, self.p2)
P1_f = self.P1.flatten()
P2_f = self.P2.flatten()
return(P1_f, P2_f)
| mit |
kcavagnolo/astroML | book_figures/chapter10/fig_rrlyrae_reconstruct.py | 3 | 2472 | """
Fourier Reconstruction of RR-Lyrae Templates
--------------------------------------------
Figure 10.1
An example of a truncated Fourier representation of an RR Lyrae light curve.
The thick dashed line shows the true curve; the gray lines show the
approximation based on 1, 3, and 8 Fourier modes (sinusoids).
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.datasets import fetch_rrlyrae_templates
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Load the RR Lyrae template
templates = fetch_rrlyrae_templates()
x, y = templates['115r'].T
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(hspace=0)
kvals = [1, 3, 8]
subplots = [311, 312, 313]
for (k, subplot) in zip(kvals, subplots):
ax = fig.add_subplot(subplot)
# Use FFT to fit a truncated Fourier series
y_fft = np.fft.fft(y)
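    # Keep only the DC term plus the first k positive and k negative
    # frequencies (2*k + 1 coefficients in total); zero out the rest.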
y_fft[k + 1:-k] = 0
y_fit = np.fft.ifft(y_fft).real
# plot the true value and the k-term reconstruction
ax.plot(np.concatenate([x, 1 + x]),
np.concatenate([y, y]), '--k', lw=2)
ax.plot(np.concatenate([x, 1 + x]),
np.concatenate([y_fit, y_fit]), color='gray')
label = "%i mode" % k
if k > 1:
label += 's'
ax.text(0.02, 0.1, label, ha='left', va='bottom',
transform=ax.transAxes)
if subplot == subplots[-1]:
ax.set_xlabel('phase')
else:
ax.xaxis.set_major_formatter(plt.NullFormatter())
if subplot == subplots[1]:
ax.set_ylabel('amplitude')
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.set_xlim(0, 2)
ax.set_ylim(1.1, -0.1)
plt.show()
| bsd-2-clause |
CarterBain/AlephNull | alephnull/examples/dual_ema_talib.py | 1 | 3220 | #!/usr/bin/env python
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
from alephnull.algorithm import TradingAlgorithm
from alephnull.utils.factory import load_from_yahoo
# Import exponential moving average from talib wrapper
from alephnull.transforms.ta import EMA
from datetime import datetime
import pytz
class DualEMATaLib(TradingAlgorithm):
"""Dual Moving Average Crossover algorithm.
This algorithm buys apple once its short moving average crosses
its long moving average (indicating upwards momentum) and sells
its shares once the averages cross again (indicating downwards
momentum).
"""
def initialize(self, short_window=20, long_window=40):
# Add 2 mavg transforms, one with a long window, one
# with a short window.
self.short_ema_trans = EMA(timeperiod=short_window)
self.long_ema_trans = EMA(timeperiod=long_window)
# To keep track of whether we invested in the stock or not
self.invested = False
def handle_data(self, data):
self.short_ema = self.short_ema_trans.handle_data(data)
self.long_ema = self.long_ema_trans.handle_data(data)
if self.short_ema is None or self.long_ema is None:
return
self.buy = False
self.sell = False
if self.short_ema > self.long_ema and not self.invested:
self.order('AAPL', 100)
self.invested = True
self.buy = True
elif self.short_ema < self.long_ema and self.invested:
self.order('AAPL', -100)
self.invested = False
self.sell = True
self.record(AAPL=data['AAPL'].price,
short_ema=self.short_ema['AAPL'],
long_ema=self.long_ema['AAPL'],
buy=self.buy,
sell=self.sell)
if __name__ == '__main__':
start = datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(1991, 1, 1, 0, 0, 0, 0, pytz.utc)
data = load_from_yahoo(stocks=['AAPL'], indexes={}, start=start,
end=end)
dma = DualEMATaLib()
results = dma.run(data).dropna()
fig = plt.figure()
ax1 = fig.add_subplot(211, ylabel='portfolio value')
results.portfolio_value.plot(ax=ax1)
ax2 = fig.add_subplot(212)
results[['AAPL', 'short_ema', 'long_ema']].plot(ax=ax2)
ax2.plot(results.ix[results.buy].index, results.short_ema[results.buy],
'^', markersize=10, color='m')
ax2.plot(results.ix[results.sell].index, results.short_ema[results.sell],
'v', markersize=10, color='k')
plt.legend(loc=0)
plt.gcf().set_size_inches(18, 8)
| apache-2.0 |
samkreter/SWE-project | db_populate.py | 1 | 1242 | #!flask/bin/python3
from app import db, models
import numpy as np
import pandas as pd
from faker import Faker
import bcrypt
fake = Faker()
data = pd.read_csv('companies.csv')
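# companies.csv is assumed to have one column per company (the header row
# holds the company names) with that company's job titles listed down the column.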
for comName in data.columns:
name = fake.name().split()
u = models.User(first_name=name[0],last_name=name[1],email=fake.email(),password=bcrypt.hashpw('heythere'.encode('UTF_8'),bcrypt.gensalt(14)))
p = models.Profile(user=u)
c = models.Company(owner=u,name=comName,address=fake.address())
db.session.add(u)
db.session.add(p)
db.session.add(c)
db.session.commit()
for job in data[comName]:
if pd.isnull(job):
break
db.session.add(models.Job(title=job,company=c))
db.session.commit()
# for i in range(data[0,:].size):
# name = fake.name().split()
# u = models.User(first_name=name[0],last_name=name[1],email=fake.email(),password=bcrypt.hashpw('heythere'.encode('UTF_8'),bcrypt.gensalt(14)))
# p = models.Profile(user=u)
# c = models.Company(owner=u,name=data[0,i],address=fake.address())
# db.session.add(u)
# db.session.add(p)
# db.session.add(c)
# for job in data[:,i]:
# db.session.add(models.Job(title=job,company=c))
# db.session.commit()
| mit |
mlesniew/PiFmRds | src/generate_waveforms.py | 15 | 2403 | #!/usr/bin/python
# PiFmRds - FM/RDS transmitter for the Raspberry Pi
# Copyright (C) 2014 Christophe Jacquet, F8FTK
#
# See https://github.com/ChristopheJacquet/PiFmRds
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This program generates the waveform of a single biphase symbol
#
# This program uses Pydemod, see https://github.com/ChristopheJacquet/Pydemod
import pydemod.app.rds as rds
import numpy
import scipy.io.wavfile as wavfile
import io
import matplotlib.pyplot as plt
sample_rate = 228000
outc = io.open("waveforms.c", mode="w", encoding="utf8")
outh = io.open("waveforms.h", mode="w", encoding="utf8")
header = u"""
/* This file was automatically generated by "generate_waveforms.py".
(C) 2014 Christophe Jacquet.
Released under the GNU GPL v3 license.
*/
"""
outc.write(header)
outh.write(header)
def generate_bit(name):
offset = 240
l = 96
count = 2
sample = numpy.zeros(3*l)
sample[l] = 1
sample[2*l] = -1
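    # A +1 impulse at sample l followed by a -1 impulse at sample 2*l forms
    # one (unshaped) biphase symbol; the data-shaping filter below smooths it.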
# Apply the data-shaping filter
sf = rds.pulse_shaping_filter(96*8, 228000)
shapedSamples = numpy.convolve(sample, sf)
out = shapedSamples[528-288:528+288] #[offset:offset+l*count]
#plt.plot(sf)
#plt.plot(out)
#plt.show()
iout = (out * 20000./max(abs(out)) ).astype(numpy.dtype('>i2'))
wavfile.write(u"waveform_{}.wav".format(name), sample_rate, iout)
outc.write(u"float waveform_{name}[] = {{{values}}};\n\n".format(
name = name,
values = u", ".join(map(unicode, out/2.5))))
# note: need to limit the amplitude so as not to saturate when the biphase
# waveforms are summed
outh.write(u"extern float waveform_{name}[{size}];\n".format(name=name, size=len(out)))
generate_bit("biphase")
outc.close()
outh.close() | gpl-3.0 |
alberto-antonietti/nest-simulator | pynest/examples/brette_gerstner_fig_3d.py | 12 | 3030 | # -*- coding: utf-8 -*-
#
# brette_gerstner_fig_3d.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Testing the adapting exponential integrate and fire model in NEST (Brette and Gerstner Fig 3D)
----------------------------------------------------------------------------------------------------
This example tests the adaptive integrate and fire model (AdEx) according to
Brette and Gerstner [1]_ and reproduces Figure 3D of the paper.
Note that Brette and Gerstner give the value for `b` in `nA`.
To be consistent with the other parameters in the equations, `b` must be
converted to `pA` (pico Ampere).
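For example, b = 0.0805 nA corresponds to the b = 80.5 pA used below.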
References
~~~~~~~~~~~
.. [1] Brette R and Gerstner W (2005). Adaptive exponential integrate-and-fire model as an effective
description of neuronal activity J. Neurophysiology. https://doi.org/10.1152/jn.00686.2005
"""
import nest
import nest.voltage_trace
import matplotlib.pyplot as plt
nest.ResetKernel()
###############################################################################
# First we make sure that the resolution of the simulation is 0.1 ms. This is
# important, since the slope of the action potential is very steep.
res = 0.1
nest.SetKernelStatus({"resolution": res})
neuron = nest.Create("aeif_cond_exp")
###############################################################################
# Set the parameters of the neuron according to the paper.
neuron.set(V_peak=20., E_L=-60.0, a=80.0, b=80.5, tau_w=720.0)
###############################################################################
# Create and configure the stimulus which is a step current.
dc = nest.Create("dc_generator")
dc.set(amplitude=-800.0, start=0.0, stop=400.0)
###############################################################################
# We connect the DC generators.
nest.Connect(dc, neuron, 'all_to_all')
###############################################################################
# And add a ``voltmeter`` to sample the membrane potentials from the neuron
# in intervals of 0.1 ms.
voltmeter = nest.Create("voltmeter", params={'interval': 0.1})
nest.Connect(voltmeter, neuron)
###############################################################################
# Finally, we simulate for 1000 ms and plot a voltage trace to produce the
# figure.
nest.Simulate(1000.0)
nest.voltage_trace.from_device(voltmeter)
plt.axis([0, 1000, -85, 0])
nest.voltage_trace.show()
| gpl-2.0 |
louispotok/pandas | pandas/tests/indexes/timedeltas/test_astype.py | 5 | 2934 | from datetime import timedelta
import pytest
import numpy as np
import pandas.util.testing as tm
from pandas import (TimedeltaIndex, timedelta_range, Int64Index, Float64Index,
Index, Timedelta, NaT)
class TestTimedeltaIndex(object):
def test_astype_object(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
result = idx.astype(object)
expected = Index(expected_list, dtype=object, name='idx')
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
def test_astype_object_with_nat(self):
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), NaT,
Timedelta('4 days')]
result = idx.astype(object)
expected = Index(expected_list, dtype=object, name='idx')
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
def test_astype(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timedelta('1 days 03:46:40')] + [NaT] * 3,
dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([100000000000000] + [-9223372036854775808] * 3,
dtype=np.int64)
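        # -9223372036854775808 is iNaT, the int64 sentinel that NaT maps to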
tm.assert_index_equal(result, expected)
result = idx.astype(str)
expected = Index(str(x) for x in idx)
tm.assert_index_equal(result, expected)
rng = timedelta_range('1 days', periods=10)
result = rng.astype('i8')
tm.assert_index_equal(result, Index(rng.asi8))
tm.assert_numpy_array_equal(rng.asi8, result.values)
def test_astype_timedelta64(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN])
result = idx.astype('timedelta64')
expected = Float64Index([1e+14] + [np.NaN] * 3, dtype='float64')
tm.assert_index_equal(result, expected)
result = idx.astype('timedelta64[ns]')
tm.assert_index_equal(result, idx)
assert result is not idx
result = idx.astype('timedelta64[ns]', copy=False)
tm.assert_index_equal(result, idx)
assert result is idx
@pytest.mark.parametrize('dtype', [
float, 'datetime64', 'datetime64[ns]'])
def test_astype_raises(self, dtype):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN])
msg = 'Cannot cast TimedeltaIndex to dtype'
with tm.assert_raises_regex(TypeError, msg):
idx.astype(dtype)
| bsd-3-clause |
asoliveira/NumShip | scripts/plot/xy-zz-plt.py | 1 | 2276 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Is it dimensionless?
adi = False
#Should the figures be saved (True|False)?
save = True
#If saving, what is the desired format?
formato = 'jpg'
#If saving, in which directory should I save?
dircg = 'fig-sen'
#If saving, what is the file name?
nome = 'xy-zz'
#What title to put on the plots?
titulo = ''#'Curva de ZigZag'
#What color for the plots?
pc = 'k'
bc = 'k'
lc = 'k'
brlc = 'k'
rc = 'k'
#Line style
ps = '-'
bs = '-.'
rs = '.'
ls = '+'
brls = '^'
import os
import scipy as sp
import matplotlib.pyplot as plt
from libplot import *
poshis = sp.genfromtxt('../entrada/padrao/CurvaZigZag/pos.dat')
poshis2 = sp.genfromtxt('../entrada/beta/saida1.2/CurvaZigZag/pos.dat')
poshis3 = sp.genfromtxt('../entrada/r/saida1.2/CurvaZigZag/pos.dat')
poshis4 = sp.genfromtxt('../entrada/leme/saida1.2/CurvaZigZag/pos.dat')
poshis5 = sp.genfromtxt('../entrada/brl/saida1.2/CurvaZigZag/pos.dat')
axl = [0, 8000, -200, 600]
#Plotting the ZigZag curve
if adi:
ylabel = r'$y\prime$'
xposlabel = r'$x\prime$'
else:
ylabel = r'$y \quad m$'
xposlabel = r'$x \quad m$'
plt.subplot2grid((1,4),(0,0), colspan=3)
#Default (padrão)
plt.plot(poshis[:, 1], poshis[:, 2], color = pc, linestyle = ps,
linewidth = 1, label=ur'padrão')
plt.plot(poshis2[:, 1], poshis2[:, 2], color = bc,linestyle = bs,
linewidth = 2, label=ur'1.2beta')
plt.scatter(somep(poshis3[:, 1], num = 200), somep(poshis3[:, 2], num = 200),
color = rc, marker = rs, s = 8, label=ur'1.2r')
plt.scatter(somep(poshis4[:, 1], num = 100),
somep(poshis4[:, 2], num = 100), color = lc, marker = ls, s = 20,
label=ur'1.2leme')
plt.scatter(somep(poshis5[:, 1], num = 100), somep(poshis5[:, 2], num = 100),
color = brlc, marker = brls, s = 14, label=ur'1.2brl')
plt.title(titulo)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.ylabel(ylabel)
plt.xlabel(xposlabel)
plt.axis(axl)
plt.grid(True)
if save:
if not os.path.exists(dircg):
os.makedirs(dircg)
if os.path.exists(dircg + '/' + nome + '.' + formato):
os.remove(dircg + '/' + nome + '.' + formato)
plt.savefig(dircg + '/' + nome + '.' + formato , format=formato)
else:
plt.show()
| gpl-3.0 |
ZeeD26/pydougu | pydougu/matplotlib/colormaps/_viridis.py | 1 | 12689 | # -*- coding: utf-8 -*-
from matplotlib.colors import ListedColormap
__all__ = ['viridis']
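# Each entry below is an (R, G, B) triplet in [0, 1], running from the dark
# blue-purple end of viridis to its bright yellow end.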
_viridis_data = [[0.267004, 0.004874, 0.329415],
[0.268510, 0.009605, 0.335427],
[0.269944, 0.014625, 0.341379],
[0.271305, 0.019942, 0.347269],
[0.272594, 0.025563, 0.353093],
[0.273809, 0.031497, 0.358853],
[0.274952, 0.037752, 0.364543],
[0.276022, 0.044167, 0.370164],
[0.277018, 0.050344, 0.375715],
[0.277941, 0.056324, 0.381191],
[0.278791, 0.062145, 0.386592],
[0.279566, 0.067836, 0.391917],
[0.280267, 0.073417, 0.397163],
[0.280894, 0.078907, 0.402329],
[0.281446, 0.084320, 0.407414],
[0.281924, 0.089666, 0.412415],
[0.282327, 0.094955, 0.417331],
[0.282656, 0.100196, 0.422160],
[0.282910, 0.105393, 0.426902],
[0.283091, 0.110553, 0.431554],
[0.283197, 0.115680, 0.436115],
[0.283229, 0.120777, 0.440584],
[0.283187, 0.125848, 0.444960],
[0.283072, 0.130895, 0.449241],
[0.282884, 0.135920, 0.453427],
[0.282623, 0.140926, 0.457517],
[0.282290, 0.145912, 0.461510],
[0.281887, 0.150881, 0.465405],
[0.281412, 0.155834, 0.469201],
[0.280868, 0.160771, 0.472899],
[0.280255, 0.165693, 0.476498],
[0.279574, 0.170599, 0.479997],
[0.278826, 0.175490, 0.483397],
[0.278012, 0.180367, 0.486697],
[0.277134, 0.185228, 0.489898],
[0.276194, 0.190074, 0.493001],
[0.275191, 0.194905, 0.496005],
[0.274128, 0.199721, 0.498911],
[0.273006, 0.204520, 0.501721],
[0.271828, 0.209303, 0.504434],
[0.270595, 0.214069, 0.507052],
[0.269308, 0.218818, 0.509577],
[0.267968, 0.223549, 0.512008],
[0.266580, 0.228262, 0.514349],
[0.265145, 0.232956, 0.516599],
[0.263663, 0.237631, 0.518762],
[0.262138, 0.242286, 0.520837],
[0.260571, 0.246922, 0.522828],
[0.258965, 0.251537, 0.524736],
[0.257322, 0.256130, 0.526563],
[0.255645, 0.260703, 0.528312],
[0.253935, 0.265254, 0.529983],
[0.252194, 0.269783, 0.531579],
[0.250425, 0.274290, 0.533103],
[0.248629, 0.278775, 0.534556],
[0.246811, 0.283237, 0.535941],
[0.244972, 0.287675, 0.537260],
[0.243113, 0.292092, 0.538516],
[0.241237, 0.296485, 0.539709],
[0.239346, 0.300855, 0.540844],
[0.237441, 0.305202, 0.541921],
[0.235526, 0.309527, 0.542944],
[0.233603, 0.313828, 0.543914],
[0.231674, 0.318106, 0.544834],
[0.229739, 0.322361, 0.545706],
[0.227802, 0.326594, 0.546532],
[0.225863, 0.330805, 0.547314],
[0.223925, 0.334994, 0.548053],
[0.221989, 0.339161, 0.548752],
[0.220057, 0.343307, 0.549413],
[0.218130, 0.347432, 0.550038],
[0.216210, 0.351535, 0.550627],
[0.214298, 0.355619, 0.551184],
[0.212395, 0.359683, 0.551710],
[0.210503, 0.363727, 0.552206],
[0.208623, 0.367752, 0.552675],
[0.206756, 0.371758, 0.553117],
[0.204903, 0.375746, 0.553533],
[0.203063, 0.379716, 0.553925],
[0.201239, 0.383670, 0.554294],
[0.199430, 0.387607, 0.554642],
[0.197636, 0.391528, 0.554969],
[0.195860, 0.395433, 0.555276],
[0.194100, 0.399323, 0.555565],
[0.192357, 0.403199, 0.555836],
[0.190631, 0.407061, 0.556089],
[0.188923, 0.410910, 0.556326],
[0.187231, 0.414746, 0.556547],
[0.185556, 0.418570, 0.556753],
[0.183898, 0.422383, 0.556944],
[0.182256, 0.426184, 0.557120],
[0.180629, 0.429975, 0.557282],
[0.179019, 0.433756, 0.557430],
[0.177423, 0.437527, 0.557565],
[0.175841, 0.441290, 0.557685],
[0.174274, 0.445044, 0.557792],
[0.172719, 0.448791, 0.557885],
[0.171176, 0.452530, 0.557965],
[0.169646, 0.456262, 0.558030],
[0.168126, 0.459988, 0.558082],
[0.166617, 0.463708, 0.558119],
[0.165117, 0.467423, 0.558141],
[0.163625, 0.471133, 0.558148],
[0.162142, 0.474838, 0.558140],
[0.160665, 0.478540, 0.558115],
[0.159194, 0.482237, 0.558073],
[0.157729, 0.485932, 0.558013],
[0.156270, 0.489624, 0.557936],
[0.154815, 0.493313, 0.557840],
[0.153364, 0.497000, 0.557724],
[0.151918, 0.500685, 0.557587],
[0.150476, 0.504369, 0.557430],
[0.149039, 0.508051, 0.557250],
[0.147607, 0.511733, 0.557049],
[0.146180, 0.515413, 0.556823],
[0.144759, 0.519093, 0.556572],
[0.143343, 0.522773, 0.556295],
[0.141935, 0.526453, 0.555991],
[0.140536, 0.530132, 0.555659],
[0.139147, 0.533812, 0.555298],
[0.137770, 0.537492, 0.554906],
[0.136408, 0.541173, 0.554483],
[0.135066, 0.544853, 0.554029],
[0.133743, 0.548535, 0.553541],
[0.132444, 0.552216, 0.553018],
[0.131172, 0.555899, 0.552459],
[0.129933, 0.559582, 0.551864],
[0.128729, 0.563265, 0.551229],
[0.127568, 0.566949, 0.550556],
[0.126453, 0.570633, 0.549841],
[0.125394, 0.574318, 0.549086],
[0.124395, 0.578002, 0.548287],
[0.123463, 0.581687, 0.547445],
[0.122606, 0.585371, 0.546557],
[0.121831, 0.589055, 0.545623],
[0.121148, 0.592739, 0.544641],
[0.120565, 0.596422, 0.543611],
[0.120092, 0.600104, 0.542530],
[0.119738, 0.603785, 0.541400],
[0.119512, 0.607464, 0.540218],
[0.119423, 0.611141, 0.538982],
[0.119483, 0.614817, 0.537692],
[0.119699, 0.618490, 0.536347],
[0.120081, 0.622161, 0.534946],
[0.120638, 0.625828, 0.533488],
[0.121380, 0.629492, 0.531973],
[0.122312, 0.633153, 0.530398],
[0.123444, 0.636809, 0.528763],
[0.124780, 0.640461, 0.527068],
[0.126326, 0.644107, 0.525311],
[0.128087, 0.647749, 0.523491],
[0.130067, 0.651384, 0.521608],
[0.132268, 0.655014, 0.519661],
[0.134692, 0.658636, 0.517649],
[0.137339, 0.662252, 0.515571],
[0.140210, 0.665859, 0.513427],
[0.143303, 0.669459, 0.511215],
[0.146616, 0.673050, 0.508936],
[0.150148, 0.676631, 0.506589],
[0.153894, 0.680203, 0.504172],
[0.157851, 0.683765, 0.501686],
[0.162016, 0.687316, 0.499129],
[0.166383, 0.690856, 0.496502],
[0.170948, 0.694384, 0.493803],
[0.175707, 0.697900, 0.491033],
[0.180653, 0.701402, 0.488189],
[0.185783, 0.704891, 0.485273],
[0.191090, 0.708366, 0.482284],
[0.196571, 0.711827, 0.479221],
[0.202219, 0.715272, 0.476084],
[0.208030, 0.718701, 0.472873],
[0.214000, 0.722114, 0.469588],
[0.220124, 0.725509, 0.466226],
[0.226397, 0.728888, 0.462789],
[0.232815, 0.732247, 0.459277],
[0.239374, 0.735588, 0.455688],
[0.246070, 0.738910, 0.452024],
[0.252899, 0.742211, 0.448284],
[0.259857, 0.745492, 0.444467],
[0.266941, 0.748751, 0.440573],
[0.274149, 0.751988, 0.436601],
[0.281477, 0.755203, 0.432552],
[0.288921, 0.758394, 0.428426],
[0.296479, 0.761561, 0.424223],
[0.304148, 0.764704, 0.419943],
[0.311925, 0.767822, 0.415586],
[0.319809, 0.770914, 0.411152],
[0.327796, 0.773980, 0.406640],
[0.335885, 0.777018, 0.402049],
[0.344074, 0.780029, 0.397381],
[0.352360, 0.783011, 0.392636],
[0.360741, 0.785964, 0.387814],
[0.369214, 0.788888, 0.382914],
[0.377779, 0.791781, 0.377939],
[0.386433, 0.794644, 0.372886],
[0.395174, 0.797475, 0.367757],
[0.404001, 0.800275, 0.362552],
[0.412913, 0.803041, 0.357269],
[0.421908, 0.805774, 0.351910],
[0.430983, 0.808473, 0.346476],
[0.440137, 0.811138, 0.340967],
[0.449368, 0.813768, 0.335384],
[0.458674, 0.816363, 0.329727],
[0.468053, 0.818921, 0.323998],
[0.477504, 0.821444, 0.318195],
[0.487026, 0.823929, 0.312321],
[0.496615, 0.826376, 0.306377],
[0.506271, 0.828786, 0.300362],
[0.515992, 0.831158, 0.294279],
[0.525776, 0.833491, 0.288127],
[0.535621, 0.835785, 0.281908],
[0.545524, 0.838039, 0.275626],
[0.555484, 0.840254, 0.269281],
[0.565498, 0.842430, 0.262877],
[0.575563, 0.844566, 0.256415],
[0.585678, 0.846661, 0.249897],
[0.595839, 0.848717, 0.243329],
[0.606045, 0.850733, 0.236712],
[0.616293, 0.852709, 0.230052],
[0.626579, 0.854645, 0.223353],
[0.636902, 0.856542, 0.216620],
[0.647257, 0.858400, 0.209861],
[0.657642, 0.860219, 0.203082],
[0.668054, 0.861999, 0.196293],
[0.678489, 0.863742, 0.189503],
[0.688944, 0.865448, 0.182725],
[0.699415, 0.867117, 0.175971],
[0.709898, 0.868751, 0.169257],
[0.720391, 0.870350, 0.162603],
[0.730889, 0.871916, 0.156029],
[0.741388, 0.873449, 0.149561],
[0.751884, 0.874951, 0.143228],
[0.762373, 0.876424, 0.137064],
[0.772852, 0.877868, 0.131109],
[0.783315, 0.879285, 0.125405],
[0.793760, 0.880678, 0.120005],
[0.804182, 0.882046, 0.114965],
[0.814576, 0.883393, 0.110347],
[0.824940, 0.884720, 0.106217],
[0.835270, 0.886029, 0.102646],
[0.845561, 0.887322, 0.099702],
[0.855810, 0.888601, 0.097452],
[0.866013, 0.889868, 0.095953],
[0.876168, 0.891125, 0.095250],
[0.886271, 0.892374, 0.095374],
[0.896320, 0.893616, 0.096335],
[0.906311, 0.894855, 0.098125],
[0.916242, 0.896091, 0.100717],
[0.926106, 0.897330, 0.104071],
[0.935904, 0.898570, 0.108131],
[0.945636, 0.899815, 0.112838],
[0.955300, 0.901065, 0.118128],
[0.964894, 0.902323, 0.123941],
[0.974417, 0.903590, 0.130215],
[0.983868, 0.904867, 0.136897],
[0.993248, 0.906157, 0.143936]]
viridis = ListedColormap(_viridis_data, 'viridis')
| mit |
neuroidss/nupic.research | projects/sp_paper/plot_traces_with_errorbars.py | 10 | 5345 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Plot continuous learning experiment result with error bars
Run './runRepeatedExperiment.sh' in terminal before using this script
"""
import pickle
from matplotlib import pyplot as plt
import matplotlib as mpl
import numpy as np
from pylab import rcParams
from scipy import stats
mpl.rcParams['pdf.fonttype'] = 42
plt.ion()
plt.close('all')
def convertToNumpyArray(trace):
for k in trace.keys():
if k == 'expName':
continue
n = len(trace[k])
trace[k] = np.reshape(np.array(trace[k]), (n, 1))
return trace
def concatenateTraces(trace1, trace2):
metrics = {'numConnectedSyn': [],
'numNewSyn': [],
'numRemoveSyn': [],
'stability': [],
'entropy': [],
'maxEntropy': [],
'sparsity': [],
'noiseRobustness': [],
'classification': [],
'meanBoostFactor': [],
'reconstructionError': [],
'witnessError': []}
for k in metrics.keys():
metrics[k] = np.concatenate((np.array(trace1[k]),
np.array(trace2[k])), 1)
return metrics
def calculateMeanStd(trace):
meanTrace = np.mean(trace, axis=1)
stdTrace = np.std(trace, axis=1)
return (meanTrace, stdTrace)
def plotBarWithErr(ax, y, yerr, ylabel, xtickLabels):
inds = np.arange(len(y))
ax.bar(inds+.2, y, yerr=yerr, width=0.6)
ax.set_ylabel(ylabel)
ax.set_xticks(inds+.5)
ax.set_xticklabels(xtickLabels)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_rotation('vertical')
if __name__ == "__main__":
traceAll = None
changeDataAt = 50
for seed in range(1, 11):
expName = 'randomSDRVaryingSparsityContinuousLearning_seed_{}'.format(seed)
trace = pickle.load(open('./results/traces/{}/trace'.format(expName), 'rb'))
trace = convertToNumpyArray(trace)
if traceAll is None:
traceAll = trace
else:
traceAll = concatenateTraces(traceAll, trace)
traceAll['stability'][changeDataAt, :] = traceAll['stability'][changeDataAt-1, :]
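  # Overwrite the value at the epoch where the dataset changes with the
  # previous epoch's value, presumably to suppress the spurious dip that the
  # switch itself would otherwise introduce.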
tracesToPlot = ['stability', 'entropy', 'noiseRobustness',
'numNewSyn', 'numRemoveSyn']
ylabelList = ['Stability', 'Entropy (bits)', 'Noise Robustness',
'Synapses Formation', 'Synapse Removal']
numEpochs, numRpts = traceAll['entropy'].shape
fig, axs = plt.subplots(nrows=len(tracesToPlot), ncols=1, sharex=True)
for i in range(len(tracesToPlot)):
traceName = tracesToPlot[i]
(mean, std) = calculateMeanStd(traceAll[traceName])
color = 'k'
x = range(numEpochs)
axs[i].fill_between(x, mean - std, mean + std,
alpha=0.3, edgecolor=color, facecolor=color)
axs[i].plot(x, mean, color, color=color, linewidth=.5)
axs[i].set_ylabel(ylabelList[i])
axs[i].plot([changeDataAt, changeDataAt], axs[i].get_ylim(), 'k--')
# adjust axis limit and tick spacings
for i in [3, 4]:
yl = axs[i].get_ylim()
axs[i].set_ylim([0, yl[1]])
axs[0].set_yticks(np.linspace(.6, 1, 5))
axs[1].set_yticks(np.linspace(.08, .14, 4))
axs[4].set_xlabel('Epochs')
plt.savefig('figures/ContinuousLearning_WithErrBars.pdf')
rcParams.update({'figure.autolayout': True})
fig, ax = plt.subplots(nrows=1, ncols=3)
checkPoints = [0, 49, 50, 119]
meanEntropy = np.mean(traceAll['entropy'][checkPoints, :], 1)
stdEntropy = np.std(traceAll['entropy'][checkPoints, :], 1)
maxEntropy = np.mean(np.mean(traceAll['maxEntropy'][10:, :], 1))
print "test entropy difference before/after learning: "
print stats.ttest_rel(traceAll['entropy'][0, :], traceAll['entropy'][49, :])
meanNoiseRobustness = np.mean(traceAll['noiseRobustness'][checkPoints, :], 1)
stdNoiseRobustness = np.std(traceAll['noiseRobustness'][checkPoints, :], 1)
xtickLabels = ['Before Training', 'Before Change', 'After Change',
'After Recovery']
print "test noise robustness before/after learning: "
print stats.ttest_rel(traceAll['noiseRobustness'][0, :], traceAll['noiseRobustness'][49, :])
plotBarWithErr(ax[0], meanEntropy, stdEntropy, 'Entropy (bits)', xtickLabels)
ax[0].plot(ax[0].get_xlim(), [maxEntropy, maxEntropy], 'k--')
plotBarWithErr(ax[1], meanNoiseRobustness, stdNoiseRobustness,
'Noise Robustness', xtickLabels)
plt.savefig('figures/ContinuousLearning_BarChart.pdf')
| agpl-3.0 |
gwpy/gwpy | gwpy/plot/tests/test_colors.py | 3 | 1881 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2018-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Tests for `gwpy.plot.colors`
"""
import pytest
from numpy.testing import assert_array_equal
from matplotlib.colors import (Normalize, LogNorm)
from .. import colors as plot_colors
@pytest.mark.parametrize('in_, factor, out', [
('red', 1., (1., 0., 0.)),
((1., 0., 0.), 1., (1., 0., 0.)),
('green', .75, (0.0, 0.37647058823529411, 0.0)),
])
def test_tint(in_, factor, out):
assert_array_equal(plot_colors.tint(in_, factor=factor), out)
def test_format_norm():
# defaults
norm, kwargs = plot_colors.format_norm({})
assert isinstance(norm, Normalize)
assert kwargs == {}
# log norm
norm, kwargs = plot_colors.format_norm(
{'norm': 'log', 'vmin': 1, 'vmax': 10})
assert isinstance(norm, LogNorm)
assert norm.vmin == 1
assert norm.vmax == 10
# existing norm, change limits
n = LogNorm()
norm, kwargs = plot_colors.format_norm(
{'norm': n, 'clim': (10, 1000)})
assert norm is n
assert norm.vmin == 10
assert norm.vmax == 1000
# check clim=None is honoured
norm, kwargs = plot_colors.format_norm({'clim': None})
assert norm.vmin is None and norm.vmax is None
| gpl-3.0 |
LiaoPan/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
| bsd-3-clause |
edublancas/sklearn-model-evaluation | versioneer.py | 1 | 7720 | """
Script for creating new releases
Maybe I should switch to this:
https://blog.mozilla.org/warner/2012/01/31/version-string-management-in-python-introducing-python-versioneer/
"""
import ast
import re
from subprocess import call as _call
from functools import reduce
import datetime
import click
TESTING = False
PACKAGE = 'src/sklearn_evaluation'
PACKAGE_NAME = 'sklearn_evaluation'
def replace_in_file(path_to_file, original, replacement):
"""Replace string in file
"""
with open(path_to_file, 'r+') as f:
content = f.read()
updated = content.replace(original, replacement)
f.seek(0)
f.write(updated)
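        # Truncate in case the replacement text is shorter than the original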
f.truncate()
def read_file(path_to_file):
with open(path_to_file, 'r') as f:
content = f.read()
return content
def call(*args, **kwargs):
"""Mocks call function for testing
"""
if TESTING:
print(args, kwargs)
return 0
else:
return _call(*args, **kwargs)
class Versioner(object):
"""Utility functions to manage versions
"""
@classmethod
def current_version(cls):
"""Returns the current version in __init__.py
"""
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('{package}/__init__.py'.format(package=PACKAGE), 'rb') as f:
VERSION = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
return VERSION
@classmethod
def release_version(cls):
"""
Returns a release version number
        e.g. 2.4.4dev -> 2.4.4
"""
current = cls.current_version()
if 'dev' not in current:
raise ValueError('Current version is not a dev version')
return current.replace('dev', '')
@classmethod
def bump_up_version(cls):
"""
        Takes the current release version and returns the next dev version.
e.g. 1.2.5 -> 1.2.6dev
"""
# Get current version
current = cls.current_version()
if 'dev' in current:
raise ValueError('Current version is dev version, new dev '
'versions can only be made from release versions')
        # Get Z from X.Y.Z and add 1
new_subversion = int(current.split('.')[-1]) + 1
# Replace new_subversion in current version
elements = current.split('.')
elements[-1] = new_subversion
new_version = reduce(lambda x, y: str(x)+'.'+str(y), elements)+'dev'
return new_version
@classmethod
def commit_version(cls, new_version, tag=False):
"""
Replaces version in __init__ and optionally creates a tag in the git
repository (also saves a commit)
"""
current = cls.current_version()
# replace new version in __init__.py
replace_in_file('{package}/__init__.py'.format(package=PACKAGE),
current, new_version)
# Create tag
if tag:
# Run git add and git status
click.echo('Adding new changes to the repository...')
call(['git', 'add', '--all'])
call(['git', 'status'])
# Commit repo with updated dev version
click.echo('Creating new commit release version...')
msg = 'Release {}'.format(new_version)
call(['git', 'commit', '-m', msg])
click.echo('Creating tag {}...'.format(new_version))
message = '{} release {}'.format(PACKAGE_NAME, new_version)
call(['git', 'tag', '-a', new_version, '-m', message])
click.echo('Pushing tags...')
call(['git', 'push', 'origin', new_version])
@classmethod
def update_changelog_release(cls, new_version):
current = cls.current_version()
# update CHANGELOG header
header_current = '{ver}\n'.format(ver=current)+'-'*len(current)
today = datetime.datetime.now().strftime('%Y-%m-%d')
header_new = '{ver} ({today})\n'.format(ver=new_version, today=today)
header_new = header_new+'-'*len(header_new)
replace_in_file('CHANGELOG.rst', header_current, header_new)
@classmethod
def add_changelog_dev_section(cls, dev_version):
# add new CHANGELOG section
start_current = 'Changelog\n========='
start_new = (('Changelog\n=========\n\n{dev_version}\n'
.format(dev_version=dev_version)
+ '-' * len(dev_version)) + '\n')
replace_in_file('CHANGELOG.rst', start_current, start_new)
@click.group()
def cli():
"""Automates release a new version and uploading it to PyPI
1. MANUAL: Merge whatever you want to publish to master
2. MANUAL: Update your CHANGELOG.rst
2. CREATE A NEW VERSION: python versioneer.py new
3. PUBLISH: python versioneer.py release [TAG] --production
"""
pass
@cli.command(help='Sets a new version for the project: Updates __version__, changelog and commits')
def new():
"""
Create a new version for the project: updates __init__.py, CHANGELOG,
creates new commit for released version (creating a tag) and commits
to a new dev version
"""
current = Versioner.current_version()
release = Versioner.release_version()
    release = click.prompt('Current version in __init__.py is {current}. Enter'
' release version'.format(current=current,
release=release),
default=release, type=str)
Versioner.update_changelog_release(release)
changelog = read_file('CHANGELOG.rst')
click.confirm('\nCHANGELOG.rst:\n\n{}\n Continue?'.format(changelog),
'done', abort=True)
# Replace version number and create tag
    click.echo('Committing release version: {}'.format(release))
Versioner.commit_version(release, tag=True)
# Create a new dev version and save it
bumped_version = Versioner.bump_up_version()
click.echo('Creating new section in CHANGELOG...')
Versioner.add_changelog_dev_section(bumped_version)
    click.echo('Committing dev version: {}'.format(bumped_version))
Versioner.commit_version(bumped_version)
# Run git add and git status
click.echo('Adding new changes to the repository...')
call(['git', 'add', '--all'])
call(['git', 'status'])
# Commit repo with updated dev version
click.echo('Creating new commit with new dev version...')
msg = 'Bumps up project to version {}'.format(bumped_version)
call(['git', 'commit', '-m', msg])
call(['git', 'push'])
click.echo('Version {} was created, you are now in {}'
.format(release, bumped_version))
@cli.command(help='Merges changes in dev with master')
def tomaster():
"""
Merges dev with master and pushes
"""
click.echo('Checking out master...')
call(['git', 'checkout', 'master'])
click.echo('Merging master with dev...')
call(['git', 'merge', 'dev'])
click.echo('Pushing changes...')
call(['git', 'push'])
@cli.command(help='Publishes to PyPI')
@click.argument('tag')
@click.option('--production', is_flag=True)
def release(tag, production):
"""
    Checks out the given tag and publishes the package to PyPI
"""
click.echo('Checking out tag {}'.format(tag))
call(['git', 'checkout', tag])
current = Versioner.current_version()
click.confirm('Version in {} tag is {}. Do you want to continue?'
.format(tag, current))
click.echo('Publishing to PyPI...')
where = 'pypitest' if not production else 'pypi'
call(['python', 'setup.py', 'sdist', 'upload', '-r', where])
if __name__ == '__main__':
cli()
| mit |
yask123/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
teonlamont/mne-python | mne/viz/evoked.py | 2 | 94662 | # -*- coding: utf-8 -*-
"""Functions to plot evoked M/EEG data (besides topographies)."""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Cathy Nangini <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
from functools import partial
from copy import deepcopy
from numbers import Integral
import numpy as np
from ..io.pick import (channel_type, _pick_data_channels,
_VALID_CHANNEL_TYPES, channel_indices_by_type,
_DATA_CH_TYPES_SPLIT, _pick_inst, _get_channel_types,
_PICK_TYPES_DATA_DICT)
from ..externals.six import string_types
from ..defaults import _handle_default
from .utils import (_draw_proj_checkbox, tight_layout, _check_delayed_ssp,
plt_show, _process_times, DraggableColorbar, _setup_cmap,
_setup_vmin_vmax, _grad_pair_pick_and_name, _check_cov,
_validate_if_list_of_axes, _triage_rank_sss,
_connection_line, _get_color_list, _setup_ax_spines,
_setup_plot_projector, _prepare_joint_axes,
_set_title_multiple_electrodes, _check_time_unit,
_plot_masked_image)
from ..utils import (logger, _clean_names, warn, _pl, verbose, _validate_type,
_check_if_nan)
from .topo import _plot_evoked_topo
from .topomap import (_prepare_topo_plot, plot_topomap, _check_outlines,
_draw_outlines, _prepare_topomap, _set_contour_locator)
from ..channels.layout import _pair_grad_sensors, _auto_topomap_coords
def _butterfly_onpick(event, params):
"""Add a channel name on click."""
params['need_draw'] = True
ax = event.artist.axes
ax_idx = np.where([ax is a for a in params['axes']])[0]
if len(ax_idx) == 0: # this can happen if ax param is used
return # let the other axes handle it
else:
ax_idx = ax_idx[0]
lidx = np.where([l is event.artist for l in params['lines'][ax_idx]])[0][0]
ch_name = params['ch_names'][params['idxs'][ax_idx][lidx]]
text = params['texts'][ax_idx]
x = event.artist.get_xdata()[event.ind[0]]
y = event.artist.get_ydata()[event.ind[0]]
text.set_x(x)
text.set_y(y)
text.set_text(ch_name)
text.set_color(event.artist.get_color())
text.set_alpha(1.)
text.set_zorder(len(ax.lines)) # to make sure it goes on top of the lines
text.set_path_effects(params['path_effects'])
# do NOT redraw here, since for butterfly plots hundreds of lines could
# potentially be picked -- use on_button_press (happens once per click)
# to do the drawing
def _butterfly_on_button_press(event, params):
"""Only draw once for picking."""
if params['need_draw']:
event.canvas.draw()
else:
idx = np.where([event.inaxes is ax for ax in params['axes']])[0]
if len(idx) == 1:
text = params['texts'][idx[0]]
text.set_alpha(0.)
text.set_path_effects([])
event.canvas.draw()
params['need_draw'] = False
def _line_plot_onselect(xmin, xmax, ch_types, info, data, times, text=None,
psd=False, time_unit='s'):
"""Draw topomaps from the selected area."""
import matplotlib.pyplot as plt
ch_types = [type_ for type_ in ch_types if type_ in ('eeg', 'grad', 'mag')]
if len(ch_types) == 0:
raise ValueError('Interactive topomaps only allowed for EEG '
'and MEG channels.')
if ('grad' in ch_types and
len(_pair_grad_sensors(info, topomap_coords=False,
raise_error=False)) < 2):
ch_types.remove('grad')
if len(ch_types) == 0:
return
vert_lines = list()
if text is not None:
text.set_visible(True)
ax = text.axes
vert_lines.append(ax.axvline(xmin, zorder=0, color='red'))
vert_lines.append(ax.axvline(xmax, zorder=0, color='red'))
fill = ax.axvspan(xmin, xmax, alpha=0.2, color='green')
evoked_fig = plt.gcf()
evoked_fig.canvas.draw()
evoked_fig.canvas.flush_events()
minidx = np.abs(times - xmin).argmin()
maxidx = np.abs(times - xmax).argmin()
fig, axarr = plt.subplots(1, len(ch_types), squeeze=False,
figsize=(3 * len(ch_types), 3))
for idx, ch_type in enumerate(ch_types):
if ch_type not in ('eeg', 'grad', 'mag'):
continue
picks, pos, merge_grads, _, ch_type = _prepare_topo_plot(
info, ch_type, layout=None)
if len(pos) < 2:
fig.delaxes(axarr[0][idx])
continue
this_data = data[picks, minidx:maxidx]
if merge_grads:
from ..channels.layout import _merge_grad_data
method = 'mean' if psd else 'rms'
this_data = _merge_grad_data(this_data, method=method)
title = '%s %s' % (ch_type, method.upper())
else:
title = ch_type
this_data = np.average(this_data, axis=1)
axarr[0][idx].set_title(title)
vmin = min(this_data) if psd else None
vmax = max(this_data) if psd else None # All negative for dB psd.
cmap = 'Reds' if psd else None
plot_topomap(this_data, pos, cmap=cmap, vmin=vmin, vmax=vmax,
axes=axarr[0][idx], show=False)
unit = 'Hz' if psd else time_unit
fig.suptitle('Average over %.2f%s - %.2f%s' % (xmin, unit, xmax, unit),
y=0.1)
tight_layout(pad=2.0, fig=fig)
plt_show()
if text is not None:
text.set_visible(False)
close_callback = partial(_topo_closed, ax=ax, lines=vert_lines,
fill=fill)
fig.canvas.mpl_connect('close_event', close_callback)
evoked_fig.canvas.draw()
evoked_fig.canvas.flush_events()
def _topo_closed(events, ax, lines, fill):
"""Remove lines from evoked plot as topomap is closed."""
for line in lines:
ax.lines.remove(line)
ax.patches.remove(fill)
ax.get_figure().canvas.draw()
def _rgb(x, y, z):
"""Transform x, y, z values into RGB colors."""
rgb = np.array([x, y, z]).T
rgb -= rgb.min(0)
rgb /= np.maximum(rgb.max(0), 1e-16) # avoid div by zero
return rgb
def _plot_legend(pos, colors, axis, bads, outlines, loc, size=30):
"""Plot (possibly colorized) channel legends for evoked plots."""
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
axis.get_figure().canvas.draw()
bbox = axis.get_window_extent() # Determine the correct size.
ratio = bbox.width / bbox.height
ax = inset_axes(axis, width=str(size / ratio) + '%',
height=str(size) + '%', loc=loc)
ax.set_adjustable("box")
pos_x, pos_y = _prepare_topomap(pos, ax, check_nonzero=False)
ax.scatter(pos_x, pos_y, color=colors, s=size * .8, marker='.', zorder=1)
if bads:
bads = np.array(bads)
ax.scatter(pos_x[bads], pos_y[bads], s=size / 6, marker='.',
color='w', zorder=1)
_draw_outlines(ax, outlines)
def _plot_evoked(evoked, picks, exclude, unit, show, ylim, proj, xlim, hline,
units, scalings, titles, axes, plot_type, cmap=None,
gfp=False, window_title=None, spatial_colors=False,
set_tight_layout=True, selectable=True, zorder='unsorted',
noise_cov=None, colorbar=True, mask=None, mask_style=None,
mask_cmap=None, mask_alpha=.25, time_unit='s',
show_names=False, group_by=None):
"""Aux function for plot_evoked and plot_evoked_image (cf. docstrings).
Extra param is:
plot_type : str, value ('butterfly' | 'image')
The type of graph to plot: 'butterfly' plots each channel as a line
(x axis: time, y axis: amplitude). 'image' plots a 2D image where
color depicts the amplitude of each channel at a given time point
(x axis: time, y axis: channel). In 'image' mode, the plot is not
interactive.
"""
import matplotlib.pyplot as plt
# For evoked.plot_image ...
# First input checks for group_by and axes if any of them is not None.
# Either both must be dicts, or neither.
# If the former, the two dicts provide picks and axes to plot them to.
# Then, we call this function recursively for each entry in `group_by`.
if plot_type == "image" and isinstance(group_by, dict):
if axes is None:
axes = dict()
for sel in group_by:
plt.figure()
axes[sel] = plt.axes()
if not isinstance(axes, dict):
raise ValueError("If `group_by` is a dict, `axes` must be "
"a dict of axes or None.")
_validate_if_list_of_axes(list(axes.values()))
remove_xlabels = any([ax.is_last_row() for ax in axes.values()])
for sel in group_by: # ... we loop over selections
if sel not in axes:
raise ValueError(sel + " present in `group_by`, but not "
"found in `axes`")
ax = axes[sel]
# the unwieldy dict comp below defaults the title to the sel
_plot_evoked(evoked, group_by[sel], exclude, unit, show, ylim,
proj, xlim, hline, units, scalings,
(titles if titles is not None else
{channel_type(evoked.info, idx): sel
for idx in group_by[sel]}),
ax, plot_type, cmap=cmap, gfp=gfp,
window_title=window_title,
set_tight_layout=set_tight_layout,
selectable=selectable, noise_cov=noise_cov,
colorbar=colorbar, mask=mask,
mask_style=mask_style, mask_cmap=mask_cmap,
mask_alpha=mask_alpha, time_unit=time_unit,
show_names=show_names)
if remove_xlabels and not ax.is_last_row():
ax.set_xticklabels([])
ax.set_xlabel("")
ims = [ax.images[0] for ax in axes.values()]
clims = np.array([im.get_clim() for im in ims])
vmin, vmax = clims.min(), clims.max()  # avoid shadowing builtins
for im in ims:
im.set_clim(vmin, vmax)
figs = [ax.get_figure() for ax in axes.values()]
if len(set(figs)) == 1:
return figs[0]
else:
return figs
elif isinstance(axes, dict):
raise ValueError("If `group_by` is not a dict, "
"`axes` must not be a dict either.")
time_unit, times = _check_time_unit(time_unit, evoked.times)
info = evoked.info
if axes is not None and proj == 'interactive':
raise RuntimeError('Currently only single axis figures are supported'
' for interactive SSP selection.')
if isinstance(gfp, string_types) and gfp != 'only':
raise ValueError('gfp must be boolean or "only". Got %s' % gfp)
scalings = _handle_default('scalings', scalings)
titles = _handle_default('titles', titles)
units = _handle_default('units', units)
if picks is None:
picks = list(range(info['nchan']))
if len(picks) != len(set(picks)):
raise ValueError("`picks` are not unique. Please remove duplicates.")
bad_ch_idx = [info['ch_names'].index(ch) for ch in info['bads']
if ch in info['ch_names']]
if len(exclude) > 0:
if isinstance(exclude, string_types) and exclude == 'bads':
exclude = bad_ch_idx
elif (isinstance(exclude, list) and
all(isinstance(ch, string_types) for ch in exclude)):
exclude = [info['ch_names'].index(ch) for ch in exclude]
else:
raise ValueError(
'exclude has to be a list of channel names or "bads"')
picks = [pick for pick in picks if pick not in exclude]
picks = np.array(picks)
types = np.array([channel_type(info, idx) for idx in picks])
ch_types_used = list()
for this_type in _VALID_CHANNEL_TYPES:
if this_type in types:
ch_types_used.append(this_type)
fig = None
if axes is None:
fig, axes = plt.subplots(len(ch_types_used), 1)
plt.subplots_adjust(0.175, 0.08, 0.94, 0.94, 0.2, 0.63)
if isinstance(axes, plt.Axes):
axes = [axes]
fig.set_size_inches(6.4, 2 + len(axes))
if isinstance(axes, plt.Axes):
axes = [axes]
elif isinstance(axes, np.ndarray):
axes = list(axes)
if fig is None:
fig = axes[0].get_figure()
if window_title is not None:
fig.canvas.set_window_title(window_title)
if len(axes) != len(ch_types_used):
raise ValueError('Number of axes (%d) must match number of channel '
'types (%d: %s)' % (len(axes), len(ch_types_used),
sorted(ch_types_used)))
noise_cov = _check_cov(noise_cov, info)
projector, whitened_ch_names = _setup_plot_projector(
info, noise_cov, proj=proj is True, nave=evoked.nave)
evoked = evoked.copy()
if len(whitened_ch_names) > 0:
unit = False
if projector is not None:
evoked.data[:] = np.dot(projector, evoked.data)
if plot_type == 'butterfly':
_plot_lines(evoked.data, info, picks, fig, axes, spatial_colors, unit,
units, scalings, hline, gfp, types, zorder, xlim, ylim,
times, bad_ch_idx, titles, ch_types_used, selectable,
False, line_alpha=1., nave=evoked.nave,
time_unit=time_unit)
plt.setp(axes, xlabel='Time (%s)' % time_unit)
elif plot_type == 'image':
for ai, (ax, this_type) in enumerate(zip(axes, ch_types_used)):
use_nave = evoked.nave if ai == 0 else None
this_picks = list(picks[types == this_type])
_plot_image(evoked.data, ax, this_type, this_picks, cmap, unit,
units, scalings, times, xlim, ylim, titles,
colorbar=colorbar, mask=mask, mask_style=mask_style,
mask_cmap=mask_cmap, mask_alpha=mask_alpha,
nave=use_nave, time_unit=time_unit,
show_names=show_names, ch_names=evoked.ch_names)
if proj == 'interactive':
_check_delayed_ssp(evoked)
params = dict(evoked=evoked, fig=fig, projs=info['projs'], axes=axes,
types=types, units=units, scalings=scalings, unit=unit,
ch_types_used=ch_types_used, picks=picks,
plot_update_proj_callback=_plot_update_evoked,
plot_type=plot_type)
_draw_proj_checkbox(None, params)
plt.setp(fig.axes[:len(ch_types_used) - 1], xlabel='')
fig.canvas.draw() # for axes plots update axes.
if set_tight_layout:
tight_layout(fig=fig)
plt_show(show)
return fig
def _plot_lines(data, info, picks, fig, axes, spatial_colors, unit, units,
scalings, hline, gfp, types, zorder, xlim, ylim, times,
bad_ch_idx, titles, ch_types_used, selectable, psd,
line_alpha, nave, time_unit='ms'):
"""Plot data as butterfly plot."""
from matplotlib import patheffects, pyplot as plt
from matplotlib.widgets import SpanSelector
assert len(axes) == len(ch_types_used)
texts = list()
idxs = list()
lines = list()
path_effects = [patheffects.withStroke(linewidth=2, foreground="w",
alpha=0.75)]
gfp_path_effects = [patheffects.withStroke(linewidth=5, foreground="w",
alpha=0.75)]
if selectable:
selectables = np.ones(len(ch_types_used), dtype=bool)
for type_idx, this_type in enumerate(ch_types_used):
idx = picks[types == this_type]
if len(idx) < 2 or (this_type == 'grad' and len(idx) < 4):
# prevent unnecessary warnings for e.g. EOG
if this_type in _DATA_CH_TYPES_SPLIT:
logger.info('Need more than one channel to make '
'topography for %s. Disabling interactivity.'
% (this_type,))
selectables[type_idx] = False
if selectable:
# Parameters for butterfly interactive plots
params = dict(axes=axes, texts=texts, lines=lines,
ch_names=info['ch_names'], idxs=idxs, need_draw=False,
path_effects=path_effects)
fig.canvas.mpl_connect('pick_event',
partial(_butterfly_onpick, params=params))
fig.canvas.mpl_connect('button_press_event',
partial(_butterfly_on_button_press,
params=params))
for ai, (ax, this_type) in enumerate(zip(axes, ch_types_used)):
line_list = list() # 'line_list' contains the lines for this axes
if unit is False:
this_scaling = 1.0
ch_unit = 'NA' # no unit
else:
this_scaling = 1. if scalings is None else scalings[this_type]
ch_unit = units[this_type]
idx = list(picks[types == this_type])
idxs.append(idx)
if len(idx) > 0:
# Set amplitude scaling
D = this_scaling * data[idx, :]
_check_if_nan(D)
gfp_only = (isinstance(gfp, string_types) and gfp == 'only')
if not gfp_only:
chs = [info['chs'][i] for i in idx]
locs3d = np.array([ch['loc'][:3] for ch in chs])
if spatial_colors is True and (locs3d == 0).all():
warn('Channel locations not available. Disabling spatial '
'colors.')
spatial_colors = selectable = False
if spatial_colors is True and len(idx) != 1:
x, y, z = locs3d.T
colors = _rgb(x, y, z)
_handle_spatial_colors(colors, info, idx, this_type, psd,
ax)
else:
if isinstance(spatial_colors, (tuple, string_types)):
col = [spatial_colors]
else:
col = ['k']
colors = col * len(idx)
for i in bad_ch_idx:
if i in idx:
colors[idx.index(i)] = 'r'
if zorder == 'std':
# find the channels with the least activity
# to map them in front of the more active ones
z_ord = D.std(axis=1).argsort()
elif zorder == 'unsorted':
z_ord = list(range(D.shape[0]))
elif not callable(zorder):
error = ('`zorder` must be a function, "std" '
'or "unsorted", not {0}.')
raise TypeError(error.format(type(zorder)))
else:
z_ord = zorder(D)
# plot channels
for ch_idx, z in enumerate(z_ord):
line_list.append(
ax.plot(times, D[ch_idx], picker=3.,
zorder=z + 1 if spatial_colors is True else 1,
color=colors[ch_idx], alpha=line_alpha,
linewidth=0.5)[0])
if gfp: # 'only' or boolean True
gfp_color = 3 * (0.,) if spatial_colors is True else (0., 1.,
0.)
this_gfp = np.sqrt((D * D).mean(axis=0))
this_ylim = ax.get_ylim() if (ylim is None or this_type not in
ylim.keys()) else ylim[this_type]
if gfp_only:
y_offset = 0.
else:
y_offset = this_ylim[0]
this_gfp += y_offset
ax.fill_between(times, y_offset, this_gfp, color='none',
facecolor=gfp_color, zorder=1, alpha=0.2)
line_list.append(ax.plot(times, this_gfp, color=gfp_color,
zorder=3, alpha=line_alpha)[0])
ax.text(times[0] + 0.01 * (times[-1] - times[0]),
this_gfp[0] + 0.05 * np.diff(ax.get_ylim())[0],
'GFP', zorder=4, color=gfp_color,
path_effects=gfp_path_effects)
for ii, line in zip(idx, line_list):
if ii in bad_ch_idx:
line.set_zorder(2)
if spatial_colors is True:
line.set_linestyle("--")
ax.set_ylabel(ch_unit)
# for old matplotlib, we actually need this to have a bounding
# box (!), so we have to put some valid text here, change
# alpha and path effects later
texts.append(ax.text(0, 0, 'blank', zorder=3,
verticalalignment='baseline',
horizontalalignment='left',
fontweight='bold', alpha=0))
if xlim is not None:
if xlim == 'tight':
xlim = (times[0], times[-1])
ax.set_xlim(xlim)
if ylim is not None and this_type in ylim:
ax.set_ylim(ylim[this_type])
ax.set(title=r'%s (%d channel%s)'
% (titles[this_type], len(D), _pl(len(D))))
if ai == 0:
_add_nave(ax, nave)
if hline is not None:
for h in hline:
c = ('grey' if spatial_colors is True else 'r')
ax.axhline(h, linestyle='--', linewidth=2, color=c)
lines.append(line_list)
if selectable:
for ax in np.array(axes)[selectables]:
if len(ax.lines) == 1:
continue
text = ax.annotate('Loading...', xy=(0.01, 0.1),
xycoords='axes fraction', fontsize=20,
color='green', zorder=3)
text.set_visible(False)
callback_onselect = partial(_line_plot_onselect,
ch_types=ch_types_used, info=info,
data=data, times=times, text=text,
psd=psd, time_unit=time_unit)
blit = False if plt.get_backend() == 'MacOSX' else True
minspan = 0 if len(times) < 2 else times[1] - times[0]
ax._span_selector = SpanSelector(
ax, callback_onselect, 'horizontal', minspan=minspan,
useblit=blit, rectprops=dict(alpha=0.5, facecolor='red'))
def _add_nave(ax, nave):
"""Add nave to axes."""
if nave is not None:
ax.annotate(
r'N$_{\mathrm{ave}}$=%d' % nave, ha='left', va='bottom',
xy=(0, 1), xycoords='axes fraction',
xytext=(0, 5), textcoords='offset pixels')
def _handle_spatial_colors(colors, info, idx, ch_type, psd, ax):
"""Set up spatial colors."""
used_nm = np.array(_clean_names(info['ch_names']))[idx]
# find indices for bads
bads = [np.where(used_nm == bad)[0][0] for bad in info['bads'] if bad in
used_nm]
pos = _auto_topomap_coords(info, idx, ignore_overlap=True, to_sphere=True)
pos, outlines = _check_outlines(pos, np.array([1, 1]),
{'center': (0, 0), 'scale': (0.5, 0.5)})
loc = 1 if psd else 2 # Legend in top right for psd plot.
_plot_legend(pos, colors, ax, bads, outlines, loc)
def _plot_image(data, ax, this_type, picks, cmap, unit, units, scalings, times,
xlim, ylim, titles, colorbar=True, mask=None, mask_cmap=None,
mask_style=None, mask_alpha=.25, nave=None,
time_unit='s', show_names=False, ch_names=None):
"""Plot images."""
import matplotlib.pyplot as plt
assert time_unit is not None
if show_names == "auto":
if picks is not None:
show_names = "all" if len(picks) < 25 else True
else:
show_names = False
cmap = _setup_cmap(cmap)
ch_unit = units[this_type]
this_scaling = scalings[this_type]
if unit is False:
this_scaling = 1.0
ch_unit = 'NA' # no unit
if picks is not None:
data = data[picks]
if mask is not None:
mask = mask[picks]
# Show the image
# Set amplitude scaling
data = this_scaling * data
if ylim is None or this_type not in ylim:
vmax = np.abs(data).max()
vmin = -vmax
else:
vmin, vmax = ylim[this_type]
_check_if_nan(data)
im, t_end = _plot_masked_image(
ax, data, times, mask, picks=None, yvals=None, cmap=cmap[0],
vmin=vmin, vmax=vmax, mask_style=mask_style, mask_alpha=mask_alpha,
mask_cmap=mask_cmap)
if xlim is not None:
if xlim == 'tight':
xlim = (times[0], times[-1])
ax.set_xlim(xlim)
if colorbar:
cbar = plt.colorbar(im, ax=ax)
cbar.ax.set_title(ch_unit)
if cmap[1]:
ax.CB = DraggableColorbar(cbar, im)
ylabel = "Channels" if show_names else 'Channel (index)'
t = titles[this_type] + ' (%d channel%s' % (len(data), _pl(data)) + t_end
ax.set(ylabel=ylabel, xlabel='Time (%s)' % (time_unit,), title=t)
_add_nave(ax, nave)
if show_names is not False:
if show_names == "all":
yticks = np.arange(len(picks)).astype(int)
yticklabels = np.array(ch_names)[picks]
else:
max_tick = len(picks)
yticks = [tick for tick in ax.get_yticks() if tick < max_tick]
yticks = np.array(yticks).astype(int)
# these should only ever be ints right?
yticklabels = np.array(ch_names)[picks][yticks]
ax.set(yticks=yticks + .5, yticklabels=yticklabels)
@verbose
def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True,
ylim=None, xlim='tight', proj=False, hline=None, units=None,
scalings=None, titles=None, axes=None, gfp=False,
window_title=None, spatial_colors=False, zorder='unsorted',
selectable=True, noise_cov=None, time_unit='s', verbose=None):
"""Plot evoked data using butterfly plots.
Left click to a line shows the channel name. Selecting an area by clicking
and holding left mouse button plots a topographic map of the painted area.
.. note:: If bad channels are not excluded they are shown in red.
Parameters
----------
evoked : instance of Evoked
The evoked data
picks : array-like of int | None
The indices of channels to plot. If None show all.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Show figure if True.
ylim : dict | None
ylim for plots (after scaling has been applied). e.g.
ylim = dict(eeg=[-20, 20])
Valid keys are eeg, mag, grad, misc. If None, the ylim parameter
for each channel equals the pyplot default.
xlim : 'tight' | tuple | None
xlim for plots.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
hline : list of floats | None
The values at which to show a horizontal line.
units : dict | None
The units of the channel types used for axes labels. If None,
defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If None,
defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
titles : dict | None
The titles associated with the channels. If None, defaults to
`dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
axes : instance of Axis | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
gfp : bool | 'only'
Plot GFP in green if True or "only". If "only", then the individual
channel traces will not be shown.
window_title : str | None
The title to put at the top of the figure.
spatial_colors : bool
If True, the lines are color coded by mapping physical sensor
coordinates into color values. Spatially similar channels will have
similar colors. Bad channels will be dashed. If False, the good
channels are plotted black and bad channels red. Defaults to False.
zorder : str | callable
Which channels to put in the front or back. Only matters if
`spatial_colors` is used.
If str, must be `std` or `unsorted` (defaults to `unsorted`). If
`std`, data with the lowest standard deviation (weakest effects) will
be put in front so that they are not obscured by those with stronger
effects. If `unsorted`, channels are z-sorted as in the evoked
instance.
If callable, must take one argument: a numpy array of the same
dimensionality as the evoked raw data; and return a list of
unique integers with length equal to the number of channels.
.. versionadded:: 0.13.0
selectable : bool
Whether to use interactive features. If True (default), it is possible
to paint an area to draw topomaps. When False, the interactive features
are disabled. Disabling interactive features reduces memory consumption
and is useful when using the ``axes`` parameter to draw multi-axes figures.
.. versionadded:: 0.13.0
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channel names are shown in italic.
Can be a string to load a covariance from disk.
See also :meth:`mne.Evoked.plot_white` for additional inspection
of noise covariance properties when whitening evoked data.
For data processed with SSS, the effective dependence between
magnetometers and gradiometers may introduce differences in scaling;
consider using :meth:`mne.Evoked.plot_white`.
.. versionadded:: 0.16.0
time_unit : str
The units for the time axis, can be "ms" or "s" (default).
.. versionadded:: 0.16
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure containing the butterfly plots.
See Also
--------
mne.viz.plot_evoked_white
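Examples
--------
A minimal usage sketch; ``evoked`` is assumed to be an
:class:`mne.Evoked` instance obtained elsewhere (e.g. with
:func:`mne.read_evokeds`) and the keyword values are illustrative only::

    from mne.viz import plot_evoked
    fig = plot_evoked(evoked, spatial_colors=True, gfp=True,
                      time_unit='ms')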
"""
return _plot_evoked(
evoked=evoked, picks=picks, exclude=exclude, unit=unit, show=show,
ylim=ylim, proj=proj, xlim=xlim, hline=hline, units=units,
scalings=scalings, titles=titles, axes=axes, plot_type="butterfly",
gfp=gfp, window_title=window_title, spatial_colors=spatial_colors,
selectable=selectable, zorder=zorder, noise_cov=noise_cov,
time_unit=time_unit)
def plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None,
proj=False, vline=[0.0], fig_background=None,
merge_grads=False, legend=True, axes=None,
background_color='w', noise_cov=None, show=True):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
evoked : list of Evoked | Evoked
The evoked response to plot.
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
color : list of color objects | color object | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
matplotlib borders style to be used for each sensor plot.
ylim : dict | None
ylim for plots (after scaling has been applied). The value
determines the upper and lower subplot limits. e.g.
ylim = dict(eeg=[-20, 20]). Valid keys are eeg, mag, grad, misc.
If None, the ylim parameter for each channel is determined by
the maximum absolute peak.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If None,
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
title : str
Title of the figure.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
vline : list of floats | None
The values at which to show a vertical line.
fig_background : None | numpy ndarray
A background image for the figure. This must work with a call to
plt.imshow. Defaults to None.
merge_grads : bool
Whether to use RMS value of gradiometer pairs. Only works for Neuromag
data. Defaults to False.
legend : bool | int | string | tuple
If True, create a legend based on evoked.comment. If False, disable the
legend. Otherwise, the legend is created and the parameter value is
passed as the location parameter to the matplotlib legend call. It can
be an integer (e.g. 0 corresponds to upper right corner of the plot),
a string (e.g. 'upper right'), or a tuple (x, y coordinates of the
lower left corner of the legend in the axes coordinate system).
See matplotlib documentation for more details.
axes : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
background_color : str | obj
Background color. Typically 'k' (black) or 'w' (white; default).
.. versionadded:: 0.15.0
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channel names are shown in italic.
Can be a string to load a covariance from disk.
.. versionadded:: 0.16.0
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations
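Examples
--------
A minimal usage sketch; ``evoked_a`` and ``evoked_b`` are assumed to be
:class:`mne.Evoked` instances for two conditions and the colors are
illustrative only::

    from mne.viz import plot_evoked_topo
    fig = plot_evoked_topo([evoked_a, evoked_b], color=['y', 'g'])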
"""
from matplotlib.colors import colorConverter
if not isinstance(evoked, (tuple, list)):
evoked = [evoked]
dark_background = \
np.mean(colorConverter.to_rgb(background_color)) < 0.5
if dark_background:
fig_facecolor = background_color
axis_facecolor = background_color
font_color = 'w'
else:
fig_facecolor = background_color
axis_facecolor = background_color
font_color = 'k'
if color is None:
if dark_background:
color = ['w'] + _get_color_list()
else:
color = _get_color_list()
color = color * ((len(evoked) // len(color)) + 1)  # tile to cover all
color = color[:len(evoked)]
return _plot_evoked_topo(evoked=evoked, layout=layout,
layout_scale=layout_scale, color=color,
border=border, ylim=ylim, scalings=scalings,
title=title, proj=proj, vline=vline,
fig_facecolor=fig_facecolor,
fig_background=fig_background,
axis_facecolor=axis_facecolor,
font_color=font_color, merge_grads=merge_grads,
legend=legend, axes=axes, show=show,
noise_cov=noise_cov)
def plot_evoked_image(evoked, picks=None, exclude='bads', unit=True,
show=True, clim=None, xlim='tight', proj=False,
units=None, scalings=None, titles=None, axes=None,
cmap='RdBu_r', colorbar=True, mask=None,
mask_style=None, mask_cmap="Greys", mask_alpha=.25,
time_unit='s', show_names="auto", group_by=None):
"""Plot evoked data as images.
Parameters
----------
evoked : instance of Evoked
The evoked data
picks : array-like of int | None
The indices of channels to plot. If None show all.
This parameter can also be used to set the order the channels
are shown in, as the channel image is sorted by the order of picks.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Show figure if True.
clim : dict | None
clim for plots (after scaling has been applied). e.g.
clim = dict(eeg=[-20, 20])
Valid keys are eeg, mag, grad, misc. If None, the clim parameter
for each channel equals the pyplot default.
xlim : 'tight' | tuple | None
xlim for plots.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
units : dict | None
The units of the channel types used for axes labels. If None,
defaults to ``dict(eeg='uV', grad='fT/cm', mag='fT')``.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If None,
defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
titles : dict | None
The titles associated with the channels. If None, defaults to
``dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')``.
axes : instance of Axis | list | dict | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
If `group_by` is a dict, this cannot be a list, but it can be a dict
of axes, with the keys matching those of `group_by`. In that
case, the provided axes will be used for the corresponding groups.
Defaults to `None`.
cmap : matplotlib colormap | (colormap, bool) | 'interactive'
Colormap. If tuple, the first value indicates the colormap to use and
the second value is a boolean defining interactivity. In interactive
mode the colors are adjustable by clicking and dragging the colorbar
with left and right mouse button. Left mouse button moves the scale up
and down and right mouse button adjusts the range. Hitting space bar
resets the scale. Up and down arrows can be used to change the
colormap. If 'interactive', translates to ``('RdBu_r', True)``.
Defaults to ``'RdBu_r'``.
colorbar : bool
If True, plot a colorbar. Defaults to True.
.. versionadded:: 0.16
mask : ndarray | None
An array of booleans of the same shape as the data. Entries of the
data that correspond to ``False`` in the mask are masked (see
`mask_style` below). Useful for, e.g., masking for statistical
significance.
.. versionadded:: 0.16
mask_style : None | 'both' | 'contour' | 'mask'
If `mask` is not None: if 'contour', a contour line is drawn around
the masked areas (``True`` in `mask`). If 'mask', entries not
``True`` in `mask` are shown transparently. If 'both', both a contour
and transparency are used.
If ``None``, defaults to 'both' if `mask` is not None, and is ignored
otherwise.
.. versionadded:: 0.16
mask_cmap : matplotlib colormap | (colormap, bool) | 'interactive'
The colormap chosen for masked parts of the image (see below), if
`mask` is not ``None``. If None, `cmap` is reused. Defaults to
``Greys``. Not interactive. Otherwise, as `cmap`.
mask_alpha : float
A float between 0 and 1. If `mask` is not None, this sets the
alpha level (degree of transparency) for the masked-out segments.
I.e., if 0, masked-out segments are not visible at all.
Defaults to .25.
.. versionadded:: 0.16
time_unit : str
The units for the time axis, can be "ms" or "s" (default).
.. versionadded:: 0.16
show_names : bool | str
Determines if channel names should be plotted on the y axis. If False,
no names are shown. If True, ticks are set automatically and the
corresponding channel names are shown. If str, must be "auto" or "all".
If "all", all channel names are shown.
If "auto", is set to False if `picks` is ``None``; to ``True`` if
`picks` is not ``None`` and fewer than 25 picks are shown; to "all"
if `picks` is not ``None`` and contains fewer than 25 entries.
group_by : None | dict
If a dict, the values must be picks, and `axes` must also be a dict
with matching keys, or None. If `axes` is None, one figure and one axis
will be created for each entry in `group_by`.
Then, for each entry, the picked channels will be plotted
to the corresponding axis. If `titles` are None, keys will become plot
titles. This is useful for e.g. ROIs. Each entry must contain only
one channel type. For example::
group_by=dict(Left_ROI=[1, 2, 3, 4], Right_ROI=[5, 6, 7, 8])
If None, all picked channels are plotted to the same axis.
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure containing the images.
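Examples
--------
A minimal usage sketch; ``evoked`` is assumed to be an
:class:`mne.Evoked` instance obtained elsewhere, and the ``group_by``
picks below are placeholder channel indices (each group may only
contain a single channel type)::

    from mne.viz import plot_evoked_image
    fig = plot_evoked_image(evoked, cmap='interactive', show_names='auto')
    figs = plot_evoked_image(
        evoked, group_by=dict(Left_ROI=[1, 2, 3, 4], Right_ROI=[5, 6, 7, 8]))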
"""
return _plot_evoked(evoked=evoked, picks=picks, exclude=exclude, unit=unit,
show=show, ylim=clim, proj=proj, xlim=xlim, hline=None,
units=units, scalings=scalings, titles=titles,
axes=axes, plot_type="image", cmap=cmap,
colorbar=colorbar, mask=mask, mask_style=mask_style,
mask_cmap=mask_cmap, mask_alpha=mask_alpha,
time_unit=time_unit, show_names=show_names,
group_by=group_by)
def _plot_update_evoked(params, bools):
"""Update the plot evoked lines."""
picks, evoked = [params[k] for k in ('picks', 'evoked')]
projs = [proj for ii, proj in enumerate(params['projs'])
if ii in np.where(bools)[0]]
params['proj_bools'] = bools
new_evoked = evoked.copy()
new_evoked.info['projs'] = []
new_evoked.add_proj(projs)
new_evoked.apply_proj()
for ax, t in zip(params['axes'], params['ch_types_used']):
this_scaling = params['scalings'][t]
idx = [picks[i] for i in range(len(picks)) if params['types'][i] == t]
D = this_scaling * new_evoked.data[idx, :]
if params['plot_type'] == 'butterfly':
for line, di in zip(ax.lines, D):
line.set_ydata(di)
else:
ax.images[0].set_data(D)
params['fig'].canvas.draw()
@verbose
def plot_evoked_white(evoked, noise_cov, show=True, rank=None, time_unit='s',
verbose=None):
u"""Plot whitened evoked response.
Plots the whitened evoked response and the whitened GFP as described in
[1]_. This function is especially useful for investigating noise
covariance properties to determine if data are properly whitened (e.g.,
achieving expected values in line with model assumptions, see Notes below).
Parameters
----------
evoked : instance of mne.Evoked
The evoked response.
noise_cov : list | instance of Covariance | str
The noise covariance. Can be a string to load a covariance from disk.
show : bool
Show figure if True.
rank : dict of int | None
Dict of ints where keys are 'eeg', 'meg', 'mag' or 'grad'. If None,
the rank is detected automatically. Defaults to None. 'mag' or
'grad' cannot be specified jointly with 'meg'. For SSS'd data,
only 'meg' is valid. For non-SSS'd data, 'mag' and/or 'grad' must be
specified separately. If only one is specified, the other one gets
estimated. Note. The rank estimation will be printed by the logger for
each noise covariance estimator that is passed.
time_unit : str
The units for the time axis, can be "ms" or "s" (default).
.. versionadded:: 0.16
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure object containing the plot.
See Also
--------
mne.Evoked.plot
Notes
-----
If baseline signals match the assumption of Gaussian white noise,
values should be centered at 0, and be within 2 standard deviations
(±1.96) for 95% of the time points. For the global field power (GFP),
we expect it to fluctuate around a value of 1.
If one single covariance object is passed, the GFP panel (bottom)
will depict different sensor types. If multiple covariance objects are
passed as a list, the left column will display the whitened evoked
responses for each channel based on the whitener from the noise covariance
that has the highest log-likelihood. The right column will depict the
whitened GFPs based on each estimator separately for each sensor type.
Instead of numbers of channels the GFP display shows the estimated rank.
Note. The rank estimation will be printed by the logger
(if ``verbose=True``) for each noise covariance estimator that is passed.
References
----------
.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
covariance estimation and spatial whitening of MEG and EEG
signals, vol. 108, 328-342, NeuroImage.
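Examples
--------
A minimal usage sketch; ``evoked`` and ``noise_cov`` (an
:class:`mne.Covariance`, e.g. from :func:`mne.compute_covariance`) are
assumed to have been computed elsewhere::

    from mne.viz import plot_evoked_white
    fig = plot_evoked_white(evoked, noise_cov)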
"""
return _plot_evoked_white(evoked=evoked, noise_cov=noise_cov,
scalings=None, rank=rank, show=show,
time_unit=time_unit)
def _plot_evoked_white(evoked, noise_cov, scalings=None, rank=None, show=True,
time_unit='s'):
"""Help plot_evoked_white.
Additional Parameters
---------------------
scalings : dict | None
The rescaling method to be applied to improve the accuracy of rank
estimation. If dict, it will override the following default values
(used if None)::
dict(mag=1e12, grad=1e11, eeg=1e5)
Note. These values were tested on different datasets across various
conditions. You should not need to update them.
"""
from ..cov import whiten_evoked, read_cov # recursive import
import matplotlib.pyplot as plt
time_unit, times = _check_time_unit(time_unit, evoked.times)
if isinstance(noise_cov, string_types):
noise_cov = read_cov(noise_cov)
if not isinstance(noise_cov, (list, tuple)):
noise_cov = [noise_cov]
evoked = evoked.copy() # handle ref meg
passive_idx = [idx for idx, proj in enumerate(evoked.info['projs'])
if not proj['active']]
# projections must be either already applied or absent; drop inactive ones
for idx in passive_idx[::-1]: # reverse order so idx does not change
evoked.del_proj(idx)
evoked.pick_types(ref_meg=False, exclude='bads', **_PICK_TYPES_DATA_DICT)
n_ch_used, rank_list, picks_list, has_sss = _triage_rank_sss(
evoked.info, noise_cov, rank, scalings)
del rank, scalings
if has_sss:
logger.info('SSS has been applied to data. Showing mag and grad '
'whitening jointly.')
# get one whitened evoked per cov
evokeds_white = [whiten_evoked(evoked, cov, picks=None, rank=r)
for cov, r in zip(noise_cov, rank_list)]
def whitened_gfp(x, rank=None):
"""Whitened Global Field Power.
The MNE inverse solver assumes zero mean whitened data as input.
Therefore, a chi^2 statistic will be best to detect model violations.
"""
return np.sum(x ** 2, axis=0) / (len(x) if rank is None else rank)
# prepare plot
if len(noise_cov) > 1:
n_columns = 2
n_extra_row = 0
else:
n_columns = 1
n_extra_row = 1
n_rows = n_ch_used + n_extra_row
fig, axes = plt.subplots(n_rows,
n_columns, sharex=True, sharey=False,
figsize=(8.8, 2.2 * n_rows))
if n_columns > 1:
suptitle = ('Whitened evoked (left, best estimator = "%s")\n'
'and global field power '
'(right, comparison of estimators)' %
noise_cov[0].get('method', 'empirical'))
fig.suptitle(suptitle)
if any(((n_columns == 1 and n_ch_used >= 1),
(n_columns == 2 and n_ch_used == 1))):
axes_evoked = axes[:n_ch_used]
ax_gfp = axes[-1:]
elif n_columns == 2 and n_ch_used > 1:
axes_evoked = axes[:n_ch_used, 0]
ax_gfp = axes[:, 1]
else:
raise RuntimeError('Wrong axes inputs')
titles_ = _handle_default('titles')
if has_sss:
titles_['meg'] = 'MEG (combined)'
colors = [plt.cm.Set1(i) for i in np.linspace(0, 0.5, len(noise_cov))]
ch_colors = _handle_default('color', None)
iter_gfp = zip(evokeds_white, noise_cov, rank_list, colors)
# the first is by convention the best noise cov; we plot that one on the left.
if not has_sss:
evokeds_white[0].plot(unit=False, axes=axes_evoked,
hline=[-1.96, 1.96], show=False,
time_unit=time_unit)
else:
for ((ch_type, picks), ax) in zip(picks_list, axes_evoked):
ax.plot(times, evokeds_white[0].data[picks].T, color='k',
lw=0.5)
for hline in [-1.96, 1.96]:
ax.axhline(hline, color='red', linestyle='--', lw=2)
ax.set(title='%s (%d channel%s)'
% (titles_[ch_type], len(picks), _pl(len(picks))))
# Now plot the GFP for all covs if indicated.
for evoked_white, noise_cov, rank_, color in iter_gfp:
i = 0
for ch, sub_picks in picks_list:
this_rank = rank_[ch]
title = '{0} ({2}{1})'.format(
titles_[ch] if n_columns > 1 else ch,
this_rank, 'rank ' if n_columns > 1 else '')
label = noise_cov.get('method', 'empirical')
ax = ax_gfp[i]
ax.set_title(title if n_columns > 1 else
'Whitened GFP, method = "%s"' % label)
data = evoked_white.data[sub_picks]
gfp = whitened_gfp(data, rank=this_rank)
# Wrap SSS-processed data (MEG) to the mag color
color_ch = 'mag' if ch == 'meg' else ch
ax.plot(times, gfp,
label=label if n_columns > 1 else title,
color=color if n_columns > 1 else ch_colors[color_ch],
lw=0.5)
ax.set(xlabel='Time (%s)' % (time_unit,), ylabel=r'GFP ($\chi^2$)',
xlim=[times[0], times[-1]], ylim=(0, 10))
ax.axhline(1, color='red', linestyle='--', lw=2.)
if n_columns > 1:
i += 1
ax = ax_gfp[0]
if n_columns == 1:
ax.legend( # mpl < 1.2.1 compatibility: use prop instead of fontsize
loc='upper right', bbox_to_anchor=(0.98, 0.9), prop=dict(size=12))
else:
ax.legend(loc='upper right', prop=dict(size=10))
params = dict(top=[0.69, 0.82, 0.87][n_rows - 1],
bottom=[0.22, 0.13, 0.09][n_rows - 1])
if has_sss:
params['hspace'] = 0.49
fig.subplots_adjust(**params)
fig.canvas.draw()
plt_show(show)
return fig
@verbose
def plot_snr_estimate(evoked, inv, show=True, verbose=None):
"""Plot a data SNR estimate.
Parameters
----------
evoked : instance of Evoked
The evoked instance. This should probably be baseline-corrected.
inv : instance of InverseOperator
The minimum-norm inverse operator.
show : bool
Show figure if True.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
The bluish green line is the SNR determined by the GFP of the whitened
evoked data. The orange line is the SNR estimated based on the mismatch
between the data and the data re-estimated from the regularized inverse.
.. versionadded:: 0.9.0
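Examples
--------
A minimal usage sketch; ``evoked`` and the inverse operator ``inv``
(e.g. loaded with :func:`mne.minimum_norm.read_inverse_operator`) are
assumed to have been obtained elsewhere::

    from mne.viz import plot_snr_estimate
    fig = plot_snr_estimate(evoked, inv)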
"""
import matplotlib.pyplot as plt
from ..minimum_norm import estimate_snr
snr, snr_est = estimate_snr(evoked, inv)
fig, ax = plt.subplots(1, 1)
lims = np.concatenate([evoked.times[[0, -1]], [-1, snr_est.max()]])
ax.axvline(0, color='k', ls=':', lw=1)
ax.axhline(0, color='k', ls=':', lw=1)
# Colors are "bluish green" and "vermilion" taken from:
# http://bconnelly.net/2013/10/creating-colorblind-friendly-figures/
ax.plot(evoked.times, snr_est, color=[0.0, 0.6, 0.5])
ax.plot(evoked.times, snr - 1, color=[0.8, 0.4, 0.0])
ax.set(xlim=lims[:2], ylim=lims[2:], ylabel='SNR', xlabel='Time (s)')
if evoked.comment is not None:
ax.set_title(evoked.comment)
plt_show(show)
return fig
def plot_evoked_joint(evoked, times="peaks", title='', picks=None,
exclude=None, show=True, ts_args=None,
topomap_args=None):
"""Plot evoked data as butterfly plot and add topomaps for time points.
Parameters
----------
evoked : instance of Evoked
The evoked instance.
times : float | array of floats | "auto" | "peaks"
The time point(s) to plot. If "auto", 5 evenly spaced topographies
between the first and last time instant will be shown. If "peaks",
finds time points automatically by checking for 3 local maxima in
Global Field Power. Defaults to "peaks".
title : str | None
The title. If `None`, suppress printing channel type. If an empty
string, a default title is created. Defaults to ''.
picks : array-like of int | None
The indices of channels to plot. If None show all. Defaults to None.
exclude : None | list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded. Defaults to None.
show : bool
Show figure if True. Defaults to True.
ts_args : None | dict
A dict of `kwargs` that are forwarded to :meth:`mne.Evoked.plot` to
style the butterfly plot. If they are not in this dict, the following
defaults are passed: ``spatial_colors=True``, ``zorder='std'``.
``show`` and ``exclude`` are illegal.
If None, no customizable arguments will be passed.
Defaults to `None`.
topomap_args : None | dict
A dict of `kwargs` that are forwarded to
:meth:`mne.Evoked.plot_topomap` to style the topomaps.
If it is not in this dict, ``outlines='skirt'``
will be passed. ``show``, ``times``, and ``colorbar`` are illegal.
If None, no customizable arguments will be passed.
Defaults to `None`.
Returns
-------
fig : instance of matplotlib.figure.Figure | list
The figure object containing the plot. If `evoked` has multiple
channel types, a list of figures, one for each channel type, is
returned.
Notes
-----
.. versionadded:: 0.12.0
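Examples
--------
A minimal usage sketch; ``evoked`` is assumed to be an
:class:`mne.Evoked` instance obtained elsewhere, and the arguments
shown are illustrative only::

    from mne.viz import plot_evoked_joint
    fig = plot_evoked_joint(evoked, times='peaks',
                            ts_args=dict(gfp=True),
                            topomap_args=dict(outlines='skirt'))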
"""
import matplotlib.pyplot as plt
if ts_args is not None and not isinstance(ts_args, dict):
raise TypeError('ts_args must be dict or None, got type %s'
% (type(ts_args),))
ts_args = dict() if ts_args is None else ts_args.copy()
ts_args['time_unit'], evoked_times = _check_time_unit(
ts_args.get('time_unit', 's'), evoked.times)
if topomap_args is None:
topomap_args = dict()
illegal_args = {"show", 'times', 'exclude'}
for args in (ts_args, topomap_args):
if any((x in args for x in illegal_args)):
raise ValueError("Don't pass any of {} as *_args.".format(
", ".join(list(illegal_args))))
if ("axes" in ts_args) or ("axes" in topomap_args):
if not ("axes" in ts_args) and ("axes" in topomap_args):
raise ValueError("If one of `ts_args` and `topomap_args` contains "
"'axes', the other must, too.")
if "axes" in ts_args:
_validate_if_list_of_axes([ts_args["axes"]], 1)
n_topomaps = (3 if times is None else len(times)) + 1
if "axes" in topomap_args:
_validate_if_list_of_axes(list(topomap_args["axes"]), n_topomaps)
# channel selection
# simply create a new evoked object with the desired channel selection
evoked = _pick_inst(evoked, picks, exclude, copy=True)
info = evoked.info
ch_types = _get_channel_types(info, restrict_data_types=True)
# if multiple sensor types: one plot per channel type, recursive call
if len(ch_types) > 1:
if "axes" in ts_args or "axes" in topomap_args:
raise NotImplementedError(
"Currently, passing axes manually (via `ts_args` or "
"`topomap_args`) is not supported for multiple channel types.")
figs = list()
for this_type in ch_types: # pick only the corresponding channel type
ev_ = evoked.copy().pick_channels(
[info['ch_names'][idx] for idx in range(info['nchan'])
if channel_type(info, idx) == this_type])
if len(_get_channel_types(ev_.info)) > 1:
raise RuntimeError('Possibly infinite loop due to channel '
'selection problem. This should never '
'happen! Please check your channel types.')
figs.append(
plot_evoked_joint(
ev_, times=times, title=title, show=show, ts_args=ts_args,
exclude=list(), topomap_args=topomap_args))
return figs
# set up time points to show topomaps for
times_sec = _process_times(evoked, times, few=True)
del times
_, times_ts = _check_time_unit(ts_args['time_unit'], times_sec)
# prepare axes for topomap
if ("axes" not in topomap_args) or ("axes" not in ts_args):
fig, ts_ax, map_ax, cbar_ax = _prepare_joint_axes(len(times_sec),
figsize=(8.0, 4.2))
else:
ts_ax = ts_args["axes"]
del ts_args["axes"]
map_ax = topomap_args["axes"][:-1]
cbar_ax = topomap_args["axes"][-1]
del topomap_args["axes"]
fig = cbar_ax.figure
# butterfly/time series plot
# most of this code is about passing defaults on demand
ts_args_def = dict(picks=None, unit=True, ylim=None, xlim='tight',
proj=False, hline=None, units=None, scalings=None,
titles=None, gfp=False, window_title=None,
spatial_colors=True, zorder='std')
ts_args_def.update(ts_args)
_plot_evoked(evoked, axes=ts_ax, show=False, plot_type='butterfly',
exclude=[], set_tight_layout=False, **ts_args_def)
# handle title
# we use a new axis for the title to handle scaling of plots
old_title = ts_ax.get_title()
ts_ax.set_title('')
if title is not None:
title_ax = plt.subplot(4, 3, 2)
if title == '':
title = old_title
title_ax.text(.5, .5, title, transform=title_ax.transAxes,
horizontalalignment='center',
verticalalignment='center')
title_ax.axis('off')
# topomap
contours = topomap_args.get('contours', 6)
ch_type = ch_types.pop() # set should only contain one element
# Since the data has all the ch_types, we get the limits from the plot.
vmin, vmax = ts_ax.get_ylim()
norm = ch_type == 'grad'
vmin = 0 if norm else vmin
vmin, vmax = _setup_vmin_vmax(evoked.data, vmin, vmax, norm)
if not isinstance(contours, (list, np.ndarray)):
locator, contours = _set_contour_locator(vmin, vmax, contours)
else:
locator = None
topomap_args_pass = topomap_args.copy()
topomap_args_pass['outlines'] = topomap_args.get('outlines', 'skirt')
topomap_args_pass['contours'] = contours
evoked.plot_topomap(times=times_sec, axes=map_ax, show=False,
colorbar=False, **topomap_args_pass)
if topomap_args.get('colorbar', True):
from matplotlib import ticker
cbar = plt.colorbar(map_ax[0].images[0], cax=cbar_ax)
if isinstance(contours, (list, np.ndarray)):
cbar.set_ticks(contours)
else:
if locator is None:
locator = ticker.MaxNLocator(nbins=5)
cbar.locator = locator
cbar.update_ticks()
plt.subplots_adjust(left=.1, right=.93, bottom=.14,
top=1. if title is not None else 1.2)
# connection lines
# draw the connection lines between time series and topoplots
lines = [_connection_line(timepoint, fig, ts_ax, map_ax_)
for timepoint, map_ax_ in zip(times_ts, map_ax)]
for line in lines:
fig.lines.append(line)
# mark times in time series plot
for timepoint in times_ts:
ts_ax.axvline(timepoint, color='grey', linestyle='-',
linewidth=1.5, alpha=.66, zorder=0)
# show and return it
plt_show(show)
return fig
def _aux_setup_styles(conditions, style_dict, style, default):
"""Set linestyles and colors for plot_compare_evokeds."""
# check user-supplied style to condition matching
tags = set([tag for cond in conditions for tag in cond.split("/")])
msg = ("Can't map between conditions and the provided {0}. Make sure "
"you have provided keys in the format of '/'-separated tags, "
"and that these correspond to '/'-separated tags for the condition "
"names (e.g., conditions like 'Visual/Right', and styles like "
"'colors=dict(Visual='red'))'. The offending tag was '{1}'.")
for key in style_dict:
for tag in key.split("/"):
if tag not in tags:
raise ValueError(msg.format(style, tag))
# check condition to style matching, and fill in defaults
condition_warning = "Condition {0} could not be mapped to a " + style
style_warning = ". Using the default of {0}.".format(default)
for condition in conditions:
if condition not in style_dict:
if "/" not in condition:
warn(condition_warning.format(condition) + style_warning)
style_dict[condition] = default
for style_ in style_dict:
if style_ in condition.split("/"):
style_dict[condition] = style_dict[style_]
break
return style_dict
def _truncate_yaxis(axes, ymin, ymax, orig_ymin, orig_ymax, fraction,
any_positive, any_negative, truncation_style):
"""Truncate the y axis in plot_compare_evokeds."""
if truncation_style != "max_ticks":
abs_lims = (orig_ymax if orig_ymax > np.abs(orig_ymin)
else np.abs(orig_ymin))
ymin_, ymax_ = (-(abs_lims // fraction), abs_lims // fraction)
# user supplied ymin and ymax overwrite everything
if ymin is not None and ymin > ymin_:
ymin_ = ymin
if ymax is not None and ymax < ymax_:
ymax_ = ymax
yticks = (ymin_ if any_negative else 0, ymax_ if any_positive else 0)
axes.set_yticks(yticks)
ymin_bound, ymax_bound = (-(abs_lims // fraction),
abs_lims // fraction)
# user supplied ymin and ymax still overwrite everything
if ymin is not None and ymin > ymin_bound:
ymin_bound = ymin
if ymax is not None and ymax < ymax_bound:
ymax_bound = ymax
precision = 0.25 # round to .25
if ymin is None:
ymin_bound = round(ymin_bound / precision) * precision
if ymax is None:
ymax_bound = round(ymax_bound / precision) * precision
axes.spines['left'].set_bounds(ymin_bound, ymax_bound)
else: # code stolen from seaborn
yticks = axes.get_yticks()
firsttick = np.compress(yticks >= min(axes.get_ylim()),
yticks)[0]
lasttick = np.compress(yticks <= max(axes.get_ylim()),
yticks)[-1]
axes.spines['left'].set_bounds(firsttick, lasttick)
newticks = yticks.compress(yticks <= lasttick)
newticks = newticks.compress(newticks >= firsttick)
axes.set_yticks(newticks)
ymin_bound, ymax_bound = newticks[[0, -1]]
return ymin_bound, ymax_bound
def _combine_grad(evoked, picks):
"""Create a new instance of Evoked with combined gradiometers (RMSE)."""
def pair_and_combine(data):
data = data ** 2
data = (data[::2, :] + data[1::2, :]) / 2
return np.sqrt(data)
picks, ch_names = _grad_pair_pick_and_name(evoked.info, picks)
this_data = pair_and_combine(evoked.data[picks, :])
ch_names = ch_names[::2]
evoked = evoked.copy().pick_channels(ch_names)
combined_ch_names = [ch_name[:-1] + "X" for ch_name in ch_names]
evoked.rename_channels({c_old: c_new for c_old, c_new
in zip(evoked.ch_names, combined_ch_names)})
evoked.data = this_data
return evoked
def _check_loc_legal(loc, what='your choice', default=1):
"""Check if loc is a legal location for MPL subordinate axes."""
true_default = {"show_legend": 3, "show_sensors": 4}.get(what, default)
if isinstance(loc, bool) and loc:
loc = true_default
loc_dict = {'upper right': 1, 'upper left': 2, 'lower left': 3,
'lower right': 4, 'right': 5, 'center left': 6,
'center right': 7, 'lower center': 8, 'upper center': 9,
'center': 10}
loc_ = loc_dict.get(loc, loc)
if loc_ not in range(11):
raise ValueError(str(loc) + " is not a legal MPL loc, please supply "
"another value for " + what + ".")
return loc_
def _format_evokeds_colors(evokeds, cmap, colors):
"""Set up to have evokeds as a dict as well as colors."""
from ..evoked import Evoked, _check_evokeds_ch_names_times
if isinstance(evokeds, Evoked):
evokeds = dict(Evoked=evokeds) # title becomes 'Evoked'
elif not isinstance(evokeds, dict): # it's assumed to be a list
if (cmap is not None) and (colors is None):
colors = dict((str(ii + 1), ii) for ii, _ in enumerate(evokeds))
evokeds = dict((str(ii + 1), evoked)
for ii, evoked in enumerate(evokeds))
else:
assert isinstance(evokeds, dict)
if (colors is None) and cmap is not None:
raise ValueError('If evokeds is a dict and a cmap is passed, '
'you must specify the colors.')
for cond in evokeds.keys():
_validate_type(cond, 'str', "Conditions")
# Now make sure all values are list of Evoked objects
evokeds = {condition: [v] if isinstance(v, Evoked) else v
for condition, v in evokeds.items()}
# Check that all elements are of type evoked
for this_evoked in evokeds.values():
for ev in this_evoked:
_validate_type(ev, Evoked, "All evokeds entries ", "Evoked")
# Check that all evoked objects have the same time axis and channels
all_evoked = sum(evokeds.values(), [])
_check_evokeds_ch_names_times(all_evoked)
return evokeds, colors
def _setup_styles(conditions, styles, cmap, colors, linestyles):
"""Set up plotting styles for each condition."""
import matplotlib.pyplot as plt
# continuous colors
the_colors, color_conds, color_order = None, None, None
colors_are_float = False
if cmap is not None:
for color_value in colors.values():
try:
float(color_value)
except ValueError:
raise TypeError("If ``cmap`` is not None, the values of "
"``colors`` must be numeric. Got %s" %
type(color_value))
cmapper = getattr(plt.cm, cmap, cmap)
color_conds = list(colors.keys())
all_colors = [colors[cond] for cond in color_conds]
color_order = np.array(all_colors).argsort()
color_indices = color_order.argsort()
if all([isinstance(color, Integral) for color in all_colors]):
msg = "Integer colors detected, mapping to rank positions ..."
n_colors = len(all_colors)
colors_ = {cond: ind for cond, ind in
zip(color_conds, color_indices)}
def convert_colors(color):
return colors_[color]
else:
for color in all_colors:
if not 0 <= color <= 1:
raise ValueError("Values of colors must be all-integer or "
"floats between 0 and 1, got %s." % color)
msg = "Float colors detected, mapping to percentiles ..."
n_colors = 101 # percentiles plus 1 if we have 1.0s
colors_old = colors.copy()
def convert_colors(color):
return int(colors_old[color] * 100)
colors_are_float = True
logger.info(msg)
the_colors = cmapper(np.linspace(0, 1, n_colors))
colors = dict()
for cond in conditions:
cond_ = cond.split("/")
for color in color_conds:
if color in cond_:
colors[cond] = the_colors[convert_colors(color)]
continue
# categorical colors
if not isinstance(colors, dict):
colors_ = _get_color_list()
if len(conditions) > len(colors_):
msg = ("Trying to plot more than {0} conditions. We provide"
"only {0} default colors. Please supply colors manually.")
raise ValueError(msg.format(len(colors_)))
colors = dict((condition, color) for condition, color
in zip(conditions, colors_))
else:
colors = _aux_setup_styles(conditions, colors, "color", "grey")
# linestyles
if not isinstance(linestyles, dict):
linestyles = dict((condition, linestyle) for condition, linestyle in
zip(conditions, ['-'] * len(conditions)))
else:
linestyles = _aux_setup_styles(conditions, linestyles,
"linestyle", "-")
# finally, put it all together
if styles is None:
styles = dict()
for condition, color, linestyle in zip(conditions, colors, linestyles):
styles[condition] = styles.get(condition, dict())
styles[condition]['c'] = styles[condition].get('c', colors[condition])
styles[condition]['linestyle'] = styles[condition].get(
'linestyle', linestyles[condition])
return styles, the_colors, color_conds, color_order, colors_are_float
def plot_compare_evokeds(evokeds, picks=None, gfp=False, colors=None,
linestyles=['-'], styles=None, cmap=None,
vlines="auto", ci=0.95, truncate_yaxis="max_ticks",
truncate_xaxis=True, ylim=dict(), invert_y=False,
show_sensors=None, show_legend=True,
split_legend=False, axes=None, title=None, show=True):
"""Plot evoked time courses for one or more conditions and/or channels.
Parameters
----------
evokeds : instance of mne.Evoked | list | dict
If a single Evoked instance, it is plotted as a time series.
If a dict whose values are Evoked objects, the contents are plotted as
single time series each and the keys are used as condition labels.
If a list of Evokeds, the contents are plotted with indices as labels.
If a [dict/list] of lists, the unweighted mean is plotted as a time
series and the parametric confidence interval is plotted as a shaded
area. All instances must have the same shape - channel numbers, time
points etc.
If dict, keys must be of type str.
picks : None | int | list of int
If int or list of int, the indices of the sensors to average and plot.
If multiple channel types are selected, one figure will be returned for
each channel type.
If the selected channels are gradiometers, the signal from
corresponding (gradiometer) pairs will be combined.
If None, it defaults to all data channels, in which case the global
field power will be plotted for all channel types available.
gfp : bool
If True, the channel type wise GFP is plotted.
If `picks` is None (default), this is set to True.
colors : list | dict | None
If a list, will be sequentially used for line colors.
If a dict, can map evoked keys or '/'-separated (HED) tags to
conditions.
For example, if `evokeds` is a dict with the keys "Aud/L", "Aud/R",
"Vis/L", "Vis/R", `colors` can be `dict(Aud='r', Vis='b')` to map both
Aud/L and Aud/R to the color red and both Visual conditions to blue.
If None (default), a sequence of desaturated colors is used.
If `cmap` is not None, `colors` will indicate how each condition is
colored with reference to its position on the colormap - see `cmap`
below. In that case, the values of colors must be either integers,
in which case they will be mapped to colors in rank order; or floats
between 0 and 1, in which case they will be mapped to percentiles of
the colormap.
linestyles : list | dict
If a list, will be sequentially and repeatedly used for evoked plot
linestyles.
If a dict, can map the `evoked` keys or '/'-separated (HED) tags to
conditions.
For example, if evokeds is a dict with the keys "Aud/L", "Aud/R",
"Vis/L", "Vis/R", `linestyles` can be `dict(L='--', R='-')` to map both
Aud/L and Vis/L to dashed lines and both Right-side conditions to
straight lines.
styles : dict | None
If a dict, keys must map to evoked keys or conditions, and values must
be a dict of legal inputs to `matplotlib.pyplot.plot`. These
parameters will be passed to the line plot call of the corresponding
condition, overriding defaults.
E.g., if evokeds is a dict with the keys "Aud/L", "Aud/R",
"Vis/L", "Vis/R", `styles` can be `{"Aud/L": {"linewidth": 1}}` to set
the linewidth for "Aud/L" to 1. Note that HED ('/'-separated) tags are
not supported.
cmap : None | str | tuple
If not None, plot evoked activity with colors from a color gradient
(indicated by a str referencing a matplotlib colormap - e.g., "viridis"
or "Reds").
If ``evokeds`` is a list and ``colors`` is `None`, the color will
depend on the list position. If ``colors`` is a list, it must contain
integers where the list positions correspond to ``evokeds``, and the
value corresponds to the position on the colorbar.
If ``evokeds`` is a dict, ``colors`` should be a dict mapping from
(potentially HED-style) condition tags to numbers corresponding to
positions on the colorbar - rank order for integers, or floats for
percentiles. E.g., ::
evokeds={"cond1/A": ev1, "cond2/A": ev2, "cond3/A": ev3, "B": ev4},
            cmap='viridis', colors=dict(cond1=1, cond2=2, cond3=3),
linestyles={"A": "-", "B": ":"}
If ``cmap`` is a tuple of length 2, the first item must be
a string which will become the colorbar label, and the second one
must indicate a colormap, e.g. ::
            cmap=('conds', 'viridis'), colors=dict(cond1=1, cond2=2, cond3=3),
vlines : "auto" | list of float
        A list of times (in seconds) at which to plot dashed vertical lines.
If "auto" and the supplied data includes 0, it is set to [0.]
and a vertical bar is plotted at time 0. If an empty list is passed,
no vertical lines are plotted.
ci : float | callable | None | bool
If not None and ``evokeds`` is a [list/dict] of lists, a shaded
confidence interval is drawn around the individual time series. If
float, a percentile bootstrap method is used to estimate the confidence
interval and this value determines the CI width. E.g., if this value is
.95 (the default), the 95% confidence interval is drawn. If a callable,
it must take as its single argument an array (observations x times) and
return the upper and lower confidence bands.
If None or False, no confidence band is plotted.
If True, a 95% bootstrapped confidence interval is drawn.
truncate_yaxis : bool | str
If not False, the left y axis spine is truncated to reduce visual
clutter. If 'max_ticks', the spine is truncated at the minimum and
maximum ticks. Else, it is truncated to half the max absolute value,
rounded to .25. Defaults to "max_ticks".
truncate_xaxis : bool
        If True, the x axis is truncated to span from the first to the last
        xtick. Defaults to True.
ylim : dict | None
ylim for plots (after scaling has been applied). e.g.
ylim = dict(eeg=[-20, 20])
Valid keys are eeg, mag, grad, misc. If None, the ylim parameter
for each channel equals the pyplot default.
invert_y : bool
If True, negative values are plotted up (as is sometimes done
for ERPs out of tradition). Defaults to False.
    show_sensors : bool | int | str | None
If not False, channel locations are plotted on a small head circle.
If int or str, the position of the axes (forwarded to
``mpl_toolkits.axes_grid1.inset_locator.inset_axes``).
If None, defaults to True if ``gfp`` is False, else to False.
show_legend : bool | str | int
If not False, show a legend. If int or str, it is the position of the
legend axes (forwarded to
``mpl_toolkits.axes_grid1.inset_locator.inset_axes``).
split_legend : bool
If True, the legend shows color and linestyle separately; `colors` must
not be None. Defaults to True if ``cmap`` is not None, else defaults to
False.
axes : None | `matplotlib.axes.Axes` instance | list of `axes`
What axes to plot to. If None, a new axes is created.
When plotting multiple channel types, can also be a list of axes, one
per channel type.
title : None | str
If str, will be plotted as figure title. If None, the channel names
will be shown.
show : bool
If True, show the figure.
Returns
-------
fig : Figure | list of Figures
The figure(s) in which the plot is drawn. When plotting multiple
channel types, a list of figures, one for each channel type is
returned.
Notes
-----
    When multiple channels are passed, this function combines them all to
    get one time course for each condition. If ``gfp`` is True, channels are
    combined by computing the global field power (GFP); otherwise, a plain
    mean across channels is taken.
This function is useful for comparing multiple ER[P/F]s - e.g., for
multiple conditions - at a specific location.
It can plot:
- a simple :class:`mne.Evoked` object,
- a list or dict of :class:`mne.Evoked` objects (e.g., for multiple
conditions),
- a list or dict of lists of :class:`mne.Evoked` (e.g., for multiple
subjects in multiple conditions).
In the last case, it can show a confidence interval (across e.g. subjects)
using parametric or bootstrap estimation.
When ``picks`` includes more than one planar gradiometer, the planar
    gradiometers are combined with RMSE. For example, data from a
VectorView system with 204 gradiometers will be transformed to
102 channels.
"""
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
evokeds, colors = _format_evokeds_colors(evokeds, cmap, colors)
conditions = sorted(list(evokeds.keys()))
# check ci parameter
if ci is None:
ci = False
if ci is True:
ci = .95
elif ci is not False and not (isinstance(ci, np.float) or callable(ci)):
raise TypeError('ci must be None, bool, float or callable, got %s' %
type(ci))
# get and set a few limits and variables (times, channels, units)
one_evoked = evokeds[conditions[0]][0]
times = one_evoked.times
info = one_evoked.info
tmin, tmax = times[0], times[-1]
if vlines == "auto" and (tmin < 0 and tmax > 0):
vlines = [0.]
_validate_type(vlines, (list, tuple), "vlines", "list or tuple")
if isinstance(picks, Integral):
picks = [picks]
elif picks is None:
logger.info("No picks, plotting the GFP ...")
gfp = True
picks = _pick_data_channels(info, with_ref_meg=False)
_validate_type(picks, (list, np.ndarray), "picks",
"list or np.array of integers")
for entry in picks:
_validate_type(entry, 'int', "entries of picks", "integers")
if len(picks) == 0:
raise ValueError("No valid channels were found to plot the GFP. " +
"Use 'picks' instead to select them manually.")
if ylim is None:
ylim = dict()
# deal with picks: infer indices and names
if gfp is True:
if show_sensors is None:
show_sensors = False # don't show sensors for GFP
ch_names = ['Global Field Power']
if len(picks) < 2:
raise ValueError("Cannot compute GFP for fewer than 2 channels, "
"please pick more than %d channels." % len(picks))
else:
if show_sensors is None:
show_sensors = True # show sensors when not doing GFP
ch_names = [one_evoked.ch_names[pick] for pick in picks]
picks_by_types = channel_indices_by_type(info, picks)
# keep only channel types for which there is a channel:
ch_types = [t for t in picks_by_types if len(picks_by_types[t]) > 0]
# let's take care of axis and figs
if axes is not None:
if not isinstance(axes, list):
axes = [axes]
_validate_if_list_of_axes(axes, obligatory_len=len(ch_types))
else:
axes = [plt.subplots(figsize=(8, 6))[1] for _ in range(len(ch_types))]
if len(ch_types) > 1:
logger.info("Multiple channel types selected, returning one figure "
"per type.")
figs = list()
for ii, t in enumerate(ch_types):
picks_ = picks_by_types[t]
title_ = "GFP, " + t if (title is None and gfp is True) else title
figs.append(plot_compare_evokeds(
evokeds, picks=picks_, gfp=gfp, colors=colors,
linestyles=linestyles, styles=styles, vlines=vlines, ci=ci,
truncate_yaxis=truncate_yaxis, ylim=ylim, invert_y=invert_y,
axes=axes[ii], title=title_, show=show))
return figs
# From now on there is only 1 channel type
assert len(ch_types) == 1
ch_type = ch_types[0]
    all_positive = gfp  # True when plotting GFP (all-positive), False otherwise
pos_picks = picks # keep locations to pick for plotting
if ch_type == "grad" and len(picks) > 1:
logger.info('Combining all planar gradiometers with RMSE.')
pos_picks, _ = _grad_pair_pick_and_name(one_evoked.info, picks)
pos_picks = pos_picks[::2]
all_positive = True
for cond, this_evokeds in evokeds.items():
evokeds[cond] = [_combine_grad(e, picks) for e in this_evokeds]
ch_names = evokeds[cond][0].ch_names
picks = range(len(ch_names))
del info
ymin, ymax = ylim.get(ch_type, [None, None])
scaling = _handle_default("scalings")[ch_type]
unit = _handle_default("units")[ch_type]
if (ymin is None) and all_positive:
ymin = 0. # 'grad' and GFP are plotted as all-positive
# if we have a dict/list of lists, we compute the grand average and the CI
_ci_fun = None
if ci is not False:
if callable(ci):
_ci_fun = ci
else:
from ..stats import _ci
_ci_fun = partial(_ci, ci=ci, method="bootstrap")
# calculate the CI
ci_dict, data_dict = dict(), dict()
for cond in conditions:
this_evokeds = evokeds[cond]
# this will fail if evokeds do not have the same structure
# (e.g. channel count)
data = [e.data[picks, :] * scaling for e in this_evokeds]
data = np.array(data)
if gfp:
data = np.sqrt(np.mean(data * data, axis=1))
else:
data = np.mean(data, axis=1) # average across channels
if _ci_fun is not None: # compute CI if requested:
ci_dict[cond] = _ci_fun(data)
# average across conditions:
data_dict[cond] = data = np.mean(data, axis=0)
_check_if_nan(data)
del evokeds
# we now have dicts for data ('evokeds' - grand averaged Evoked's)
# and the CI ('ci_array') with cond name labels
# style the individual condition time series
# Styles (especially color and linestyle) are pulled from a dict 'styles'.
# This dict has one entry per condition. Its color and linestyle entries
# are pulled from the 'colors' and 'linestyles' dicts via '/'-tag matching
# unless they are overwritten by entries from a user-provided 'styles'.
# first, copy to avoid overwriting
styles = deepcopy(styles)
colors = deepcopy(colors)
linestyles = deepcopy(linestyles)
# second, check if input is valid
if isinstance(styles, dict):
for style_ in styles:
if style_ not in conditions:
raise ValueError("Could not map between 'styles' and "
"conditions. Condition " + style_ +
" was not found in the supplied data.")
# third, color
# check: is color a list?
if (colors is not None and not isinstance(colors, string_types) and
not isinstance(colors, dict) and len(colors) > 1):
colors = dict((condition, color) for condition, color
in zip(conditions, colors))
if cmap is not None:
if not isinstance(cmap, string_types) and len(cmap) == 2:
cmap_label, cmap = cmap
else:
cmap_label = ""
# dealing with a split legend
if split_legend is None:
split_legend = cmap is not None # default to True iff cmap is given
if split_legend is True:
if colors is None:
raise ValueError(
"If `split_legend` is True, `colors` must not be None.")
# mpl 1.3 requires us to split it like this. with recent mpl,
# we could use the label parameter of the Line2D
legend_lines, legend_labels = list(), list()
if cmap is None: # ... one set of lines for the colors
for color in sorted(colors.keys()):
line = mlines.Line2D([], [], linestyle="-",
color=colors[color])
legend_lines.append(line)
legend_labels.append(color)
if len(list(linestyles)) > 1: # ... one set for the linestyle
for style, s in linestyles.items():
line = mlines.Line2D([], [], color='k', linestyle=s)
legend_lines.append(line)
legend_labels.append(style)
styles, the_colors, color_conds, color_order, colors_are_float =\
_setup_styles(data_dict.keys(), styles, cmap, colors, linestyles)
# We now have a 'styles' dict with one entry per condition, specifying at
# least color and linestyles.
ax, = axes
del axes
# the actual plot
any_negative, any_positive = False, False
for condition in conditions:
# plot the actual data ('d') as a line
d = data_dict[condition].T
ax.plot(times, d, zorder=1000, label=condition, clip_on=False,
**styles[condition])
if np.any(d > 0) or all_positive:
any_positive = True
if np.any(d < 0):
any_negative = True
# plot the confidence interval if available
if _ci_fun is not None:
ci_ = ci_dict[condition]
ax.fill_between(times, ci_[0].flatten(), ci_[1].flatten(),
zorder=9, color=styles[condition]['c'], alpha=.3,
clip_on=False)
# truncate the y axis
orig_ymin, orig_ymax = ax.get_ylim()
if not any_positive:
orig_ymax = 0
if not any_negative:
orig_ymin = 0
ax.set_ylim(orig_ymin if ymin is None else ymin,
orig_ymax if ymax is None else ymax)
fraction = 2 if ax.get_ylim()[0] >= 0 else 3
if truncate_yaxis is not False:
_, ymax_bound = _truncate_yaxis(
ax, ymin, ymax, orig_ymin, orig_ymax, fraction,
any_positive, any_negative, truncate_yaxis)
else:
if truncate_yaxis is True and ymin is not None and ymin > 0:
warn("ymin is all-positive, not truncating yaxis")
ymax_bound = ax.get_ylim()[-1]
title = _set_title_multiple_electrodes(
title, "average" if gfp is False else "gfp", ch_names, ch_type=ch_type)
ax.set_title(title)
current_ymin = ax.get_ylim()[0]
# plot v lines
if invert_y is True and current_ymin < 0:
upper_v, lower_v = -ymax_bound, ax.get_ylim()[-1]
else:
upper_v, lower_v = ax.get_ylim()[0], ymax_bound
if vlines:
ax.vlines(vlines, upper_v, lower_v, linestyles='--', colors='k',
linewidth=1., zorder=1)
_setup_ax_spines(ax, vlines, tmin, tmax, invert_y, ymax_bound, unit,
truncate_xaxis)
# and now for 3 "legends" ..
# a head plot showing the sensors that are being plotted
if show_sensors:
if show_sensors is True:
ymin, ymax = np.abs(ax.get_ylim())
show_sensors = "lower right" if ymin > ymax else "upper right"
try:
pos = _auto_topomap_coords(one_evoked.info, pos_picks,
ignore_overlap=True, to_sphere=True)
except ValueError:
warn("Cannot find channel coordinates in the supplied Evokeds. "
"Not showing channel locations.")
else:
head_pos = {'center': (0, 0), 'scale': (0.5, 0.5)}
pos, outlines = _check_outlines(pos, np.array([1, 1]), head_pos)
_validate_type(show_sensors, (np.int, bool, str),
"show_sensors", "numeric, str or bool")
show_sensors = _check_loc_legal(show_sensors, "show_sensors")
_plot_legend(pos, ["k"] * len(picks), ax, list(), outlines,
show_sensors, size=25)
# the condition legend
if len(conditions) > 1 and show_legend is not False:
show_legend = _check_loc_legal(show_legend, "show_legend")
legend_params = dict(loc=show_legend, frameon=True)
if split_legend:
if len(legend_lines) > 1:
ax.legend(legend_lines, legend_labels, # see above: mpl 1.3
ncol=1 + (len(legend_lines) // 4), **legend_params)
else:
ax.legend(ncol=1 + (len(conditions) // 5), **legend_params)
# the colormap, if `cmap` is provided
if split_legend and cmap is not None:
# plot the colorbar ... complicated cause we don't have a heatmap
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax)
ax_cb = divider.append_axes("right", size="5%", pad=0.05)
if colors_are_float:
ax_cb.imshow(the_colors[:, np.newaxis, :], interpolation='none',
aspect=.05)
color_ticks = np.array(list(set(colors.values()))) * 100
ax_cb.set_yticks(color_ticks)
ax_cb.set_yticklabels(color_ticks)
else:
ax_cb.imshow(the_colors[:, np.newaxis, :], interpolation='none')
ax_cb.set_yticks(np.arange(len(the_colors)))
ax_cb.set_yticklabels(np.array(color_conds)[color_order])
ax_cb.yaxis.tick_right()
ax_cb.set(xticks=(), ylabel=cmap_label)
plt_show(show)
return ax.figure
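# A minimal usage sketch (hypothetical file name and conditions; assumes an
# -ave.fif file containing the four listed conditions is available and that
# this function is importable alongside the mne-python API):
#
#     >>> import mne
#     >>> fname = 'sample-ave.fif'  # hypothetical path
#     >>> evokeds = {cond: mne.read_evokeds(fname, condition=cond)
#     ...            for cond in ("Aud/L", "Aud/R", "Vis/L", "Vis/R")}
#     >>> plot_compare_evokeds(evokeds, picks=[0, 1, 2],
#     ...                      colors=dict(Aud='r', Vis='b'),
#     ...                      linestyles=dict(L='--', R='-'))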
| bsd-3-clause |
mirca/fsopy | build/lib/fsopy/distributions/exp_weibull.py | 2 | 2292 |
import numpy as np
from ..simulation.sampling import rejection_sampling
__all__ = ['pdf', 'rvs']
def pdf(r, beta, alpha, eta):
""" Computes the probability density function (pdf) of a random variable
with Exponentiated Weibull distribution.
Parameters
----------
r : numpy.ndarray
Support of the random variable. Must be [a,b), a > 0, b > a.
beta : float
Shape parameter related to the scintillation index.
alpha : float
Shape parameter related to the receiver aperture size. It is also the
number of multipath scatter components at the receiver.
eta : float
        Scale parameter that depends on ``beta``.
    Returns
    -------
pdf : numpy.ndarray
The expression of the pdf.
"""
return ((alpha * beta / eta) * np.power(r / eta, beta - 1.0) *
np.exp(- np.power(r / eta, beta)) *
np.power(1.0 - np.exp(- np.power(r / eta, beta)), alpha - 1.0))
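# For reference, the density implemented above has the exponentiated Weibull
# closed form (written here as a comment mirroring the return expression; it
# integrates to 1 over r > 0 for valid parameters):
#
#     f(r) = (alpha * beta / eta) * (r / eta)**(beta - 1)
#            * exp(-(r / eta)**beta)
#            * (1 - exp(-(r / eta)**beta))**(alpha - 1),    for r > 0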
def rvs(K, beta, alpha, eta, inter=None):
""" Generates ``K`` i.i.d. samples according to the Exponentiadted Weibull
(EW) distribution using the acceptance-rejection method.
Parameters
----------
K : integer
        Number of i.i.d. samples.
beta : float
Shape parameter related to the scintillation index.
alpha : float
Shape parameter related to the receiver aperture size. It is also the
number of multipath scatter components at the receiver.
eta : float
        Scale parameter that depends on ``beta``.
inter : float (optional)
Interval on which the samples will be. Default values are ``a=1e-6``
and ``b=10.0``.
    Returns
    -------
rvs : numpy.ndarray
        1-D array with ``K`` i.i.d. samples from the EW distribution.
Examples
--------
>>> import numpy as np
>>> from matplotlib import pyplot as plt
>>> from fsopy import exp_weibull
>>> samples = exp_weibull.rvs(int(1e6), 1, 1, 1, inter=(1e-6, 4.0))
>>> plt.hist(samples, bins=100, normed=True)
>>> r = np.linspace(1e-6, 4., int(1e4))
>>> pdf = exp_weibull.pdf(r, 1, 1, 1)
>>> plt.plot(r, pdf)
>>> plt.show()
"""
if inter is None:
inter = (1e-6, 10.0)
return rejection_sampling(pdf, inter, K, beta, alpha, eta)
| mit |
microsoft/LightGBM | python-package/lightgbm/basic.py | 1 | 161035 |
# coding: utf-8
"""Wrapper for C API of LightGBM."""
import abc
import ctypes
import json
import warnings
from collections import OrderedDict
from copy import deepcopy
from functools import wraps
from logging import Logger
from os import SEEK_END
from os.path import getsize
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
import numpy as np
import scipy.sparse
from .compat import PANDAS_INSTALLED, concat, dt_DataTable, is_dtype_sparse, pd_DataFrame, pd_Series
from .libpath import find_lib_path
ZERO_THRESHOLD = 1e-35
def _get_sample_count(total_nrow: int, params: str):
sample_cnt = ctypes.c_int(0)
_safe_call(_LIB.LGBM_GetSampleCount(
ctypes.c_int32(total_nrow),
c_str(params),
ctypes.byref(sample_cnt),
))
return sample_cnt.value
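# Illustrative call (a sketch; the exact value is decided by the native
# library, which caps the sample count at ``bin_construct_sample_cnt``):
#
#     >>> _get_sample_count(1_000_000, "bin_construct_sample_cnt=200000")
#     200000
#     >>> _get_sample_count(1_000, "bin_construct_sample_cnt=200000")
#     1000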
class _DummyLogger:
def info(self, msg):
print(msg)
def warning(self, msg):
warnings.warn(msg, stacklevel=3)
_LOGGER = _DummyLogger()
def register_logger(logger):
"""Register custom logger.
Parameters
----------
logger : logging.Logger
Custom logger.
"""
if not isinstance(logger, Logger):
raise TypeError("Logger should inherit logging.Logger class")
global _LOGGER
_LOGGER = logger
def _normalize_native_string(func):
"""Join log messages from native library which come by chunks."""
msg_normalized = []
@wraps(func)
def wrapper(msg):
nonlocal msg_normalized
if msg.strip() == '':
msg = ''.join(msg_normalized)
msg_normalized = []
return func(msg)
else:
msg_normalized.append(msg)
return wrapper
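# Sketch of the chunk-joining behavior: the native library may emit one log
# line in several pieces followed by a whitespace-only chunk, so the wrapped
# function only ever sees the joined message.
#
#     >>> @_normalize_native_string
#     ... def _collect(msg):
#     ...     print(msg)
#     >>> _collect("[LightGBM] ")            # buffered, nothing printed
#     >>> _collect("Finished loading model")  # still buffered
#     >>> _collect("\n")  # flush: prints "[LightGBM] Finished loading model"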
def _log_info(msg):
_LOGGER.info(msg)
def _log_warning(msg):
_LOGGER.warning(msg)
@_normalize_native_string
def _log_native(msg):
_LOGGER.info(msg)
def _log_callback(msg):
"""Redirect logs from native library into Python."""
_log_native(str(msg.decode('utf-8')))
def _load_lib():
"""Load LightGBM library."""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
lib.LGBM_GetLastError.restype = ctypes.c_char_p
callback = ctypes.CFUNCTYPE(None, ctypes.c_char_p)
lib.callback = callback(_log_callback)
if lib.LGBM_RegisterLogCallback(lib.callback) != 0:
raise LightGBMError(lib.LGBM_GetLastError().decode('utf-8'))
return lib
_LIB = _load_lib()
NUMERIC_TYPES = (int, float, bool)
def _safe_call(ret):
"""Check the return value from C API call.
Parameters
----------
ret : int
The return value from C API calls.
"""
if ret != 0:
raise LightGBMError(_LIB.LGBM_GetLastError().decode('utf-8'))
def is_numeric(obj):
"""Check whether object is a number or not, include numpy number, etc."""
try:
float(obj)
return True
except (TypeError, ValueError):
# TypeError: obj is not a string or a number
# ValueError: invalid literal
return False
def is_numpy_1d_array(data):
"""Check whether data is a numpy 1-D array."""
return isinstance(data, np.ndarray) and len(data.shape) == 1
def is_numpy_column_array(data):
"""Check whether data is a column numpy array."""
if not isinstance(data, np.ndarray):
return False
shape = data.shape
return len(shape) == 2 and shape[1] == 1
def cast_numpy_1d_array_to_dtype(array, dtype):
"""Cast numpy 1d array to given dtype."""
if array.dtype == dtype:
return array
return array.astype(dtype=dtype, copy=False)
def is_1d_list(data):
"""Check whether data is a 1-D list."""
return isinstance(data, list) and (not data or is_numeric(data[0]))
def list_to_1d_numpy(data, dtype=np.float32, name='list'):
"""Convert data to numpy 1-D array."""
if is_numpy_1d_array(data):
return cast_numpy_1d_array_to_dtype(data, dtype)
elif is_numpy_column_array(data):
_log_warning('Converting column-vector to 1d array')
array = data.ravel()
return cast_numpy_1d_array_to_dtype(array, dtype)
elif is_1d_list(data):
return np.array(data, dtype=dtype, copy=False)
elif isinstance(data, pd_Series):
if _get_bad_pandas_dtypes([data.dtypes]):
raise ValueError('Series.dtypes must be int, float or bool')
return np.array(data, dtype=dtype, copy=False) # SparseArray should be supported as well
else:
raise TypeError(f"Wrong type({type(data).__name__}) for {name}.\n"
"It should be list, numpy 1-D array or pandas Series")
def cfloat32_array_to_numpy(cptr, length):
"""Convert a ctypes float pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_float)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected float pointer')
def cfloat64_array_to_numpy(cptr, length):
"""Convert a ctypes double pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_double)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected double pointer')
def cint32_array_to_numpy(cptr, length):
"""Convert a ctypes int pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_int32)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected int32 pointer')
def cint64_array_to_numpy(cptr, length):
"""Convert a ctypes int pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_int64)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected int64 pointer')
def c_str(string):
"""Convert a Python string to C string."""
return ctypes.c_char_p(string.encode('utf-8'))
def c_array(ctype, values):
"""Convert a Python array to C array."""
return (ctype * len(values))(*values)
def json_default_with_numpy(obj):
"""Convert numpy classes to JSON serializable objects."""
if isinstance(obj, (np.integer, np.floating, np.bool_)):
return obj.item()
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj
def param_dict_to_str(data):
"""Convert Python dictionary to string, which is passed to C API."""
if data is None or not data:
return ""
pairs = []
for key, val in data.items():
if isinstance(val, (list, tuple, set)) or is_numpy_1d_array(val):
def to_string(x):
if isinstance(x, list):
return f"[{','.join(map(str, x))}]"
else:
return str(x)
pairs.append(f"{key}={','.join(map(to_string, val))}")
elif isinstance(val, (str, Path, NUMERIC_TYPES)) or is_numeric(val):
pairs.append(f"{key}={val}")
elif val is not None:
raise TypeError(f'Unknown type of parameter:{key}, got:{type(val).__name__}')
return ' '.join(pairs)
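# Example of the generated parameter string (sketch):
#
#     >>> param_dict_to_str({'objective': 'binary',
#     ...                    'metric': ['auc', 'binary_logloss'],
#     ...                    'num_leaves': 31,
#     ...                    'is_unbalance': True})
#     'objective=binary metric=auc,binary_logloss num_leaves=31 is_unbalance=True'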
class _TempFile:
"""Proxy class to workaround errors on Windows."""
def __enter__(self):
with NamedTemporaryFile(prefix="lightgbm_tmp_", delete=True) as f:
self.name = f.name
self.path = Path(self.name)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.path.is_file():
self.path.unlink()
class LightGBMError(Exception):
"""Error thrown by LightGBM."""
pass
# DeprecationWarning is not shown by default, so let's create our own with higher level
class LGBMDeprecationWarning(UserWarning):
"""Custom deprecation warning."""
pass
class _ConfigAliases:
aliases = {"bin_construct_sample_cnt": {"bin_construct_sample_cnt",
"subsample_for_bin"},
"boosting": {"boosting",
"boosting_type",
"boost"},
"categorical_feature": {"categorical_feature",
"cat_feature",
"categorical_column",
"cat_column"},
"data_random_seed": {"data_random_seed",
"data_seed"},
"early_stopping_round": {"early_stopping_round",
"early_stopping_rounds",
"early_stopping",
"n_iter_no_change"},
"enable_bundle": {"enable_bundle",
"is_enable_bundle",
"bundle"},
"eval_at": {"eval_at",
"ndcg_eval_at",
"ndcg_at",
"map_eval_at",
"map_at"},
"group_column": {"group_column",
"group",
"group_id",
"query_column",
"query",
"query_id"},
"header": {"header",
"has_header"},
"ignore_column": {"ignore_column",
"ignore_feature",
"blacklist"},
"is_enable_sparse": {"is_enable_sparse",
"is_sparse",
"enable_sparse",
"sparse"},
"label_column": {"label_column",
"label"},
"linear_tree": {"linear_tree",
"linear_trees"},
"local_listen_port": {"local_listen_port",
"local_port",
"port"},
"machines": {"machines",
"workers",
"nodes"},
"metric": {"metric",
"metrics",
"metric_types"},
"num_class": {"num_class",
"num_classes"},
"num_iterations": {"num_iterations",
"num_iteration",
"n_iter",
"num_tree",
"num_trees",
"num_round",
"num_rounds",
"num_boost_round",
"n_estimators"},
"num_machines": {"num_machines",
"num_machine"},
"num_threads": {"num_threads",
"num_thread",
"nthread",
"nthreads",
"n_jobs"},
"objective": {"objective",
"objective_type",
"app",
"application"},
"pre_partition": {"pre_partition",
"is_pre_partition"},
"tree_learner": {"tree_learner",
"tree",
"tree_type",
"tree_learner_type"},
"two_round": {"two_round",
"two_round_loading",
"use_two_round_loading"},
"verbosity": {"verbosity",
"verbose"},
"weight_column": {"weight_column",
"weight"}}
@classmethod
def get(cls, *args):
ret = set()
for i in args:
ret |= cls.aliases.get(i, {i})
return ret
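# Alias expansion example (based on the table above; unknown keys map to
# themselves):
#
#     >>> sorted(_ConfigAliases.get("num_threads"))
#     ['n_jobs', 'nthread', 'nthreads', 'num_thread', 'num_threads']
#     >>> sorted(_ConfigAliases.get("header", "max_bin"))
#     ['has_header', 'header', 'max_bin']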
def _choose_param_value(main_param_name: str, params: Dict[str, Any], default_value: Any) -> Dict[str, Any]:
"""Get a single parameter value, accounting for aliases.
Parameters
----------
main_param_name : str
Name of the main parameter to get a value for. One of the keys of ``_ConfigAliases``.
params : dict
Dictionary of LightGBM parameters.
default_value : Any
Default value to use for the parameter, if none is found in ``params``.
Returns
-------
params : dict
A ``params`` dict with exactly one value for ``main_param_name``, and all aliases ``main_param_name`` removed.
If both ``main_param_name`` and one or more aliases for it are found, the value of ``main_param_name`` will be preferred.
"""
# avoid side effects on passed-in parameters
params = deepcopy(params)
# find a value, and remove other aliases with .pop()
# prefer the value of 'main_param_name' if it exists, otherwise search the aliases
found_value = None
if main_param_name in params.keys():
found_value = params[main_param_name]
for param in _ConfigAliases.get(main_param_name):
val = params.pop(param, None)
if found_value is None and val is not None:
found_value = val
if found_value is not None:
params[main_param_name] = found_value
else:
params[main_param_name] = default_value
return params
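# Resolution sketch: the canonical name wins over its aliases, and every alias
# key is removed from the returned copy.
#
#     >>> _choose_param_value("num_iterations",
#     ...                     {"n_estimators": 100, "num_iterations": 50,
#     ...                      "metric": "auc"},
#     ...                     default_value=200)
#     {'metric': 'auc', 'num_iterations': 50}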
MAX_INT32 = (1 << 31) - 1
"""Macro definition of data type in C API of LightGBM"""
C_API_DTYPE_FLOAT32 = 0
C_API_DTYPE_FLOAT64 = 1
C_API_DTYPE_INT32 = 2
C_API_DTYPE_INT64 = 3
"""Matrix is row major in Python"""
C_API_IS_ROW_MAJOR = 1
"""Macro definition of prediction type in C API of LightGBM"""
C_API_PREDICT_NORMAL = 0
C_API_PREDICT_RAW_SCORE = 1
C_API_PREDICT_LEAF_INDEX = 2
C_API_PREDICT_CONTRIB = 3
"""Macro definition of sparse matrix type"""
C_API_MATRIX_TYPE_CSR = 0
C_API_MATRIX_TYPE_CSC = 1
"""Macro definition of feature importance type"""
C_API_FEATURE_IMPORTANCE_SPLIT = 0
C_API_FEATURE_IMPORTANCE_GAIN = 1
"""Data type of data field"""
FIELD_TYPE_MAPPER = {"label": C_API_DTYPE_FLOAT32,
"weight": C_API_DTYPE_FLOAT32,
"init_score": C_API_DTYPE_FLOAT64,
"group": C_API_DTYPE_INT32}
"""String name to int feature importance type mapper"""
FEATURE_IMPORTANCE_TYPE_MAPPER = {"split": C_API_FEATURE_IMPORTANCE_SPLIT,
"gain": C_API_FEATURE_IMPORTANCE_GAIN}
def convert_from_sliced_object(data):
"""Fix the memory of multi-dimensional sliced object."""
if isinstance(data, np.ndarray) and isinstance(data.base, np.ndarray):
if not data.flags.c_contiguous:
_log_warning("Usage of np.ndarray subset (sliced data) is not recommended "
"due to it will double the peak memory cost in LightGBM.")
return np.copy(data)
return data
def c_float_array(data):
"""Get pointer of float numpy array / list."""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
data = convert_from_sliced_object(data)
assert data.flags.c_contiguous
if data.dtype == np.float32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
type_data = C_API_DTYPE_FLOAT32
elif data.dtype == np.float64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
type_data = C_API_DTYPE_FLOAT64
else:
raise TypeError(f"Expected np.float32 or np.float64, met type({data.dtype})")
else:
raise TypeError(f"Unknown type({type(data).__name__})")
return (ptr_data, type_data, data) # return `data` to avoid the temporary copy is freed
def c_int_array(data):
"""Get pointer of int numpy array / list."""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
data = convert_from_sliced_object(data)
assert data.flags.c_contiguous
if data.dtype == np.int32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
type_data = C_API_DTYPE_INT32
elif data.dtype == np.int64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int64))
type_data = C_API_DTYPE_INT64
else:
raise TypeError(f"Expected np.int32 or np.int64, met type({data.dtype})")
else:
raise TypeError(f"Unknown type({type(data).__name__})")
return (ptr_data, type_data, data) # return `data` to avoid the temporary copy is freed
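# Pointer-conversion sketch: both helpers return the C pointer, the dtype code
# and a reference to the (possibly copied) array, which must be kept alive
# while the pointer is in use.
#
#     >>> import numpy as np
#     >>> ptr, type_code, ref = c_int_array(np.array([1, 2, 3], dtype=np.int32))
#     >>> type_code == C_API_DTYPE_INT32
#     True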
def _get_bad_pandas_dtypes(dtypes):
pandas_dtype_mapper = {'int8': 'int', 'int16': 'int', 'int32': 'int',
'int64': 'int', 'uint8': 'int', 'uint16': 'int',
'uint32': 'int', 'uint64': 'int', 'bool': 'int',
'float16': 'float', 'float32': 'float', 'float64': 'float'}
bad_indices = [i for i, dtype in enumerate(dtypes) if (dtype.name not in pandas_dtype_mapper
and (not is_dtype_sparse(dtype)
or dtype.subtype.name not in pandas_dtype_mapper))]
return bad_indices
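# Sketch: returns the indices of columns whose dtypes LightGBM cannot ingest
# directly.
#
#     >>> import numpy as np
#     >>> _get_bad_pandas_dtypes([np.dtype('float64'), np.dtype('object'),
#     ...                         np.dtype('int32')])
#     [1]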
def _data_from_pandas(data, feature_name, categorical_feature, pandas_categorical):
if isinstance(data, pd_DataFrame):
if len(data.shape) != 2 or data.shape[0] < 1:
raise ValueError('Input data must be 2 dimensional and non empty.')
if feature_name == 'auto' or feature_name is None:
data = data.rename(columns=str)
cat_cols = list(data.select_dtypes(include=['category']).columns)
cat_cols_not_ordered = [col for col in cat_cols if not data[col].cat.ordered]
if pandas_categorical is None: # train dataset
pandas_categorical = [list(data[col].cat.categories) for col in cat_cols]
else:
if len(cat_cols) != len(pandas_categorical):
raise ValueError('train and valid dataset categorical_feature do not match.')
for col, category in zip(cat_cols, pandas_categorical):
if list(data[col].cat.categories) != list(category):
data[col] = data[col].cat.set_categories(category)
if len(cat_cols): # cat_cols is list
data = data.copy() # not alter origin DataFrame
data[cat_cols] = data[cat_cols].apply(lambda x: x.cat.codes).replace({-1: np.nan})
if categorical_feature is not None:
if feature_name is None:
feature_name = list(data.columns)
if categorical_feature == 'auto': # use cat cols from DataFrame
categorical_feature = cat_cols_not_ordered
else: # use cat cols specified by user
categorical_feature = list(categorical_feature)
if feature_name == 'auto':
feature_name = list(data.columns)
bad_indices = _get_bad_pandas_dtypes(data.dtypes)
if bad_indices:
bad_index_cols_str = ', '.join(data.columns[bad_indices])
raise ValueError("DataFrame.dtypes for data must be int, float or bool.\n"
"Did not expect the data types in the following fields: "
f"{bad_index_cols_str}")
data = data.values
if data.dtype != np.float32 and data.dtype != np.float64:
data = data.astype(np.float32)
else:
if feature_name == 'auto':
feature_name = None
if categorical_feature == 'auto':
categorical_feature = None
return data, feature_name, categorical_feature, pandas_categorical
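# Conversion sketch: a pandas 'category' column is replaced by its integer
# codes (missing codes become NaN), auto-detected as a categorical feature,
# and its category list is remembered so validation sets can be aligned later.
#
#     >>> import pandas as pd
#     >>> df = pd.DataFrame({'x': [1.0, 2.0, 3.0],
#     ...                    'c': pd.Categorical(['a', 'b', 'a'])})
#     >>> data, feat_names, cat_feats, pandas_cat = _data_from_pandas(
#     ...     df, feature_name='auto', categorical_feature='auto',
#     ...     pandas_categorical=None)
#     >>> feat_names, cat_feats, pandas_cat
#     (['x', 'c'], ['c'], [['a', 'b']])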
def _label_from_pandas(label):
if isinstance(label, pd_DataFrame):
if len(label.columns) > 1:
raise ValueError('DataFrame for label cannot have multiple columns')
if _get_bad_pandas_dtypes(label.dtypes):
raise ValueError('DataFrame.dtypes for label must be int, float or bool')
label = np.ravel(label.values.astype(np.float32, copy=False))
return label
def _dump_pandas_categorical(pandas_categorical, file_name=None):
categorical_json = json.dumps(pandas_categorical, default=json_default_with_numpy)
pandas_str = f'\npandas_categorical:{categorical_json}\n'
if file_name is not None:
with open(file_name, 'a') as f:
f.write(pandas_str)
return pandas_str
def _load_pandas_categorical(file_name=None, model_str=None):
pandas_key = 'pandas_categorical:'
offset = -len(pandas_key)
if file_name is not None:
max_offset = -getsize(file_name)
with open(file_name, 'rb') as f:
while True:
if offset < max_offset:
offset = max_offset
f.seek(offset, SEEK_END)
lines = f.readlines()
if len(lines) >= 2:
break
offset *= 2
last_line = lines[-1].decode('utf-8').strip()
if not last_line.startswith(pandas_key):
last_line = lines[-2].decode('utf-8').strip()
elif model_str is not None:
idx = model_str.rfind('\n', 0, offset)
last_line = model_str[idx:].strip()
if last_line.startswith(pandas_key):
return json.loads(last_line[len(pandas_key):])
else:
return None
class Sequence(abc.ABC):
"""
Generic data access interface.
Object should support the following operations:
.. code-block::
# Get total row number.
>>> len(seq)
# Random access by row index. Used for data sampling.
>>> seq[10]
# Range data access. Used to read data in batch when constructing Dataset.
>>> seq[0:100]
# Optionally specify batch_size to control range data read size.
>>> seq.batch_size
- With random access, **data sampling does not need to go through all data**.
- With range data access, there's **no need to read all data into memory thus reduce memory usage**.
.. versionadded:: 3.3.0
Attributes
----------
batch_size : int
Default size of a batch.
"""
batch_size = 4096 # Defaults to read 4K rows in each batch.
@abc.abstractmethod
def __getitem__(self, idx: Union[int, slice]) -> np.ndarray:
"""Return data for given row index.
A basic implementation should look like this:
.. code-block:: python
if isinstance(idx, numbers.Integral):
return self.__get_one_line__(idx)
elif isinstance(idx, slice):
                return np.stack([self.__get_one_line__(i) for i in range(idx.start, idx.stop)])
else:
raise TypeError(f"Sequence index must be integer or slice, got {type(idx).__name__}")
Parameters
----------
idx : int, slice[int]
Item index.
Returns
-------
result : numpy 1-D array, numpy 2-D array
1-D array if idx is int, 2-D array if idx is slice.
"""
raise NotImplementedError("Sub-classes of lightgbm.Sequence must implement __getitem__()")
@abc.abstractmethod
def __len__(self) -> int:
"""Return row count of this sequence."""
raise NotImplementedError("Sub-classes of lightgbm.Sequence must implement __len__()")
class _InnerPredictor:
"""_InnerPredictor of LightGBM.
Not exposed to user.
Used only for prediction, usually used for continued training.
.. note::
Can be converted from Booster, but cannot be converted to Booster.
"""
def __init__(self, model_file=None, booster_handle=None, pred_parameter=None):
"""Initialize the _InnerPredictor.
Parameters
----------
model_file : string, pathlib.Path or None, optional (default=None)
Path to the model file.
booster_handle : object or None, optional (default=None)
Handle of Booster.
pred_parameter: dict or None, optional (default=None)
Other parameters for the prediction.
"""
self.handle = ctypes.c_void_p()
self.__is_manage_handle = True
if model_file is not None:
"""Prediction task"""
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
c_str(str(model_file)),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.num_class = out_num_class.value
self.num_total_iteration = out_num_iterations.value
self.pandas_categorical = _load_pandas_categorical(file_name=model_file)
elif booster_handle is not None:
self.__is_manage_handle = False
self.handle = booster_handle
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.num_class = out_num_class.value
self.num_total_iteration = self.current_iteration()
self.pandas_categorical = None
else:
raise TypeError('Need model_file or booster_handle to create a predictor')
pred_parameter = {} if pred_parameter is None else pred_parameter
self.pred_parameter = param_dict_to_str(pred_parameter)
def __del__(self):
try:
if self.__is_manage_handle:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
except AttributeError:
pass
def __getstate__(self):
this = self.__dict__.copy()
this.pop('handle', None)
return this
def predict(self, data, start_iteration=0, num_iteration=-1,
raw_score=False, pred_leaf=False, pred_contrib=False, data_has_header=False,
is_reshape=True):
"""Predict logic.
Parameters
----------
data : string, pathlib.Path, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for prediction.
When data type is string or pathlib.Path, it represents the path of txt file.
start_iteration : int, optional (default=0)
Start index of the iteration to predict.
num_iteration : int, optional (default=-1)
Iteration used for prediction.
raw_score : bool, optional (default=False)
Whether to predict raw scores.
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
data_has_header : bool, optional (default=False)
Whether data has header.
Used only for txt data.
is_reshape : bool, optional (default=True)
Whether to reshape to (nrow, ncol).
Returns
-------
result : numpy array, scipy.sparse or list of scipy.sparse
Prediction result.
Can be sparse or a list of sparse objects (each element represents predictions for one class) for feature contributions (when ``pred_contrib=True``).
"""
if isinstance(data, Dataset):
raise TypeError("Cannot use Dataset instance for prediction, please use raw data instead")
data = _data_from_pandas(data, None, None, self.pandas_categorical)[0]
predict_type = C_API_PREDICT_NORMAL
if raw_score:
predict_type = C_API_PREDICT_RAW_SCORE
if pred_leaf:
predict_type = C_API_PREDICT_LEAF_INDEX
if pred_contrib:
predict_type = C_API_PREDICT_CONTRIB
int_data_has_header = 1 if data_has_header else 0
if isinstance(data, (str, Path)):
with _TempFile() as f:
_safe_call(_LIB.LGBM_BoosterPredictForFile(
self.handle,
c_str(str(data)),
ctypes.c_int(int_data_has_header),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
c_str(f.name)))
preds = np.loadtxt(f.name, dtype=np.float64)
nrow = preds.shape[0]
elif isinstance(data, scipy.sparse.csr_matrix):
preds, nrow = self.__pred_for_csr(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, scipy.sparse.csc_matrix):
preds, nrow = self.__pred_for_csc(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, np.ndarray):
preds, nrow = self.__pred_for_np2d(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, list):
try:
data = np.array(data)
except BaseException:
raise ValueError('Cannot convert data list to numpy array.')
preds, nrow = self.__pred_for_np2d(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, dt_DataTable):
preds, nrow = self.__pred_for_np2d(data.to_numpy(), start_iteration, num_iteration, predict_type)
else:
try:
_log_warning('Converting data to scipy sparse matrix.')
csr = scipy.sparse.csr_matrix(data)
except BaseException:
raise TypeError(f'Cannot predict data for type {type(data).__name__}')
preds, nrow = self.__pred_for_csr(csr, start_iteration, num_iteration, predict_type)
if pred_leaf:
preds = preds.astype(np.int32)
is_sparse = scipy.sparse.issparse(preds) or isinstance(preds, list)
if is_reshape and not is_sparse and preds.size != nrow:
if preds.size % nrow == 0:
preds = preds.reshape(nrow, -1)
else:
                raise ValueError(f'Length of predict result ({preds.size}) cannot be divided by nrow ({nrow})')
return preds
def __get_num_preds(self, start_iteration, num_iteration, nrow, predict_type):
"""Get size of prediction result."""
if nrow > MAX_INT32:
raise LightGBMError('LightGBM cannot perform prediction for data '
f'with number of rows greater than MAX_INT32 ({MAX_INT32}).\n'
'You can split your data into chunks '
'and then concatenate predictions for them')
n_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterCalcNumPredict(
self.handle,
ctypes.c_int(nrow),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.byref(n_preds)))
return n_preds.value
def __pred_for_np2d(self, mat, start_iteration, num_iteration, predict_type):
"""Predict for a 2-D numpy matrix."""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray or list must be 2 dimensional')
def inner_predict(mat, start_iteration, num_iteration, predict_type, preds=None):
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # change non-float data to float data, need to copy
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data, _ = c_float_array(data)
n_preds = self.__get_num_preds(start_iteration, num_iteration, mat.shape[0], predict_type)
if preds is None:
preds = np.empty(n_preds, dtype=np.float64)
elif len(preds.shape) != 1 or len(preds) != n_preds:
raise ValueError("Wrong length of pre-allocated predict array")
out_num_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterPredictForMat(
self.handle,
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int32(mat.shape[0]),
ctypes.c_int32(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, mat.shape[0]
nrow = mat.shape[0]
if nrow > MAX_INT32:
sections = np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)
# __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal
n_preds = [self.__get_num_preds(start_iteration, num_iteration, i, predict_type) for i in np.diff([0] + list(sections) + [nrow])]
n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum()
preds = np.empty(sum(n_preds), dtype=np.float64)
for chunk, (start_idx_pred, end_idx_pred) in zip(np.array_split(mat, sections),
zip(n_preds_sections, n_preds_sections[1:])):
# avoid memory consumption by arrays concatenation operations
inner_predict(chunk, start_iteration, num_iteration, predict_type, preds[start_idx_pred:end_idx_pred])
return preds, nrow
else:
return inner_predict(mat, start_iteration, num_iteration, predict_type)
def __create_sparse_native(self, cs, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data,
indptr_type, data_type, is_csr=True):
# create numpy array from output arrays
data_indices_len = out_shape[0]
indptr_len = out_shape[1]
if indptr_type == C_API_DTYPE_INT32:
out_indptr = cint32_array_to_numpy(out_ptr_indptr, indptr_len)
elif indptr_type == C_API_DTYPE_INT64:
out_indptr = cint64_array_to_numpy(out_ptr_indptr, indptr_len)
else:
raise TypeError("Expected int32 or int64 type for indptr")
if data_type == C_API_DTYPE_FLOAT32:
out_data = cfloat32_array_to_numpy(out_ptr_data, data_indices_len)
elif data_type == C_API_DTYPE_FLOAT64:
out_data = cfloat64_array_to_numpy(out_ptr_data, data_indices_len)
else:
raise TypeError("Expected float32 or float64 type for data")
out_indices = cint32_array_to_numpy(out_ptr_indices, data_indices_len)
# break up indptr based on number of rows (note more than one matrix in multiclass case)
per_class_indptr_shape = cs.indptr.shape[0]
# for CSC there is extra column added
if not is_csr:
per_class_indptr_shape += 1
out_indptr_arrays = np.split(out_indptr, out_indptr.shape[0] / per_class_indptr_shape)
# reformat output into a csr or csc matrix or list of csr or csc matrices
cs_output_matrices = []
offset = 0
for cs_indptr in out_indptr_arrays:
matrix_indptr_len = cs_indptr[cs_indptr.shape[0] - 1]
cs_indices = out_indices[offset + cs_indptr[0]:offset + matrix_indptr_len]
cs_data = out_data[offset + cs_indptr[0]:offset + matrix_indptr_len]
offset += matrix_indptr_len
# same shape as input csr or csc matrix except extra column for expected value
cs_shape = [cs.shape[0], cs.shape[1] + 1]
# note: make sure we copy data as it will be deallocated next
if is_csr:
cs_output_matrices.append(scipy.sparse.csr_matrix((cs_data, cs_indices, cs_indptr), cs_shape))
else:
cs_output_matrices.append(scipy.sparse.csc_matrix((cs_data, cs_indices, cs_indptr), cs_shape))
# free the temporary native indptr, indices, and data
_safe_call(_LIB.LGBM_BoosterFreePredictSparse(out_ptr_indptr, out_ptr_indices, out_ptr_data,
ctypes.c_int(indptr_type), ctypes.c_int(data_type)))
if len(cs_output_matrices) == 1:
return cs_output_matrices[0]
return cs_output_matrices
def __pred_for_csr(self, csr, start_iteration, num_iteration, predict_type):
"""Predict for a CSR data."""
def inner_predict(csr, start_iteration, num_iteration, predict_type, preds=None):
nrow = len(csr.indptr) - 1
n_preds = self.__get_num_preds(start_iteration, num_iteration, nrow, predict_type)
if preds is None:
preds = np.empty(n_preds, dtype=np.float64)
elif len(preds.shape) != 1 or len(preds) != n_preds:
raise ValueError("Wrong length of pre-allocated predict array")
out_num_preds = ctypes.c_int64(0)
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
assert csr.shape[1] <= MAX_INT32
csr_indices = csr.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_BoosterPredictForCSR(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, nrow
def inner_predict_sparse(csr, start_iteration, num_iteration, predict_type):
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
csr_indices = csr.indices.astype(np.int32, copy=False)
matrix_type = C_API_MATRIX_TYPE_CSR
if type_ptr_indptr == C_API_DTYPE_INT32:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int32)()
else:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int64)()
out_ptr_indices = ctypes.POINTER(ctypes.c_int32)()
if type_ptr_data == C_API_DTYPE_FLOAT32:
out_ptr_data = ctypes.POINTER(ctypes.c_float)()
else:
out_ptr_data = ctypes.POINTER(ctypes.c_double)()
out_shape = np.empty(2, dtype=np.int64)
_safe_call(_LIB.LGBM_BoosterPredictSparseOutput(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.c_int(matrix_type),
out_shape.ctypes.data_as(ctypes.POINTER(ctypes.c_int64)),
ctypes.byref(out_ptr_indptr),
ctypes.byref(out_ptr_indices),
ctypes.byref(out_ptr_data)))
matrices = self.__create_sparse_native(csr, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data,
type_ptr_indptr, type_ptr_data, is_csr=True)
nrow = len(csr.indptr) - 1
return matrices, nrow
if predict_type == C_API_PREDICT_CONTRIB:
return inner_predict_sparse(csr, start_iteration, num_iteration, predict_type)
nrow = len(csr.indptr) - 1
if nrow > MAX_INT32:
sections = [0] + list(np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)) + [nrow]
# __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal
n_preds = [self.__get_num_preds(start_iteration, num_iteration, i, predict_type) for i in np.diff(sections)]
n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum()
preds = np.empty(sum(n_preds), dtype=np.float64)
for (start_idx, end_idx), (start_idx_pred, end_idx_pred) in zip(zip(sections, sections[1:]),
zip(n_preds_sections, n_preds_sections[1:])):
# avoid memory consumption by arrays concatenation operations
inner_predict(csr[start_idx:end_idx], start_iteration, num_iteration, predict_type, preds[start_idx_pred:end_idx_pred])
return preds, nrow
else:
return inner_predict(csr, start_iteration, num_iteration, predict_type)
def __pred_for_csc(self, csc, start_iteration, num_iteration, predict_type):
"""Predict for a CSC data."""
def inner_predict_sparse(csc, start_iteration, num_iteration, predict_type):
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
csc_indices = csc.indices.astype(np.int32, copy=False)
matrix_type = C_API_MATRIX_TYPE_CSC
if type_ptr_indptr == C_API_DTYPE_INT32:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int32)()
else:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int64)()
out_ptr_indices = ctypes.POINTER(ctypes.c_int32)()
if type_ptr_data == C_API_DTYPE_FLOAT32:
out_ptr_data = ctypes.POINTER(ctypes.c_float)()
else:
out_ptr_data = ctypes.POINTER(ctypes.c_double)()
out_shape = np.empty(2, dtype=np.int64)
_safe_call(_LIB.LGBM_BoosterPredictSparseOutput(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.c_int(matrix_type),
out_shape.ctypes.data_as(ctypes.POINTER(ctypes.c_int64)),
ctypes.byref(out_ptr_indptr),
ctypes.byref(out_ptr_indices),
ctypes.byref(out_ptr_data)))
matrices = self.__create_sparse_native(csc, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data,
type_ptr_indptr, type_ptr_data, is_csr=False)
nrow = csc.shape[0]
return matrices, nrow
nrow = csc.shape[0]
if nrow > MAX_INT32:
return self.__pred_for_csr(csc.tocsr(), start_iteration, num_iteration, predict_type)
if predict_type == C_API_PREDICT_CONTRIB:
return inner_predict_sparse(csc, start_iteration, num_iteration, predict_type)
n_preds = self.__get_num_preds(start_iteration, num_iteration, nrow, predict_type)
preds = np.empty(n_preds, dtype=np.float64)
out_num_preds = ctypes.c_int64(0)
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
assert csc.shape[0] <= MAX_INT32
csc_indices = csc.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_BoosterPredictForCSC(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, nrow
def current_iteration(self):
"""Get the index of the current iteration.
Returns
-------
cur_iter : int
The index of the current iteration.
"""
out_cur_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
self.handle,
ctypes.byref(out_cur_iter)))
return out_cur_iter.value
class Dataset:
"""Dataset in LightGBM."""
def __init__(self, data, label=None, reference=None,
weight=None, group=None, init_score=None, silent=False,
feature_name='auto', categorical_feature='auto', params=None,
free_raw_data=True):
"""Initialize Dataset.
Parameters
----------
data : string, pathlib.Path, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, Sequence, list of Sequences or list of numpy arrays
Data source of Dataset.
If string or pathlib.Path, it represents the path to txt file.
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
Label of the data.
reference : Dataset or None, optional (default=None)
If this is Dataset for validation, training data should be used as reference.
weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
Weight for each instance.
group : list, numpy 1-D array, pandas Series or None, optional (default=None)
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
init_score : list, numpy 1-D array, pandas Series or None, optional (default=None)
Init score for Dataset.
silent : bool, optional (default=False)
Whether to print messages during construction.
feature_name : list of strings or 'auto', optional (default="auto")
Feature names.
If 'auto' and data is pandas DataFrame, data columns names are used.
categorical_feature : list of strings or int, or 'auto', optional (default="auto")
Categorical features.
If list of int, interpreted as indices.
If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
All values in categorical features should be less than int32 max value (2147483647).
Large values could be memory consuming. Consider using consecutive integers starting from zero.
All negative values in categorical features will be treated as missing values.
The output cannot be monotonically constrained with respect to a categorical feature.
params : dict or None, optional (default=None)
Other parameters for Dataset.
free_raw_data : bool, optional (default=True)
If True, raw data is freed after constructing inner Dataset.
"""
self.handle = None
self.data = data
self.label = label
self.reference = reference
self.weight = weight
self.group = group
self.init_score = init_score
self.silent = silent
self.feature_name = feature_name
self.categorical_feature = categorical_feature
self.params = deepcopy(params)
self.free_raw_data = free_raw_data
self.used_indices = None
self.need_slice = True
self._predictor = None
self.pandas_categorical = None
self.params_back_up = None
self.feature_penalty = None
self.monotone_constraints = None
self.version = 0
self._start_row = 0 # Used when pushing rows one by one.
def __del__(self):
try:
self._free_handle()
except AttributeError:
pass
def _create_sample_indices(self, total_nrow: int) -> np.ndarray:
"""Get an array of randomly chosen indices from this ``Dataset``.
Indices are sampled without replacement.
Parameters
----------
total_nrow : int
Total number of rows to sample from.
If this value is greater than the value of parameter ``bin_construct_sample_cnt``, only ``bin_construct_sample_cnt`` indices will be used.
If Dataset has multiple input data, this should be the sum of rows of every file.
Returns
-------
indices : numpy array
Indices for sampled data.
"""
param_str = param_dict_to_str(self.get_params())
sample_cnt = _get_sample_count(total_nrow, param_str)
indices = np.empty(sample_cnt, dtype=np.int32)
ptr_data, _, _ = c_int_array(indices)
actual_sample_cnt = ctypes.c_int32(0)
_safe_call(_LIB.LGBM_SampleIndices(
ctypes.c_int32(total_nrow),
c_str(param_str),
ptr_data,
ctypes.byref(actual_sample_cnt),
))
return indices[:actual_sample_cnt.value]
def _init_from_ref_dataset(self, total_nrow: int, ref_dataset: 'Dataset') -> 'Dataset':
"""Create dataset from a reference dataset.
Parameters
----------
total_nrow : int
Number of rows expected to add to dataset.
ref_dataset : Dataset
Reference dataset to extract meta from.
Returns
-------
self : Dataset
Constructed Dataset object.
"""
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateByReference(
ref_dataset,
ctypes.c_int64(total_nrow),
ctypes.byref(self.handle),
))
return self
def _init_from_sample(
self,
sample_data: List[np.ndarray],
sample_indices: List[np.ndarray],
sample_cnt: int,
total_nrow: int,
) -> "Dataset":
"""Create Dataset from sampled data structures.
Parameters
----------
sample_data : list of numpy arrays
Sample data for each column.
sample_indices : list of numpy arrays
Sample data row index for each column.
sample_cnt : int
Number of samples.
total_nrow : int
Total number of rows for all input files.
Returns
-------
self : Dataset
Constructed Dataset object.
"""
ncol = len(sample_indices)
assert len(sample_data) == ncol, "#sample data column != #column indices"
for i in range(ncol):
if sample_data[i].dtype != np.double:
raise ValueError(f"sample_data[{i}] type {sample_data[i].dtype} is not double")
if sample_indices[i].dtype != np.int32:
raise ValueError(f"sample_indices[{i}] type {sample_indices[i].dtype} is not int32")
# c type: double**
# each double* element points to start of each column of sample data.
sample_col_ptr = (ctypes.POINTER(ctypes.c_double) * ncol)()
# c type int**
# each int* points to start of indices for each column
indices_col_ptr = (ctypes.POINTER(ctypes.c_int32) * ncol)()
for i in range(ncol):
sample_col_ptr[i] = c_float_array(sample_data[i])[0]
indices_col_ptr[i] = c_int_array(sample_indices[i])[0]
num_per_col = np.array([len(d) for d in sample_indices], dtype=np.int32)
num_per_col_ptr, _, _ = c_int_array(num_per_col)
self.handle = ctypes.c_void_p()
params_str = param_dict_to_str(self.get_params())
_safe_call(_LIB.LGBM_DatasetCreateFromSampledColumn(
ctypes.cast(sample_col_ptr, ctypes.POINTER(ctypes.POINTER(ctypes.c_double))),
ctypes.cast(indices_col_ptr, ctypes.POINTER(ctypes.POINTER(ctypes.c_int32))),
ctypes.c_int32(ncol),
num_per_col_ptr,
ctypes.c_int32(sample_cnt),
ctypes.c_int32(total_nrow),
c_str(params_str),
ctypes.byref(self.handle),
))
return self
def _push_rows(self, data: np.ndarray) -> 'Dataset':
"""Add rows to Dataset.
Parameters
----------
data : numpy 1-D array
New data to add to the Dataset.
Returns
-------
self : Dataset
Dataset object.
"""
nrow, ncol = data.shape
data = data.reshape(data.size)
data_ptr, data_type, _ = c_float_array(data)
_safe_call(_LIB.LGBM_DatasetPushRows(
self.handle,
data_ptr,
data_type,
ctypes.c_int32(nrow),
ctypes.c_int32(ncol),
ctypes.c_int32(self._start_row),
))
self._start_row += nrow
return self
def get_params(self):
"""Get the used parameters in the Dataset.
Returns
-------
params : dict or None
The used parameters in this Dataset object.
"""
if self.params is not None:
# no min_data, nthreads and verbose in this function
dataset_params = _ConfigAliases.get("bin_construct_sample_cnt",
"categorical_feature",
"data_random_seed",
"enable_bundle",
"feature_pre_filter",
"forcedbins_filename",
"group_column",
"header",
"ignore_column",
"is_enable_sparse",
"label_column",
"linear_tree",
"max_bin",
"max_bin_by_feature",
"min_data_in_bin",
"pre_partition",
"precise_float_parser",
"two_round",
"use_missing",
"weight_column",
"zero_as_missing")
return {k: v for k, v in self.params.items() if k in dataset_params}
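    # Example (hedged sketch; ``X``/``y`` are assumed numpy arrays): only
    # Dataset-related parameters are returned, other keys are filtered out.
    #
    #     d = lgb.Dataset(X, label=y, params={'max_bin': 63, 'learning_rate': 0.1})
    #     d.get_params()  # -> {'max_bin': 63}; 'learning_rate' is not a Dataset parameter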
def _free_handle(self):
if self.handle is not None:
_safe_call(_LIB.LGBM_DatasetFree(self.handle))
self.handle = None
self.need_slice = True
if self.used_indices is not None:
self.data = None
return self
def _set_init_score_by_predictor(self, predictor, data, used_indices=None):
data_has_header = False
if isinstance(data, (str, Path)):
# check data has header or not
data_has_header = any(self.params.get(alias, False) for alias in _ConfigAliases.get("header"))
num_data = self.num_data()
if predictor is not None:
init_score = predictor.predict(data,
raw_score=True,
data_has_header=data_has_header,
is_reshape=False)
if used_indices is not None:
assert not self.need_slice
if isinstance(data, (str, Path)):
sub_init_score = np.empty(num_data * predictor.num_class, dtype=np.float32)
assert num_data == len(used_indices)
for i in range(len(used_indices)):
for j in range(predictor.num_class):
sub_init_score[i * predictor.num_class + j] = init_score[used_indices[i] * predictor.num_class + j]
init_score = sub_init_score
if predictor.num_class > 1:
# need to regroup init_score
new_init_score = np.empty(init_score.size, dtype=np.float32)
for i in range(num_data):
for j in range(predictor.num_class):
new_init_score[j * num_data + i] = init_score[i * predictor.num_class + j]
init_score = new_init_score
elif self.init_score is not None:
init_score = np.zeros(self.init_score.shape, dtype=np.float32)
else:
return self
self.set_init_score(init_score)
def _lazy_init(self, data, label=None, reference=None,
weight=None, group=None, init_score=None, predictor=None,
silent=False, feature_name='auto',
categorical_feature='auto', params=None):
if data is None:
self.handle = None
return self
if reference is not None:
self.pandas_categorical = reference.pandas_categorical
categorical_feature = reference.categorical_feature
data, feature_name, categorical_feature, self.pandas_categorical = _data_from_pandas(data,
feature_name,
categorical_feature,
self.pandas_categorical)
label = _label_from_pandas(label)
# process for args
params = {} if params is None else params
args_names = (getattr(self.__class__, '_lazy_init')
.__code__
.co_varnames[:getattr(self.__class__, '_lazy_init').__code__.co_argcount])
for key in params.keys():
if key in args_names:
_log_warning(f'{key} keyword has been found in `params` and will be ignored.\n'
f'Please use {key} argument of the Dataset constructor to pass this parameter.')
# user can set verbose with params, it has higher priority
if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
params["verbose"] = -1
# get categorical features
if categorical_feature is not None:
categorical_indices = set()
feature_dict = {}
if feature_name is not None:
feature_dict = {name: i for i, name in enumerate(feature_name)}
for name in categorical_feature:
if isinstance(name, str) and name in feature_dict:
categorical_indices.add(feature_dict[name])
elif isinstance(name, int):
categorical_indices.add(name)
else:
raise TypeError(f"Wrong type({type(name).__name__}) or unknown name({name}) in categorical_feature")
if categorical_indices:
for cat_alias in _ConfigAliases.get("categorical_feature"):
if cat_alias in params:
_log_warning(f'{cat_alias} in param dict is overridden.')
params.pop(cat_alias, None)
params['categorical_column'] = sorted(categorical_indices)
params_str = param_dict_to_str(params)
self.params = params
# process for reference dataset
ref_dataset = None
if isinstance(reference, Dataset):
ref_dataset = reference.construct().handle
elif reference is not None:
raise TypeError('Reference dataset should be None or dataset instance')
# start construct data
if isinstance(data, (str, Path)):
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateFromFile(
c_str(str(data)),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
elif isinstance(data, scipy.sparse.csr_matrix):
self.__init_from_csr(data, params_str, ref_dataset)
elif isinstance(data, scipy.sparse.csc_matrix):
self.__init_from_csc(data, params_str, ref_dataset)
elif isinstance(data, np.ndarray):
self.__init_from_np2d(data, params_str, ref_dataset)
elif isinstance(data, list) and len(data) > 0:
if all(isinstance(x, np.ndarray) for x in data):
self.__init_from_list_np2d(data, params_str, ref_dataset)
elif all(isinstance(x, Sequence) for x in data):
self.__init_from_seqs(data, ref_dataset)
else:
raise TypeError('Data list can only be of ndarray or Sequence')
elif isinstance(data, Sequence):
self.__init_from_seqs([data], ref_dataset)
elif isinstance(data, dt_DataTable):
self.__init_from_np2d(data.to_numpy(), params_str, ref_dataset)
else:
try:
csr = scipy.sparse.csr_matrix(data)
self.__init_from_csr(csr, params_str, ref_dataset)
except BaseException:
raise TypeError(f'Cannot initialize Dataset from {type(data).__name__}')
if label is not None:
self.set_label(label)
if self.get_label() is None:
raise ValueError("Label should not be None")
if weight is not None:
self.set_weight(weight)
if group is not None:
self.set_group(group)
if isinstance(predictor, _InnerPredictor):
if self._predictor is None and init_score is not None:
_log_warning("The init_score will be overridden by the prediction of init_model.")
self._set_init_score_by_predictor(predictor, data)
elif init_score is not None:
self.set_init_score(init_score)
elif predictor is not None:
raise TypeError(f'Wrong predictor type {type(predictor).__name__}')
# set feature names
return self.set_feature_name(feature_name)
def __yield_row_from(self, seqs: List[Sequence], indices: Iterable[int]):
offset = 0
seq_id = 0
seq = seqs[seq_id]
for row_id in indices:
assert row_id >= offset, "sample indices are expected to be monotonic"
while row_id >= offset + len(seq):
offset += len(seq)
seq_id += 1
seq = seqs[seq_id]
id_in_seq = row_id - offset
row = seq[id_in_seq]
yield row if row.flags['OWNDATA'] else row.copy()
def __sample(self, seqs: List[Sequence], total_nrow: int) -> Tuple[List[np.ndarray], List[np.ndarray]]:
"""Sample data from seqs.
Mimics behavior in c_api.cpp:LGBM_DatasetCreateFromMats()
Returns
-------
sampled_rows, sampled_row_indices
"""
indices = self._create_sample_indices(total_nrow)
# Select sampled rows, transpose to column order.
sampled = np.array([row for row in self.__yield_row_from(seqs, indices)])
sampled = sampled.T
filtered = []
filtered_idx = []
sampled_row_range = np.arange(len(indices), dtype=np.int32)
for col in sampled:
col_predicate = (np.abs(col) > ZERO_THRESHOLD) | np.isnan(col)
filtered_col = col[col_predicate]
filtered_row_idx = sampled_row_range[col_predicate]
filtered.append(filtered_col)
filtered_idx.append(filtered_row_idx)
return filtered, filtered_idx
def __init_from_seqs(self, seqs: List[Sequence], ref_dataset: Optional['Dataset'] = None):
"""
Initialize data from list of Sequence objects.
Sequence: Generic Data Access Object
        Supports random access and access by batch if properly defined by the user.
        Data scheme uniformity is trusted, not checked.
"""
total_nrow = sum(len(seq) for seq in seqs)
# create validation dataset from ref_dataset
if ref_dataset is not None:
self._init_from_ref_dataset(total_nrow, ref_dataset)
else:
param_str = param_dict_to_str(self.get_params())
sample_cnt = _get_sample_count(total_nrow, param_str)
sample_data, col_indices = self.__sample(seqs, total_nrow)
self._init_from_sample(sample_data, col_indices, sample_cnt, total_nrow)
for seq in seqs:
nrow = len(seq)
batch_size = getattr(seq, 'batch_size', None) or Sequence.batch_size
for start in range(0, nrow, batch_size):
end = min(start + batch_size, nrow)
self._push_rows(seq[start:end])
return self
def __init_from_np2d(self, mat, params_str, ref_dataset):
"""Initialize data from a 2-D numpy matrix."""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
self.handle = ctypes.c_void_p()
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # change non-float data to float data, need to copy
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data, _ = c_float_array(data)
_safe_call(_LIB.LGBM_DatasetCreateFromMat(
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int32(mat.shape[0]),
ctypes.c_int32(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_list_np2d(self, mats, params_str, ref_dataset):
"""Initialize data from a list of 2-D numpy matrices."""
ncol = mats[0].shape[1]
nrow = np.empty((len(mats),), np.int32)
if mats[0].dtype == np.float64:
ptr_data = (ctypes.POINTER(ctypes.c_double) * len(mats))()
else:
ptr_data = (ctypes.POINTER(ctypes.c_float) * len(mats))()
holders = []
type_ptr_data = None
for i, mat in enumerate(mats):
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
if mat.shape[1] != ncol:
raise ValueError('Input arrays must have same number of columns')
nrow[i] = mat.shape[0]
if mat.dtype == np.float32 or mat.dtype == np.float64:
mats[i] = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # change non-float data to float data, need to copy
mats[i] = np.array(mat.reshape(mat.size), dtype=np.float32)
chunk_ptr_data, chunk_type_ptr_data, holder = c_float_array(mats[i])
if type_ptr_data is not None and chunk_type_ptr_data != type_ptr_data:
raise ValueError('Input chunks must have same type')
ptr_data[i] = chunk_ptr_data
type_ptr_data = chunk_type_ptr_data
holders.append(holder)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateFromMats(
ctypes.c_int32(len(mats)),
ctypes.cast(ptr_data, ctypes.POINTER(ctypes.POINTER(ctypes.c_double))),
ctypes.c_int(type_ptr_data),
nrow.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ctypes.c_int32(ncol),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_csr(self, csr, params_str, ref_dataset):
"""Initialize data from a CSR matrix."""
if len(csr.indices) != len(csr.data):
raise ValueError(f'Length mismatch: {len(csr.indices)} vs {len(csr.data)}')
self.handle = ctypes.c_void_p()
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
assert csr.shape[1] <= MAX_INT32
csr_indices = csr.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_DatasetCreateFromCSR(
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_csc(self, csc, params_str, ref_dataset):
"""Initialize data from a CSC matrix."""
if len(csc.indices) != len(csc.data):
raise ValueError(f'Length mismatch: {len(csc.indices)} vs {len(csc.data)}')
self.handle = ctypes.c_void_p()
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
assert csc.shape[0] <= MAX_INT32
csc_indices = csc.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_DatasetCreateFromCSC(
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def construct(self):
"""Lazy init.
Returns
-------
self : Dataset
Constructed Dataset object.
"""
if self.handle is None:
if self.reference is not None:
reference_params = self.reference.get_params()
if self.get_params() != reference_params:
_log_warning('Overriding the parameters from Reference Dataset.')
self._update_params(reference_params)
if self.used_indices is None:
# create valid
self._lazy_init(self.data, label=self.label, reference=self.reference,
weight=self.weight, group=self.group,
init_score=self.init_score, predictor=self._predictor,
silent=self.silent, feature_name=self.feature_name, params=self.params)
else:
# construct subset
used_indices = list_to_1d_numpy(self.used_indices, np.int32, name='used_indices')
assert used_indices.flags.c_contiguous
if self.reference.group is not None:
group_info = np.array(self.reference.group).astype(np.int32, copy=False)
_, self.group = np.unique(np.repeat(range(len(group_info)), repeats=group_info)[self.used_indices],
return_counts=True)
self.handle = ctypes.c_void_p()
params_str = param_dict_to_str(self.params)
_safe_call(_LIB.LGBM_DatasetGetSubset(
self.reference.construct().handle,
used_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ctypes.c_int32(used_indices.shape[0]),
c_str(params_str),
ctypes.byref(self.handle)))
if not self.free_raw_data:
self.get_data()
if self.group is not None:
self.set_group(self.group)
if self.get_label() is None:
raise ValueError("Label should not be None.")
if isinstance(self._predictor, _InnerPredictor) and self._predictor is not self.reference._predictor:
self.get_data()
self._set_init_score_by_predictor(self._predictor, self.data, used_indices)
else:
# create train
self._lazy_init(self.data, label=self.label,
weight=self.weight, group=self.group,
init_score=self.init_score, predictor=self._predictor,
silent=self.silent, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=self.params)
if self.free_raw_data:
self.data = None
return self
def create_valid(self, data, label=None, weight=None, group=None,
init_score=None, silent=False, params=None):
"""Create validation data align with current Dataset.
Parameters
----------
data : string, pathlib.Path, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, Sequence, list of Sequences or list of numpy arrays
Data source of Dataset.
If string or pathlib.Path, it represents the path to txt file.
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
Label of the data.
weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
Weight for each instance.
group : list, numpy 1-D array, pandas Series or None, optional (default=None)
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
init_score : list, numpy 1-D array, pandas Series or None, optional (default=None)
Init score for Dataset.
silent : bool, optional (default=False)
Whether to print messages during construction.
params : dict or None, optional (default=None)
Other parameters for validation Dataset.
Returns
-------
valid : Dataset
Validation Dataset with reference to self.
"""
ret = Dataset(data, label=label, reference=self,
weight=weight, group=group, init_score=init_score,
silent=silent, params=params, free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
return ret
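    # Example (hedged sketch; ``train_data`` and ``X_val``/``y_val`` are assumed from
    # earlier sketches): validation data must be created from the training Dataset so
    # that both share the same bin mappers.
    #
    #     valid_data = train_data.create_valid(X_val, label=y_val)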
def subset(self, used_indices, params=None):
"""Get subset of current Dataset.
Parameters
----------
used_indices : list of int
Indices used to create the subset.
params : dict or None, optional (default=None)
These parameters will be passed to Dataset constructor.
Returns
-------
subset : Dataset
Subset of the current Dataset.
"""
if params is None:
params = self.params
ret = Dataset(None, reference=self, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=params,
free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
ret.used_indices = sorted(used_indices)
return ret
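    # Example (hedged sketch; assumes ``train_data`` has at least 100 rows): the
    # subset is resolved lazily when ``construct()`` is called.
    #
    #     sub = train_data.subset(list(range(100)))
    #     sub.construct()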
def save_binary(self, filename):
"""Save Dataset to a binary file.
.. note::
            Please note that ``init_score`` is not saved in the binary file.
            If you need it, please set it again after loading the Dataset.
Parameters
----------
filename : string or pathlib.Path
Name of the output file.
Returns
-------
self : Dataset
Returns self.
"""
_safe_call(_LIB.LGBM_DatasetSaveBinary(
self.construct().handle,
c_str(str(filename))))
return self
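    # Example (hedged sketch; the file name is arbitrary): a binary file can be
    # reloaded much faster than the original text or array data.
    #
    #     train_data.save_binary('train.bin')
    #     reloaded = lgb.Dataset('train.bin')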
def _update_params(self, params):
if not params:
return self
params = deepcopy(params)
def update():
if not self.params:
self.params = params
else:
self.params_back_up = deepcopy(self.params)
self.params.update(params)
if self.handle is None:
update()
elif params is not None:
ret = _LIB.LGBM_DatasetUpdateParamChecking(
c_str(param_dict_to_str(self.params)),
c_str(param_dict_to_str(params)))
if ret != 0:
# could be updated if data is not freed
if self.data is not None:
update()
self._free_handle()
else:
raise LightGBMError(_LIB.LGBM_GetLastError().decode('utf-8'))
return self
def _reverse_update_params(self):
if self.handle is None:
self.params = deepcopy(self.params_back_up)
self.params_back_up = None
return self
def set_field(self, field_name, data):
"""Set property into the Dataset.
Parameters
----------
field_name : string
The field name of the information.
data : list, numpy 1-D array, pandas Series or None
The array of data to be set.
Returns
-------
self : Dataset
Dataset with set property.
"""
if self.handle is None:
raise Exception(f"Cannot set {field_name} before construct dataset")
if data is None:
# set to None
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
None,
ctypes.c_int(0),
ctypes.c_int(FIELD_TYPE_MAPPER[field_name])))
return self
dtype = np.float32
if field_name == 'group':
dtype = np.int32
elif field_name == 'init_score':
dtype = np.float64
data = list_to_1d_numpy(data, dtype, name=field_name)
if data.dtype == np.float32 or data.dtype == np.float64:
ptr_data, type_data, _ = c_float_array(data)
elif data.dtype == np.int32:
ptr_data, type_data, _ = c_int_array(data)
else:
raise TypeError(f"Expected np.float32/64 or np.int32, met type({data.dtype})")
if type_data != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Input type error for set_field")
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
ptr_data,
ctypes.c_int(len(data)),
ctypes.c_int(type_data)))
self.version += 1
return self
def get_field(self, field_name):
"""Get property from the Dataset.
Parameters
----------
field_name : string
The field name of the information.
Returns
-------
info : numpy array or None
A numpy array with information from the Dataset.
"""
if self.handle is None:
raise Exception(f"Cannot get {field_name} before construct Dataset")
tmp_out_len = ctypes.c_int(0)
out_type = ctypes.c_int(0)
ret = ctypes.POINTER(ctypes.c_void_p)()
_safe_call(_LIB.LGBM_DatasetGetField(
self.handle,
c_str(field_name),
ctypes.byref(tmp_out_len),
ctypes.byref(ret),
ctypes.byref(out_type)))
if out_type.value != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Return type error for get_field")
if tmp_out_len.value == 0:
return None
if out_type.value == C_API_DTYPE_INT32:
return cint32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_int32)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT32:
return cfloat32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_float)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT64:
return cfloat64_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_double)), tmp_out_len.value)
else:
raise TypeError("Unknown type")
def set_categorical_feature(self, categorical_feature):
"""Set categorical features.
Parameters
----------
categorical_feature : list of int or strings
Names or indices of categorical features.
Returns
-------
self : Dataset
Dataset with set categorical features.
"""
if self.categorical_feature == categorical_feature:
return self
if self.data is not None:
if self.categorical_feature is None:
self.categorical_feature = categorical_feature
return self._free_handle()
elif categorical_feature == 'auto':
_log_warning('Using categorical_feature in Dataset.')
return self
else:
_log_warning('categorical_feature in Dataset is overridden.\n'
f'New categorical_feature is {sorted(list(categorical_feature))}')
self.categorical_feature = categorical_feature
return self._free_handle()
else:
raise LightGBMError("Cannot set categorical feature after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
def _set_predictor(self, predictor):
"""Set predictor for continued training.
        It is not recommended for users to call this function.
Please use init_model argument in engine.train() or engine.cv() instead.
"""
if predictor is self._predictor and (predictor is None or predictor.current_iteration() == self._predictor.current_iteration()):
return self
if self.handle is None:
self._predictor = predictor
elif self.data is not None:
self._predictor = predictor
self._set_init_score_by_predictor(self._predictor, self.data)
elif self.used_indices is not None and self.reference is not None and self.reference.data is not None:
self._predictor = predictor
self._set_init_score_by_predictor(self._predictor, self.reference.data, self.used_indices)
else:
raise LightGBMError("Cannot set predictor after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
return self
def set_reference(self, reference):
"""Set reference Dataset.
Parameters
----------
reference : Dataset
Reference that is used as a template to construct the current Dataset.
Returns
-------
self : Dataset
Dataset with set reference.
"""
self.set_categorical_feature(reference.categorical_feature) \
.set_feature_name(reference.feature_name) \
._set_predictor(reference._predictor)
# we're done if self and reference share a common upstream reference
if self.get_ref_chain().intersection(reference.get_ref_chain()):
return self
if self.data is not None:
self.reference = reference
return self._free_handle()
else:
raise LightGBMError("Cannot set reference after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
def set_feature_name(self, feature_name):
"""Set feature name.
Parameters
----------
feature_name : list of strings
Feature names.
Returns
-------
self : Dataset
Dataset with set feature name.
"""
if feature_name != 'auto':
self.feature_name = feature_name
if self.handle is not None and feature_name is not None and feature_name != 'auto':
if len(feature_name) != self.num_feature():
raise ValueError(f"Length of feature_name({len(feature_name)}) and num_feature({self.num_feature()}) don't match")
c_feature_name = [c_str(name) for name in feature_name]
_safe_call(_LIB.LGBM_DatasetSetFeatureNames(
self.handle,
c_array(ctypes.c_char_p, c_feature_name),
ctypes.c_int(len(feature_name))))
return self
def set_label(self, label):
"""Set label of Dataset.
Parameters
----------
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None
The label information to be set into Dataset.
Returns
-------
self : Dataset
Dataset with set label.
"""
self.label = label
if self.handle is not None:
label = list_to_1d_numpy(_label_from_pandas(label), name='label')
self.set_field('label', label)
self.label = self.get_field('label') # original values can be modified at cpp side
return self
def set_weight(self, weight):
"""Set weight of each instance.
Parameters
----------
weight : list, numpy 1-D array, pandas Series or None
Weight to be set for each data point.
Returns
-------
self : Dataset
Dataset with set weight.
"""
if weight is not None and np.all(weight == 1):
weight = None
self.weight = weight
if self.handle is not None and weight is not None:
weight = list_to_1d_numpy(weight, name='weight')
self.set_field('weight', weight)
self.weight = self.get_field('weight') # original values can be modified at cpp side
return self
def set_init_score(self, init_score):
"""Set init score of Booster to start from.
Parameters
----------
init_score : list, numpy 1-D array, pandas Series or None
Init score for Booster.
Returns
-------
self : Dataset
Dataset with set init score.
"""
self.init_score = init_score
if self.handle is not None and init_score is not None:
init_score = list_to_1d_numpy(init_score, np.float64, name='init_score')
self.set_field('init_score', init_score)
self.init_score = self.get_field('init_score') # original values can be modified at cpp side
return self
def set_group(self, group):
"""Set group size of Dataset (used for ranking).
Parameters
----------
group : list, numpy 1-D array, pandas Series or None
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
Returns
-------
self : Dataset
Dataset with set group.
"""
self.group = group
if self.handle is not None and group is not None:
group = list_to_1d_numpy(group, np.int32, name='group')
self.set_field('group', group)
return self
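    # Example (hedged sketch for a ranking task; ``X_rank``/``y_rank`` are assumed
    # arrays with 100 rows): sum(group) must equal the number of rows.
    #
    #     rank_data = lgb.Dataset(X_rank, label=y_rank)
    #     rank_data.set_group([10, 20, 70])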
def get_feature_name(self):
"""Get the names of columns (features) in the Dataset.
Returns
-------
feature_names : list
The names of columns (features) in the Dataset.
"""
if self.handle is None:
raise LightGBMError("Cannot get feature_name before construct dataset")
num_feature = self.num_feature()
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
string_buffers = [ctypes.create_string_buffer(reserved_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_DatasetGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(reserved_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if num_feature != tmp_out_len.value:
raise ValueError("Length of feature names doesn't equal with num_feature")
actual_string_buffer_size = required_string_buffer_size.value
# if buffer length is not long enough, reallocate buffers
if reserved_string_buffer_size < actual_string_buffer_size:
string_buffers = [ctypes.create_string_buffer(actual_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_DatasetGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(actual_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
return [string_buffers[i].value.decode('utf-8') for i in range(num_feature)]
def get_label(self):
"""Get the label of the Dataset.
Returns
-------
label : numpy array or None
The label information from the Dataset.
"""
if self.label is None:
self.label = self.get_field('label')
return self.label
def get_weight(self):
"""Get the weight of the Dataset.
Returns
-------
weight : numpy array or None
Weight for each data point from the Dataset.
"""
if self.weight is None:
self.weight = self.get_field('weight')
return self.weight
def get_init_score(self):
"""Get the initial score of the Dataset.
Returns
-------
init_score : numpy array or None
Init score of Booster.
"""
if self.init_score is None:
self.init_score = self.get_field('init_score')
return self.init_score
def get_data(self):
"""Get the raw data of the Dataset.
Returns
-------
data : string, pathlib.Path, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of numpy arrays or None
Raw data used in the Dataset construction.
"""
if self.handle is None:
raise Exception("Cannot get data before construct Dataset")
if self.need_slice and self.used_indices is not None and self.reference is not None:
self.data = self.reference.data
if self.data is not None:
if isinstance(self.data, np.ndarray) or scipy.sparse.issparse(self.data):
self.data = self.data[self.used_indices, :]
elif isinstance(self.data, pd_DataFrame):
self.data = self.data.iloc[self.used_indices].copy()
elif isinstance(self.data, dt_DataTable):
self.data = self.data[self.used_indices, :]
else:
_log_warning(f"Cannot subset {type(self.data).__name__} type of raw data.\n"
"Returning original raw data")
self.need_slice = False
if self.data is None:
raise LightGBMError("Cannot call `get_data` after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
return self.data
def get_group(self):
"""Get the group of the Dataset.
Returns
-------
group : numpy array or None
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
"""
if self.group is None:
self.group = self.get_field('group')
if self.group is not None:
                # group data from LightGBM are group boundaries; convert them to group sizes
self.group = np.diff(self.group)
return self.group
def num_data(self):
"""Get the number of rows in the Dataset.
Returns
-------
number_of_rows : int
The number of rows in the Dataset.
"""
if self.handle is not None:
ret = ctypes.c_int(0)
_safe_call(_LIB.LGBM_DatasetGetNumData(self.handle,
ctypes.byref(ret)))
return ret.value
else:
raise LightGBMError("Cannot get num_data before construct dataset")
def num_feature(self):
"""Get the number of columns (features) in the Dataset.
Returns
-------
number_of_columns : int
The number of columns (features) in the Dataset.
"""
if self.handle is not None:
ret = ctypes.c_int(0)
_safe_call(_LIB.LGBM_DatasetGetNumFeature(self.handle,
ctypes.byref(ret)))
return ret.value
else:
raise LightGBMError("Cannot get num_feature before construct dataset")
def get_ref_chain(self, ref_limit=100):
"""Get a chain of Dataset objects.
        Starts with this Dataset, then goes to its ``reference`` (if it exists),
        then to the reference of that reference, etc.,
        until ``ref_limit`` Datasets have been collected or a reference loop is hit.
Parameters
----------
ref_limit : int, optional (default=100)
The limit number of references.
Returns
-------
ref_chain : set of Dataset
Chain of references of the Datasets.
"""
head = self
ref_chain = set()
while len(ref_chain) < ref_limit:
if isinstance(head, Dataset):
ref_chain.add(head)
if (head.reference is not None) and (head.reference not in ref_chain):
head = head.reference
else:
break
else:
break
return ref_chain
def add_features_from(self, other):
"""Add features from other Dataset to the current Dataset.
Both Datasets must be constructed before calling this method.
Parameters
----------
other : Dataset
The Dataset to take features from.
Returns
-------
self : Dataset
Dataset with the new features added.
"""
if self.handle is None or other.handle is None:
raise ValueError('Both source and target Datasets must be constructed before adding features')
_safe_call(_LIB.LGBM_DatasetAddFeaturesFrom(self.handle, other.handle))
was_none = self.data is None
old_self_data_type = type(self.data).__name__
if other.data is None:
self.data = None
elif self.data is not None:
if isinstance(self.data, np.ndarray):
if isinstance(other.data, np.ndarray):
self.data = np.hstack((self.data, other.data))
elif scipy.sparse.issparse(other.data):
self.data = np.hstack((self.data, other.data.toarray()))
elif isinstance(other.data, pd_DataFrame):
self.data = np.hstack((self.data, other.data.values))
elif isinstance(other.data, dt_DataTable):
self.data = np.hstack((self.data, other.data.to_numpy()))
else:
self.data = None
elif scipy.sparse.issparse(self.data):
sparse_format = self.data.getformat()
if isinstance(other.data, np.ndarray) or scipy.sparse.issparse(other.data):
self.data = scipy.sparse.hstack((self.data, other.data), format=sparse_format)
elif isinstance(other.data, pd_DataFrame):
self.data = scipy.sparse.hstack((self.data, other.data.values), format=sparse_format)
elif isinstance(other.data, dt_DataTable):
self.data = scipy.sparse.hstack((self.data, other.data.to_numpy()), format=sparse_format)
else:
self.data = None
elif isinstance(self.data, pd_DataFrame):
if not PANDAS_INSTALLED:
raise LightGBMError("Cannot add features to DataFrame type of raw data "
"without pandas installed. "
"Install pandas and restart your session.")
if isinstance(other.data, np.ndarray):
self.data = concat((self.data, pd_DataFrame(other.data)),
axis=1, ignore_index=True)
elif scipy.sparse.issparse(other.data):
self.data = concat((self.data, pd_DataFrame(other.data.toarray())),
axis=1, ignore_index=True)
elif isinstance(other.data, pd_DataFrame):
self.data = concat((self.data, other.data),
axis=1, ignore_index=True)
elif isinstance(other.data, dt_DataTable):
self.data = concat((self.data, pd_DataFrame(other.data.to_numpy())),
axis=1, ignore_index=True)
else:
self.data = None
elif isinstance(self.data, dt_DataTable):
if isinstance(other.data, np.ndarray):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data)))
elif scipy.sparse.issparse(other.data):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data.toarray())))
elif isinstance(other.data, pd_DataFrame):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data.values)))
elif isinstance(other.data, dt_DataTable):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data.to_numpy())))
else:
self.data = None
else:
self.data = None
if self.data is None:
err_msg = (f"Cannot add features from {type(other.data).__name__} type of raw data to "
f"{old_self_data_type} type of raw data.\n")
err_msg += ("Set free_raw_data=False when construct Dataset to avoid this"
if was_none else "Freeing raw data")
_log_warning(err_msg)
self.feature_name = self.get_feature_name()
_log_warning("Reseting categorical features.\n"
"You can set new categorical features via ``set_categorical_feature`` method")
self.categorical_feature = "auto"
self.pandas_categorical = None
return self
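    # Example (hedged sketch; ``X1``/``X2``/``y`` are assumed arrays with the same
    # number of rows): both Datasets must be constructed first, and raw data must
    # not have been freed if the merged raw data should stay available.
    #
    #     d1 = lgb.Dataset(X1, label=y, free_raw_data=False).construct()
    #     d2 = lgb.Dataset(X2, label=y, free_raw_data=False).construct()
    #     d1.add_features_from(d2)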
def _dump_text(self, filename):
"""Save Dataset to a text file.
This format cannot be loaded back in by LightGBM, but is useful for debugging purposes.
Parameters
----------
filename : string or pathlib.Path
Name of the output file.
Returns
-------
self : Dataset
Returns self.
"""
_safe_call(_LIB.LGBM_DatasetDumpText(
self.construct().handle,
c_str(str(filename))))
return self
class Booster:
"""Booster in LightGBM."""
def __init__(self, params=None, train_set=None, model_file=None, model_str=None, silent=False):
"""Initialize the Booster.
Parameters
----------
params : dict or None, optional (default=None)
Parameters for Booster.
train_set : Dataset or None, optional (default=None)
Training dataset.
model_file : string, pathlib.Path or None, optional (default=None)
Path to the model file.
model_str : string or None, optional (default=None)
Model will be loaded from this string.
silent : bool, optional (default=False)
Whether to print messages during construction.
"""
self.handle = None
self.network = False
self.__need_reload_eval_info = True
self._train_data_name = "training"
self.__attr = {}
self.__set_objective_to_none = False
self.best_iteration = -1
self.best_score = {}
params = {} if params is None else deepcopy(params)
# user can set verbose with params, it has higher priority
if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
params["verbose"] = -1
if train_set is not None:
# Training task
if not isinstance(train_set, Dataset):
raise TypeError(f'Training data should be Dataset instance, met {type(train_set).__name__}')
params = _choose_param_value(
main_param_name="machines",
params=params,
default_value=None
)
# if "machines" is given, assume user wants to do distributed learning, and set up network
if params["machines"] is None:
params.pop("machines", None)
else:
machines = params["machines"]
if isinstance(machines, str):
num_machines_from_machine_list = len(machines.split(','))
elif isinstance(machines, (list, set)):
num_machines_from_machine_list = len(machines)
machines = ','.join(machines)
else:
raise ValueError("Invalid machines in params.")
params = _choose_param_value(
main_param_name="num_machines",
params=params,
default_value=num_machines_from_machine_list
)
params = _choose_param_value(
main_param_name="local_listen_port",
params=params,
default_value=12400
)
self.set_network(
machines=machines,
local_listen_port=params["local_listen_port"],
listen_time_out=params.get("time_out", 120),
num_machines=params["num_machines"]
)
# construct booster object
train_set.construct()
# copy the parameters from train_set
params.update(train_set.get_params())
params_str = param_dict_to_str(params)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_BoosterCreate(
train_set.handle,
c_str(params_str),
ctypes.byref(self.handle)))
# save reference to data
self.train_set = train_set
self.valid_sets = []
self.name_valid_sets = []
self.__num_dataset = 1
self.__init_predictor = train_set._predictor
if self.__init_predictor is not None:
_safe_call(_LIB.LGBM_BoosterMerge(
self.handle,
self.__init_predictor.handle))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
# buffer for inner predict
self.__inner_predict_buffer = [None]
self.__is_predicted_cur_iter = [False]
self.__get_eval_info()
self.pandas_categorical = train_set.pandas_categorical
self.train_set_version = train_set.version
elif model_file is not None:
# Prediction task
out_num_iterations = ctypes.c_int(0)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
c_str(str(model_file)),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
self.pandas_categorical = _load_pandas_categorical(file_name=model_file)
elif model_str is not None:
self.model_from_string(model_str, not silent)
else:
raise TypeError('Need at least one training dataset or model file or model string '
'to create Booster instance')
self.params = params
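    # Example (a hedged, minimal sketch; parameter values are arbitrary): build a
    # Booster directly from a Dataset and run a few boosting iterations. Most users
    # go through ``lightgbm.train()`` instead, which wraps this class.
    #
    #     import lightgbm as lgb
    #
    #     train_data = lgb.Dataset(X, label=y)  # ``X``/``y`` are assumed arrays
    #     bst = lgb.Booster(params={'objective': 'regression', 'verbose': -1},
    #                       train_set=train_data)
    #     for _ in range(10):
    #         bst.update()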
def __del__(self):
try:
if self.network:
self.free_network()
except AttributeError:
pass
try:
if self.handle is not None:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
except AttributeError:
pass
def __copy__(self):
return self.__deepcopy__(None)
def __deepcopy__(self, _):
model_str = self.model_to_string(num_iteration=-1)
booster = Booster(model_str=model_str)
return booster
def __getstate__(self):
this = self.__dict__.copy()
handle = this['handle']
this.pop('train_set', None)
this.pop('valid_sets', None)
if handle is not None:
this["handle"] = self.model_to_string(num_iteration=-1)
return this
def __setstate__(self, state):
model_str = state.get('handle', None)
if model_str is not None:
handle = ctypes.c_void_p()
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterLoadModelFromString(
c_str(model_str),
ctypes.byref(out_num_iterations),
ctypes.byref(handle)))
state['handle'] = handle
self.__dict__.update(state)
def free_dataset(self):
"""Free Booster's Datasets.
Returns
-------
self : Booster
Booster without Datasets.
"""
self.__dict__.pop('train_set', None)
self.__dict__.pop('valid_sets', None)
self.__num_dataset = 0
return self
def _free_buffer(self):
self.__inner_predict_buffer = []
self.__is_predicted_cur_iter = []
return self
def set_network(
self,
machines: Union[List[str], Set[str], str],
local_listen_port: int = 12400,
listen_time_out: int = 120,
num_machines: int = 1
) -> "Booster":
"""Set the network configuration.
Parameters
----------
machines : list, set or string
Names of machines.
local_listen_port : int, optional (default=12400)
TCP listen port for local machines.
listen_time_out : int, optional (default=120)
Socket time-out in minutes.
num_machines : int, optional (default=1)
The number of machines for distributed learning application.
Returns
-------
self : Booster
Booster with set network.
"""
if isinstance(machines, (list, set)):
machines = ','.join(machines)
_safe_call(_LIB.LGBM_NetworkInit(c_str(machines),
ctypes.c_int(local_listen_port),
ctypes.c_int(listen_time_out),
ctypes.c_int(num_machines)))
self.network = True
return self
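    # Example (hedged sketch; host names and the port are placeholders): manual
    # network setup for distributed learning. When "machines" is passed via
    # ``params``, the constructor sets up the network automatically.
    #
    #     bst.set_network(machines='10.0.0.1:12400,10.0.0.2:12400',
    #                     local_listen_port=12400, num_machines=2)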
def free_network(self):
"""Free Booster's network.
Returns
-------
self : Booster
Booster with freed network.
"""
_safe_call(_LIB.LGBM_NetworkFree())
self.network = False
return self
def trees_to_dataframe(self):
"""Parse the fitted model and return in an easy-to-read pandas DataFrame.
The returned DataFrame has the following columns.
- ``tree_index`` : int64, which tree a node belongs to. 0-based, so a value of ``6``, for example, means "this node is in the 7th tree".
- ``node_depth`` : int64, how far a node is from the root of the tree. The root node has a value of ``1``, its direct children are ``2``, etc.
- ``node_index`` : string, unique identifier for a node.
- ``left_child`` : string, ``node_index`` of the child node to the left of a split. ``None`` for leaf nodes.
- ``right_child`` : string, ``node_index`` of the child node to the right of a split. ``None`` for leaf nodes.
- ``parent_index`` : string, ``node_index`` of this node's parent. ``None`` for the root node.
- ``split_feature`` : string, name of the feature used for splitting. ``None`` for leaf nodes.
- ``split_gain`` : float64, gain from adding this split to the tree. ``NaN`` for leaf nodes.
- ``threshold`` : float64, value of the feature used to decide which side of the split a record will go down. ``NaN`` for leaf nodes.
- ``decision_type`` : string, logical operator describing how to compare a value to ``threshold``.
For example, ``split_feature = "Column_10", threshold = 15, decision_type = "<="`` means that
          records where ``Column_10 <= 15`` follow the left side of the split and the remaining records follow the right side. ``None`` for leaf nodes.
- ``missing_direction`` : string, split direction that missing values should go to. ``None`` for leaf nodes.
- ``missing_type`` : string, describes what types of values are treated as missing.
- ``value`` : float64, predicted value for this leaf node, multiplied by the learning rate.
- ``weight`` : float64 or int64, sum of hessian (second-order derivative of objective), summed over observations that fall in this node.
- ``count`` : int64, number of records in the training data that fall into this node.
Returns
-------
result : pandas DataFrame
Returns a pandas DataFrame of the parsed model.
"""
if not PANDAS_INSTALLED:
raise LightGBMError('This method cannot be run without pandas installed. '
'You must install pandas and restart your session to use this method.')
if self.num_trees() == 0:
raise LightGBMError('There are no trees in this Booster and thus nothing to parse')
def _is_split_node(tree):
return 'split_index' in tree.keys()
def create_node_record(tree, node_depth=1, tree_index=None,
feature_names=None, parent_node=None):
def _get_node_index(tree, tree_index):
tree_num = f'{tree_index}-' if tree_index is not None else ''
is_split = _is_split_node(tree)
node_type = 'S' if is_split else 'L'
                # a single-node tree won't have `leaf_index`, so default to 0
node_num = tree.get('split_index' if is_split else 'leaf_index', 0)
return f"{tree_num}{node_type}{node_num}"
def _get_split_feature(tree, feature_names):
if _is_split_node(tree):
if feature_names is not None:
feature_name = feature_names[tree['split_feature']]
else:
feature_name = tree['split_feature']
else:
feature_name = None
return feature_name
def _is_single_node_tree(tree):
return set(tree.keys()) == {'leaf_value'}
# Create the node record, and populate universal data members
node = OrderedDict()
node['tree_index'] = tree_index
node['node_depth'] = node_depth
node['node_index'] = _get_node_index(tree, tree_index)
node['left_child'] = None
node['right_child'] = None
node['parent_index'] = parent_node
node['split_feature'] = _get_split_feature(tree, feature_names)
node['split_gain'] = None
node['threshold'] = None
node['decision_type'] = None
node['missing_direction'] = None
node['missing_type'] = None
node['value'] = None
node['weight'] = None
node['count'] = None
# Update values to reflect node type (leaf or split)
if _is_split_node(tree):
node['left_child'] = _get_node_index(tree['left_child'], tree_index)
node['right_child'] = _get_node_index(tree['right_child'], tree_index)
node['split_gain'] = tree['split_gain']
node['threshold'] = tree['threshold']
node['decision_type'] = tree['decision_type']
node['missing_direction'] = 'left' if tree['default_left'] else 'right'
node['missing_type'] = tree['missing_type']
node['value'] = tree['internal_value']
node['weight'] = tree['internal_weight']
node['count'] = tree['internal_count']
else:
node['value'] = tree['leaf_value']
if not _is_single_node_tree(tree):
node['weight'] = tree['leaf_weight']
node['count'] = tree['leaf_count']
return node
def tree_dict_to_node_list(tree, node_depth=1, tree_index=None,
feature_names=None, parent_node=None):
node = create_node_record(tree,
node_depth=node_depth,
tree_index=tree_index,
feature_names=feature_names,
parent_node=parent_node)
res = [node]
if _is_split_node(tree):
# traverse the next level of the tree
children = ['left_child', 'right_child']
for child in children:
subtree_list = tree_dict_to_node_list(
tree[child],
node_depth=node_depth + 1,
tree_index=tree_index,
feature_names=feature_names,
parent_node=node['node_index'])
# In tree format, "subtree_list" is a list of node records (dicts),
# and we add node to the list.
res.extend(subtree_list)
return res
model_dict = self.dump_model()
feature_names = model_dict['feature_names']
model_list = []
for tree in model_dict['tree_info']:
model_list.extend(tree_dict_to_node_list(tree['tree_structure'],
tree_index=tree['tree_index'],
feature_names=feature_names))
return pd_DataFrame(model_list, columns=model_list[0].keys())
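    # Example (hedged sketch; ``bst`` is an assumed trained Booster): inspect the
    # split structure of the model as a pandas DataFrame.
    #
    #     df = bst.trees_to_dataframe()
    #     df[df['split_feature'].notnull()].head()  # split nodes only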
def set_train_data_name(self, name):
"""Set the name to the training Dataset.
Parameters
----------
name : string
Name for the training Dataset.
Returns
-------
self : Booster
Booster with set training Dataset name.
"""
self._train_data_name = name
return self
def add_valid(self, data, name):
"""Add validation data.
Parameters
----------
data : Dataset
Validation data.
name : string
Name of validation data.
Returns
-------
self : Booster
Booster with set validation data.
"""
if not isinstance(data, Dataset):
raise TypeError(f'Validation data should be Dataset instance, met {type(data).__name__}')
if data._predictor is not self.__init_predictor:
raise LightGBMError("Add validation data failed, "
"you should use same predictor for these data")
_safe_call(_LIB.LGBM_BoosterAddValidData(
self.handle,
data.construct().handle))
self.valid_sets.append(data)
self.name_valid_sets.append(name)
self.__num_dataset += 1
self.__inner_predict_buffer.append(None)
self.__is_predicted_cur_iter.append(False)
return self
def reset_parameter(self, params):
"""Reset parameters of Booster.
Parameters
----------
params : dict
New parameters for Booster.
Returns
-------
self : Booster
Booster with new parameters.
"""
params_str = param_dict_to_str(params)
if params_str:
_safe_call(_LIB.LGBM_BoosterResetParameter(
self.handle,
c_str(params_str)))
self.params.update(params)
return self
def update(self, train_set=None, fobj=None):
"""Update Booster for one iteration.
Parameters
----------
train_set : Dataset or None, optional (default=None)
Training data.
If None, last training data is used.
fobj : callable or None, optional (default=None)
Customized objective function.
Should accept two parameters: preds, train_data,
and return (grad, hess).
preds : list or numpy 1-D array
The predicted values.
Predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task.
train_data : Dataset
The training dataset.
grad : list or numpy 1-D array
The value of the first order derivative (gradient) of the loss
with respect to the elements of preds for each sample point.
hess : list or numpy 1-D array
The value of the second order derivative (Hessian) of the loss
with respect to the elements of preds for each sample point.
            For multi-class task, preds are grouped by class_id first, then by row_id.
            If you want to get the i-th row preds in the j-th class, the access is score[j * num_data + i],
            and you should group grad and hess in the same way.
Returns
-------
is_finished : bool
Whether the update was successfully finished.
"""
# need reset training data
if train_set is None and self.train_set_version != self.train_set.version:
train_set = self.train_set
is_the_same_train_set = False
else:
is_the_same_train_set = train_set is self.train_set and self.train_set_version == train_set.version
if train_set is not None and not is_the_same_train_set:
if not isinstance(train_set, Dataset):
raise TypeError(f'Training data should be Dataset instance, met {type(train_set).__name__}')
if train_set._predictor is not self.__init_predictor:
raise LightGBMError("Replace training data failed, "
"you should use same predictor for these data")
self.train_set = train_set
_safe_call(_LIB.LGBM_BoosterResetTrainingData(
self.handle,
self.train_set.construct().handle))
self.__inner_predict_buffer[0] = None
self.train_set_version = self.train_set.version
is_finished = ctypes.c_int(0)
if fobj is None:
if self.__set_objective_to_none:
raise LightGBMError('Cannot update due to null objective function.')
_safe_call(_LIB.LGBM_BoosterUpdateOneIter(
self.handle,
ctypes.byref(is_finished)))
self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)]
return is_finished.value == 1
else:
if not self.__set_objective_to_none:
self.reset_parameter({"objective": "none"}).__set_objective_to_none = True
grad, hess = fobj(self.__inner_predict(0), self.train_set)
return self.__boost(grad, hess)
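    # Example (hedged sketch): one boosting round with a custom objective. The
    # gradient/hessian below correspond to squared error and are only illustrative;
    # ``bst`` is an assumed Booster built with a training Dataset.
    #
    #     import numpy as np
    #
    #     def l2_obj(preds, train_data):
    #         grad = preds - train_data.get_label()
    #         hess = np.ones_like(preds)
    #         return grad, hess
    #
    #     bst.update(fobj=l2_obj)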
def __boost(self, grad, hess):
"""Boost Booster for one iteration with customized gradient statistics.
.. note::
Score is returned before any transformation,
e.g. it is raw margin instead of probability of positive class for binary task.
            For multi-class task, the score is grouped by class_id first, then by row_id.
            If you want to get the i-th row score in the j-th class, the access is score[j * num_data + i],
            and you should group grad and hess in the same way.
Parameters
----------
grad : list or numpy 1-D array
The value of the first order derivative (gradient) of the loss
with respect to the elements of score for each sample point.
hess : list or numpy 1-D array
The value of the second order derivative (Hessian) of the loss
with respect to the elements of score for each sample point.
Returns
-------
is_finished : bool
Whether the boost was successfully finished.
"""
grad = list_to_1d_numpy(grad, name='gradient')
hess = list_to_1d_numpy(hess, name='hessian')
assert grad.flags.c_contiguous
assert hess.flags.c_contiguous
if len(grad) != len(hess):
raise ValueError(f"Lengths of gradient({len(grad)}) and hessian({len(hess)}) don't match")
is_finished = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterUpdateOneIterCustom(
self.handle,
grad.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
hess.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
ctypes.byref(is_finished)))
self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)]
return is_finished.value == 1
def rollback_one_iter(self):
"""Rollback one iteration.
Returns
-------
self : Booster
Booster with rolled back one iteration.
"""
_safe_call(_LIB.LGBM_BoosterRollbackOneIter(
self.handle))
self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)]
return self
def current_iteration(self):
"""Get the index of the current iteration.
Returns
-------
cur_iter : int
The index of the current iteration.
"""
out_cur_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
self.handle,
ctypes.byref(out_cur_iter)))
return out_cur_iter.value
def num_model_per_iteration(self):
"""Get number of models per iteration.
Returns
-------
model_per_iter : int
The number of models per iteration.
"""
model_per_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterNumModelPerIteration(
self.handle,
ctypes.byref(model_per_iter)))
return model_per_iter.value
def num_trees(self):
"""Get number of weak sub-models.
Returns
-------
num_trees : int
The number of weak sub-models.
"""
num_trees = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterNumberOfTotalModel(
self.handle,
ctypes.byref(num_trees)))
return num_trees.value
def upper_bound(self):
"""Get upper bound value of a model.
Returns
-------
upper_bound : double
Upper bound value of the model.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetUpperBoundValue(
self.handle,
ctypes.byref(ret)))
return ret.value
def lower_bound(self):
"""Get lower bound value of a model.
Returns
-------
lower_bound : double
Lower bound value of the model.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetLowerBoundValue(
self.handle,
ctypes.byref(ret)))
return ret.value
def eval(self, data, name, feval=None):
"""Evaluate for data.
Parameters
----------
data : Dataset
Data for the evaluating.
name : string
Name of the data.
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, eval_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
If ``fobj`` is specified, predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task in this case.
eval_data : Dataset
The evaluation dataset.
eval_name : string
The name of evaluation function (without whitespace).
eval_result : float
The eval result.
is_higher_better : bool
                    Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.
            For multi-class task, preds are grouped by class_id first, then by row_id.
            If you want to get the i-th row preds in the j-th class, the access is preds[j * num_data + i].
Returns
-------
result : list
List with evaluation results.
"""
if not isinstance(data, Dataset):
raise TypeError("Can only eval for Dataset instance")
data_idx = -1
if data is self.train_set:
data_idx = 0
else:
for i in range(len(self.valid_sets)):
if data is self.valid_sets[i]:
data_idx = i + 1
break
# need to push new valid data
if data_idx == -1:
self.add_valid(data, name)
data_idx = self.__num_dataset - 1
return self.__inner_eval(name, data_idx, feval)
def eval_train(self, feval=None):
"""Evaluate for training data.
Parameters
----------
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, train_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
If ``fobj`` is specified, predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task in this case.
train_data : Dataset
The training dataset.
eval_name : string
The name of evaluation function (without whitespace).
eval_result : float
The eval result.
is_higher_better : bool
                    Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.
            For multi-class task, preds are grouped by class_id first, then by row_id.
            If you want to get i-th row preds in j-th class, use preds[j * num_data + i].
Returns
-------
result : list
List with evaluation results.
"""
return self.__inner_eval(self._train_data_name, 0, feval)
def eval_valid(self, feval=None):
"""Evaluate for validation data.
Parameters
----------
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, valid_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
If ``fobj`` is specified, predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task in this case.
valid_data : Dataset
The validation dataset.
eval_name : string
The name of evaluation function (without whitespace).
eval_result : float
The eval result.
is_higher_better : bool
                    Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.
            For multi-class task, preds are grouped by class_id first, then by row_id.
            If you want to get i-th row preds in j-th class, use preds[j * num_data + i].
Returns
-------
result : list
List with evaluation results.
"""
return [item for i in range(1, self.__num_dataset)
for item in self.__inner_eval(self.name_valid_sets[i - 1], i, feval)]
def save_model(self, filename, num_iteration=None, start_iteration=0, importance_type='split'):
"""Save Booster to file.
Parameters
----------
filename : string or pathlib.Path
Filename to save Booster.
num_iteration : int or None, optional (default=None)
Index of the iteration that should be saved.
If None, if the best iteration exists, it is saved; otherwise, all iterations are saved.
If <= 0, all iterations are saved.
start_iteration : int, optional (default=0)
Start index of the iteration that should be saved.
importance_type : string, optional (default="split")
What type of feature importance should be saved.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
Returns
-------
self : Booster
Returns self.
"""
if num_iteration is None:
num_iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
_safe_call(_LIB.LGBM_BoosterSaveModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
c_str(str(filename))))
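        # Pandas categorical mappings are appended to the model file so that
        # categorical columns can be encoded consistently after reloading.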
_dump_pandas_categorical(self.pandas_categorical, filename)
return self
def shuffle_models(self, start_iteration=0, end_iteration=-1):
"""Shuffle models.
Parameters
----------
start_iteration : int, optional (default=0)
The first iteration that will be shuffled.
end_iteration : int, optional (default=-1)
The last iteration that will be shuffled.
If <= 0, means the last available iteration.
Returns
-------
self : Booster
Booster with shuffled models.
"""
_safe_call(_LIB.LGBM_BoosterShuffleModels(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(end_iteration)))
return self
def model_from_string(self, model_str, verbose=True):
"""Load Booster from a string.
Parameters
----------
model_str : string
Model will be loaded from this string.
verbose : bool, optional (default=True)
Whether to print messages while loading model.
Returns
-------
self : Booster
Loaded Booster object.
"""
if self.handle is not None:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
self._free_buffer()
self.handle = ctypes.c_void_p()
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterLoadModelFromString(
c_str(model_str),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
if verbose:
_log_info(f'Finished loading model, total used {int(out_num_iterations.value)} iterations')
self.__num_class = out_num_class.value
self.pandas_categorical = _load_pandas_categorical(model_str=model_str)
return self
def model_to_string(self, num_iteration=None, start_iteration=0, importance_type='split'):
"""Save Booster to string.
Parameters
----------
num_iteration : int or None, optional (default=None)
Index of the iteration that should be saved.
If None, if the best iteration exists, it is saved; otherwise, all iterations are saved.
If <= 0, all iterations are saved.
start_iteration : int, optional (default=0)
Start index of the iteration that should be saved.
importance_type : string, optional (default="split")
What type of feature importance should be saved.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
Returns
-------
str_repr : string
String representation of Booster.
"""
if num_iteration is None:
num_iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
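        # Serialize in two passes: try a 1 MB buffer first and, if the C API
        # reports a larger required size, retry with an exactly-sized buffer.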
buffer_len = 1 << 20
tmp_out_len = ctypes.c_int64(0)
string_buffer = ctypes.create_string_buffer(buffer_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterSaveModelToString(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(buffer_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
actual_len = tmp_out_len.value
# if buffer length is not long enough, re-allocate a buffer
if actual_len > buffer_len:
string_buffer = ctypes.create_string_buffer(actual_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterSaveModelToString(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(actual_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
ret = string_buffer.value.decode('utf-8')
ret += _dump_pandas_categorical(self.pandas_categorical)
return ret
def dump_model(self, num_iteration=None, start_iteration=0, importance_type='split'):
"""Dump Booster to JSON format.
Parameters
----------
num_iteration : int or None, optional (default=None)
Index of the iteration that should be dumped.
If None, if the best iteration exists, it is dumped; otherwise, all iterations are dumped.
If <= 0, all iterations are dumped.
start_iteration : int, optional (default=0)
Start index of the iteration that should be dumped.
importance_type : string, optional (default="split")
What type of feature importance should be dumped.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
Returns
-------
json_repr : dict
JSON format of Booster.
"""
if num_iteration is None:
num_iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
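        # Same two-pass buffer pattern as model_to_string(): dump into a 1 MB
        # buffer and retry with the exact required size if the JSON is larger.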
buffer_len = 1 << 20
tmp_out_len = ctypes.c_int64(0)
string_buffer = ctypes.create_string_buffer(buffer_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterDumpModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(buffer_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
actual_len = tmp_out_len.value
# if buffer length is not long enough, reallocate a buffer
if actual_len > buffer_len:
string_buffer = ctypes.create_string_buffer(actual_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterDumpModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(actual_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
ret = json.loads(string_buffer.value.decode('utf-8'))
ret['pandas_categorical'] = json.loads(json.dumps(self.pandas_categorical,
default=json_default_with_numpy))
return ret
def predict(self, data, start_iteration=0, num_iteration=None,
raw_score=False, pred_leaf=False, pred_contrib=False,
data_has_header=False, is_reshape=True, **kwargs):
"""Make a prediction.
Parameters
----------
data : string, pathlib.Path, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for prediction.
If string or pathlib.Path, it represents the path to txt file.
start_iteration : int, optional (default=0)
Start index of the iteration to predict.
If <= 0, starts from the first iteration.
num_iteration : int or None, optional (default=None)
Total number of iterations used in the prediction.
If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
otherwise, all iterations from ``start_iteration`` are used (no limits).
If <= 0, all iterations from ``start_iteration`` are used (no limits).
raw_score : bool, optional (default=False)
Whether to predict raw scores.
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
.. note::
If you want to get more explanations for your model's predictions using SHAP values,
like SHAP interaction values,
you can install the shap package (https://github.com/slundberg/shap).
Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
column, where the last column is the expected value.
data_has_header : bool, optional (default=False)
Whether the data has header.
Used only if data is string.
is_reshape : bool, optional (default=True)
If True, result is reshaped to [nrow, ncol].
**kwargs
Other parameters for the prediction.
Returns
-------
result : numpy array, scipy.sparse or list of scipy.sparse
Prediction result.
Can be sparse or a list of sparse objects (each element represents predictions for one class) for feature contributions (when ``pred_contrib=True``).
"""
predictor = self._to_predictor(deepcopy(kwargs))
if num_iteration is None:
if start_iteration <= 0:
num_iteration = self.best_iteration
else:
num_iteration = -1
return predictor.predict(data, start_iteration, num_iteration,
raw_score, pred_leaf, pred_contrib,
data_has_header, is_reshape)
def refit(self, data, label, decay_rate=0.9, **kwargs):
"""Refit the existing Booster by new data.
Parameters
----------
data : string, pathlib.Path, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for refit.
If string or pathlib.Path, it represents the path to txt file.
label : list, numpy 1-D array or pandas Series / one-column DataFrame
Label for refit.
decay_rate : float, optional (default=0.9)
Decay rate of refit,
will use ``leaf_output = decay_rate * old_leaf_output + (1.0 - decay_rate) * new_leaf_output`` to refit trees.
**kwargs
Other parameters for refit.
These parameters will be passed to ``predict`` method.
Returns
-------
result : Booster
Refitted Booster.
"""
if self.__set_objective_to_none:
raise LightGBMError('Cannot refit due to null objective function.')
predictor = self._to_predictor(deepcopy(kwargs))
leaf_preds = predictor.predict(data, -1, pred_leaf=True)
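        # Leaf indices of the new rows in the existing trees; the refit call
        # below uses them to re-estimate leaf outputs from the new labels,
        # blended with the old outputs according to decay_rate.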
nrow, ncol = leaf_preds.shape
out_is_linear = ctypes.c_bool(False)
_safe_call(_LIB.LGBM_BoosterGetLinear(
self.handle,
ctypes.byref(out_is_linear)))
new_params = _choose_param_value(
main_param_name="linear_tree",
params=self.params,
default_value=None
)
new_params["linear_tree"] = out_is_linear.value
train_set = Dataset(data, label, silent=True, params=new_params)
new_params['refit_decay_rate'] = decay_rate
new_booster = Booster(new_params, train_set)
# Copy models
_safe_call(_LIB.LGBM_BoosterMerge(
new_booster.handle,
predictor.handle))
leaf_preds = leaf_preds.reshape(-1)
ptr_data, _, _ = c_int_array(leaf_preds)
_safe_call(_LIB.LGBM_BoosterRefit(
new_booster.handle,
ptr_data,
ctypes.c_int32(nrow),
ctypes.c_int32(ncol)))
new_booster.network = self.network
new_booster.__attr = self.__attr.copy()
return new_booster
def get_leaf_output(self, tree_id, leaf_id):
"""Get the output of a leaf.
Parameters
----------
tree_id : int
The index of the tree.
leaf_id : int
The index of the leaf in the tree.
Returns
-------
result : float
The output of the leaf.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetLeafValue(
self.handle,
ctypes.c_int(tree_id),
ctypes.c_int(leaf_id),
ctypes.byref(ret)))
return ret.value
def _to_predictor(self, pred_parameter=None):
"""Convert to predictor."""
predictor = _InnerPredictor(booster_handle=self.handle, pred_parameter=pred_parameter)
predictor.pandas_categorical = self.pandas_categorical
return predictor
def num_feature(self):
"""Get number of features.
Returns
-------
num_feature : int
The number of features.
"""
out_num_feature = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumFeature(
self.handle,
ctypes.byref(out_num_feature)))
return out_num_feature.value
def feature_name(self):
"""Get names of features.
Returns
-------
result : list
List with names of features.
"""
num_feature = self.num_feature()
# Get name of features
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
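        # Reserve 255 bytes per name first; if any name is longer, the C API
        # reports the required size and the query is repeated with larger buffers.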
string_buffers = [ctypes.create_string_buffer(reserved_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(reserved_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if num_feature != tmp_out_len.value:
raise ValueError("Length of feature names doesn't equal with num_feature")
actual_string_buffer_size = required_string_buffer_size.value
# if buffer length is not long enough, reallocate buffers
if reserved_string_buffer_size < actual_string_buffer_size:
string_buffers = [ctypes.create_string_buffer(actual_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(actual_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
return [string_buffers[i].value.decode('utf-8') for i in range(num_feature)]
def feature_importance(self, importance_type='split', iteration=None):
"""Get feature importances.
Parameters
----------
importance_type : string, optional (default="split")
How the importance is calculated.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
iteration : int or None, optional (default=None)
Limit number of iterations in the feature importance calculation.
If None, if the best iteration exists, it is used; otherwise, all trees are used.
If <= 0, all trees are used (no limits).
Returns
-------
result : numpy array
Array with feature importances.
"""
if iteration is None:
iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
result = np.empty(self.num_feature(), dtype=np.float64)
_safe_call(_LIB.LGBM_BoosterFeatureImportance(
self.handle,
ctypes.c_int(iteration),
ctypes.c_int(importance_type_int),
result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if importance_type_int == 0:
return result.astype(np.int32)
else:
return result
def get_split_value_histogram(self, feature, bins=None, xgboost_style=False):
"""Get split value histogram for the specified feature.
Parameters
----------
feature : int or string
The feature name or index the histogram is calculated for.
If int, interpreted as index.
If string, interpreted as name.
.. warning::
Categorical features are not supported.
bins : int, string or None, optional (default=None)
The maximum number of bins.
If None, or int and > number of unique split values and ``xgboost_style=True``,
the number of bins equals number of unique split values.
If string, it should be one from the list of the supported values by ``numpy.histogram()`` function.
xgboost_style : bool, optional (default=False)
Whether the returned result should be in the same form as it is in XGBoost.
If False, the returned value is tuple of 2 numpy arrays as it is in ``numpy.histogram()`` function.
If True, the returned value is matrix, in which the first column is the right edges of non-empty bins
and the second one is the histogram values.
Returns
-------
result_tuple : tuple of 2 numpy arrays
If ``xgboost_style=False``, the values of the histogram of used splitting values for the specified feature
and the bin edges.
result_array_like : numpy array or pandas DataFrame (if pandas is installed)
If ``xgboost_style=True``, the histogram of used splitting values for the specified feature.
"""
def add(root):
"""Recursively add thresholds."""
if 'split_index' in root: # non-leaf
if feature_names is not None and isinstance(feature, str):
split_feature = feature_names[root['split_feature']]
else:
split_feature = root['split_feature']
if split_feature == feature:
if isinstance(root['threshold'], str):
raise LightGBMError('Cannot compute split value histogram for the categorical feature')
else:
values.append(root['threshold'])
add(root['left_child'])
add(root['right_child'])
model = self.dump_model()
feature_names = model.get('feature_names')
tree_infos = model['tree_info']
values = []
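        # Walk every tree and collect the numeric split thresholds used for the
        # requested feature; the histogram is built over these thresholds.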
for tree_info in tree_infos:
add(tree_info['tree_structure'])
        if bins is None or (isinstance(bins, int) and xgboost_style):
n_unique = len(np.unique(values))
bins = max(min(n_unique, bins) if bins is not None else n_unique, 1)
hist, bin_edges = np.histogram(values, bins=bins)
if xgboost_style:
ret = np.column_stack((bin_edges[1:], hist))
ret = ret[ret[:, 1] > 0]
if PANDAS_INSTALLED:
return pd_DataFrame(ret, columns=['SplitValue', 'Count'])
else:
return ret
else:
return hist, bin_edges
def __inner_eval(self, data_name, data_idx, feval=None):
"""Evaluate training or validation data."""
if data_idx >= self.__num_dataset:
raise ValueError("Data_idx should be smaller than number of dataset")
self.__get_eval_info()
ret = []
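        # Built-in metric values come from the C API first; custom ``feval``
        # results are appended below in the same
        # (data_name, eval_name, value, is_higher_better) form.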
if self.__num_inner_eval > 0:
result = np.empty(self.__num_inner_eval, dtype=np.float64)
tmp_out_len = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetEval(
self.handle,
ctypes.c_int(data_idx),
ctypes.byref(tmp_out_len),
result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if tmp_out_len.value != self.__num_inner_eval:
raise ValueError("Wrong length of eval results")
for i in range(self.__num_inner_eval):
ret.append((data_name, self.__name_inner_eval[i],
result[i], self.__higher_better_inner_eval[i]))
if callable(feval):
feval = [feval]
if feval is not None:
if data_idx == 0:
cur_data = self.train_set
else:
cur_data = self.valid_sets[data_idx - 1]
for eval_function in feval:
if eval_function is None:
continue
feval_ret = eval_function(self.__inner_predict(data_idx), cur_data)
if isinstance(feval_ret, list):
for eval_name, val, is_higher_better in feval_ret:
ret.append((data_name, eval_name, val, is_higher_better))
else:
eval_name, val, is_higher_better = feval_ret
ret.append((data_name, eval_name, val, is_higher_better))
return ret
def __inner_predict(self, data_idx):
"""Predict for training and validation dataset."""
if data_idx >= self.__num_dataset:
raise ValueError("Data_idx should be smaller than number of dataset")
if self.__inner_predict_buffer[data_idx] is None:
if data_idx == 0:
n_preds = self.train_set.num_data() * self.__num_class
else:
n_preds = self.valid_sets[data_idx - 1].num_data() * self.__num_class
self.__inner_predict_buffer[data_idx] = np.empty(n_preds, dtype=np.float64)
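        # One buffer is kept per dataset and reused across iterations.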
        # avoid predicting more than once within a single iteration
if not self.__is_predicted_cur_iter[data_idx]:
tmp_out_len = ctypes.c_int64(0)
data_ptr = self.__inner_predict_buffer[data_idx].ctypes.data_as(ctypes.POINTER(ctypes.c_double))
_safe_call(_LIB.LGBM_BoosterGetPredict(
self.handle,
ctypes.c_int(data_idx),
ctypes.byref(tmp_out_len),
data_ptr))
if tmp_out_len.value != len(self.__inner_predict_buffer[data_idx]):
raise ValueError(f"Wrong length of predict results for data {data_idx}")
self.__is_predicted_cur_iter[data_idx] = True
return self.__inner_predict_buffer[data_idx]
def __get_eval_info(self):
"""Get inner evaluation count and names."""
if self.__need_reload_eval_info:
self.__need_reload_eval_info = False
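            # Metric count and names are queried once and cached until
            # __need_reload_eval_info is set again (e.g. by adding valid data).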
out_num_eval = ctypes.c_int(0)
# Get num of inner evals
_safe_call(_LIB.LGBM_BoosterGetEvalCounts(
self.handle,
ctypes.byref(out_num_eval)))
self.__num_inner_eval = out_num_eval.value
if self.__num_inner_eval > 0:
# Get name of eval metrics
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
string_buffers = [
ctypes.create_string_buffer(reserved_string_buffer_size) for _ in range(self.__num_inner_eval)
]
ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetEvalNames(
self.handle,
ctypes.c_int(self.__num_inner_eval),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(reserved_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if self.__num_inner_eval != tmp_out_len.value:
raise ValueError("Length of eval names doesn't equal with num_evals")
actual_string_buffer_size = required_string_buffer_size.value
# if buffer length is not long enough, reallocate buffers
if reserved_string_buffer_size < actual_string_buffer_size:
string_buffers = [
ctypes.create_string_buffer(actual_string_buffer_size) for _ in range(self.__num_inner_eval)
]
ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetEvalNames(
self.handle,
ctypes.c_int(self.__num_inner_eval),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(actual_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
self.__name_inner_eval = [
string_buffers[i].value.decode('utf-8') for i in range(self.__num_inner_eval)
]
self.__higher_better_inner_eval = [
name.startswith(('auc', 'ndcg@', 'map@', 'average_precision')) for name in self.__name_inner_eval
]
def attr(self, key):
"""Get attribute string from the Booster.
Parameters
----------
key : string
The name of the attribute.
Returns
-------
value : string or None
The attribute value.
Returns None if attribute does not exist.
"""
return self.__attr.get(key, None)
def set_attr(self, **kwargs):
"""Set attributes to the Booster.
Parameters
----------
**kwargs
The attributes to set.
Setting a value to None deletes an attribute.
Returns
-------
self : Booster
Booster with set attributes.
"""
for key, value in kwargs.items():
if value is not None:
if not isinstance(value, str):
raise ValueError("Only string values are accepted")
self.__attr[key] = value
else:
self.__attr.pop(key, None)
return self
| mit |
lmjohns3/movement-classify-experiment | scripts/train-classifier.py | 1 | 2807 | import climate
import cPickle
import gzip
import lmj.plot
import numpy as np
import os
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
logging = climate.get_logger('train-classifier')
PLOT_CM = False
def main(filename):
dataset = cPickle.load(gzip.open(filename))
filename = os.path.basename(filename)
X = []
y = []
labels = []
for k in sorted(dataset):
v = dataset[k]
if len(v) < 11 or 'human subject' not in k:
continue
for x in v:
X.append(x)
y.append(len(labels))
logging.info('%s: %d examples', k, len(v))
labels.append(k)
label_length = max(len(l) for l in labels)
X = np.array(X)
y = np.array(y)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=12345)
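    # Hold out 30% of the examples for the confusion matrix and per-class
    # metrics; the cross-validation scores below are computed on the training
    # split only.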
Model = LogisticRegression
scores = cross_val_score(Model(), X_train, y_train, cv=9, n_jobs=3)
logging.info('%s: scores %.3f +/- %.3f', filename, scores.mean(), scores.std() / 3)
f1s = cross_val_score(Model(), X_train, y_train, cv=9, n_jobs=3, scoring='f1')
logging.info('%s: f1s %.3f +/- %.3f', filename, f1s.mean(), f1s.std() / 3)
model = Model()
y_predict = model.fit(X_train, y_train).predict(X_test)
cm = confusion_matrix(y_test, y_predict)
logging.info('%s: confusion?\n%s', filename, cm)
for l, p, r, f in zip(labels,
precision_score(y_test, y_predict, average=None),
recall_score(y_test, y_predict, average=None),
f1_score(y_test, y_predict, average=None)):
logging.info('%s: %s P %.4f R %.4f F1 %.4f', filename, l.rjust(label_length), p, r, f)
if Model == RandomForestClassifier:
logging.info('top features: %s', (-model.feature_importances_).argsort()[:3])
if Model == LogisticRegression:
for i, l in enumerate(labels[:-1]):
coefs = model.coef_[i]
tops = (-coefs).argsort()
feats = ('{:3d} ({:5.2f})'.format(t, coefs[t]) for t in tops[:3])
logging.info('%s: %s: top features %s',
filename, l.rjust(label_length), ', '.join(feats))
if PLOT_CM:
ax = lmj.plot.axes(111, spines=False)
ax.imshow(cm)
ax.set_xticks(range(len(labels)))
ax.set_xticklabels(labels)
ax.set_xlabel('True label')
ax.set_yticks(range(len(labels)))
ax.set_yticklabels(labels)
ax.set_ylabel('Predicted label')
lmj.plot.show()
if __name__ == '__main__':
climate.call(main)
| mit |
mantidproject/mantid | Testing/SystemTests/scripts/performance/analysis.py | 3 | 22562 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
""" Module containing functions for test
performance analysis, plotting, and saving
to other formats (CSV, PDF) """
import testresult
import os
import sys
import sqlresults
from sqlresults import get_results
import matplotlib
from pylab import *
import numpy as np
import datetime
import random
# This is the date string format as returned by the database
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
#============================================================================================
def get_orderby_clause(last_num):
"""Returns a order by clause that limits to the last # revisions """
if last_num > 0:
return " ORDER BY revision DESC limit %d" % last_num
else:
return ''
#============================================================================================
def get_data(name='', type='', x_field='revision', y_field='runtime', last_num=-1):
"""Get the test runtime/iteration as a function of an X variable.
Parameters
----------
name :: full name of the test
type :: type of test to filter by
x_field :: name of the field for the X axis.
e.g. 'revision' (default)
or 'date' : exact date/time of launch
or 'index' : using the date, but returning an index of build #
instead of the date (better scaling)
last_num :: only get the last this-many entries from the table, sorted by revision.
if < 0, then get everything
Returns
-------
x :: list of X values, sorted increasing
y :: list of runtime/iteration for each x
"""
results = get_results(name, type, where_clause='', orderby_clause=get_orderby_clause(last_num))
# Data dict. Key = X variable; Value = (iterations total, runtime total)
data = {}
for res in results:
# Get the x field value
if x_field == 'index':
x = res['date']
else:
x = res[x_field]
        if x in data:
old = data[x]
iters = old[0] + 1 # Iterations
runtime = old[1] + res[y_field]
else:
iters = 1
runtime = res[y_field]
# Save the # of iterations and runtime
data[x] = (iters, runtime)
# Now make a sorted list of (x, runtime/iteration)
sorted = [(x, y[1]/y[0]) for (x,y) in data.items()]
sorted.sort()
x = [a for (a,b) in sorted]
# For index, convert into an integer index
if x_field == 'index':
x = range( len(x) )
y = [b for (a,b) in sorted]
return (x,y)
#============================================================================================
def get_unique_fields(results, field):
"""Given a list of TestResult, return a
list of all unique values of 'field'"""
out = set()
for res in results:
out.add( res[field] )
return list(out)
#============================================================================================
def get_results_matching(results, field, value):
"""Given a list of TestResult,
return a list of TestResult's where 'field' matches 'value'."""
out = []
for res in results:
if res[field] == value:
out.append(res)
return out
#============================================================================================
def smart_ticks(index, values):
"""On the current figure, set the ticks at X positions
given by index, with value given by values (ints).
But it tries to space them out in a reasonable way.
"""
if type(values[0]).__name__ == "unicode":
# Make the array of dates
dates = []
for val in values:
try:
                dates.append(datetime.datetime.strptime(val, DATE_STR_FORMAT))
except:
pass
if len(dates) == 0: return
td = dates[-1] - dates[0]
if (td < datetime.timedelta(hours=1)):
values_str = [d.strftime("%M:%S") for d in dates]
elif (td < datetime.timedelta(days=1)):
values_str = [d.strftime("%H:%M") for d in dates]
else:
values_str = [d.strftime("%m-%d, %H:%M") for d in dates]
else:
# convert to list of strings
values_str = [str(val) for val in values]
if len(values_str) == 0: return
w = gcf().get_figwidth()*gcf().get_dpi()
spacing = w/len(index)
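    # Emit a tick label only once enough horizontal space (roughly 12 pixels
    # per character) has accumulated since the previous label, so that the
    # labels do not overlap each other.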
tick_index = []
tick_strings = []
space_available = 0
for i in range(len(index)):
s = str(values_str[i]);
s_width = (len(s)+1) * 12.0 # About 12 pixels per letter? And add a space
space_available +=spacing
if space_available >= s_width:
space_available = 0
tick_index.append(i)
tick_strings.append(s)
xticks( tick_index, tick_strings )
#============================================================================================
def plot_success_count(type='system', last_num=-1, x_field='revision'):
""" Plot the count of successful/failed tests vs revision number
Parameters
----------
type :: 'system', or 'performance'
"""
results = get_results('', type, where_clause='', orderby_clause=get_orderby_clause(last_num))
revisions = get_unique_fields(results, x_field)
# Go through each revision
success = []
fail = []
for revision in revisions:
these = get_results_matching(results, x_field, revision)
succeeded = 0
failed = 0
for res in these:
if res["success"]:
succeeded += 1
else:
failed += 1
# Keep the list of them
success.append(succeeded)
fail.append(failed)
figure()
revisions = np.array(revisions)
fail = np.array(fail)
success = np.array(success)
index = np.arange(len(revisions))
# p1 = bar(index, fail, color='r')
# p2 = bar(index, success, color='g', bottom=fail)
# legend( (p1[0], p2[0]), ('Failure', 'Success') )
p1 = fill_between(index, fail, 0, color='r')
p2 = fill_between(index, success+fail, fail, color='g')
#legend( (p1, p2), ('Failure', 'Success') )
smart_ticks( index, revisions)
ylabel('Success/Fail')
xlabel(x_field)
revsare = "all revs"
if last_num > 0: revsare = "last %d revs" % last_num
title("Success/Fail History of %s tests (%s)" % (type, revsare))
#============================================================================================
def plot_runtime(*args, **kwargs):
""" Call get_data()
Parameters
----------
- See get_data() for the full list
"""
(x,y) = get_data(*args, **kwargs)
figure()
index = np.arange(len(x))
plot(index,y,'-b.')
smart_ticks( index, x)
ylabel('Runtime/iteration (sec)')
xlabel(kwargs['x_field'])
last_num =kwargs.get('last_num',-1)
if last_num > 0:
title("Runtime History of %s (last %d revs)" % (kwargs['name'], kwargs["last_num"]) )
else:
title("Runtime History of %s (all revs)" % kwargs['name'])
#============================================================================================
def plot_memory(*args, **kwargs):
""" Call get_data()
Parameters
----------
- See get_data() for the full list
"""
(x,y) = get_data(*args, **kwargs)
figure()
index = np.arange(len(x))
plot(index,y,'-b.')
smart_ticks( index, x)
ylabel("Memory 'loss' (MB)")
xlabel(kwargs['x_field'])
last_num =kwargs.get('last_num',-1)
if last_num > 0:
title("Memory History of %s (last %d revs)" % (kwargs['name'], kwargs["last_num"]) )
else:
title("Memory History of %s (all revs)" % kwargs['name'])
# The default HTML header
default_html_header = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"><html>
<head><LINK href="report.css" rel="stylesheet" type="text/css"></head>
"""
default_html_footer = """</body></html>"""
#============================================================================================
def make_css_file(path):
""" Make a save the report.css file to be used by all html """
default_css = """
table
{
border-collapse:collapse;
background-color:FFAAAA;
}
table, th, td
{
border: 1px solid black;
padding: 2px 6px;
}
.failedrow, .failedrow TD, .failedrow TH
{
background-color:#FFAAAA;
color:black;
}
.alternaterow, .alternaterow TD, .alternaterow TH
{
background-color:#FFFFAA;
color:black;
}
.error
{
color:red;
font-weight: bold;
}
"""
f = open(os.path.join(path, "report.css"), 'w')
f.write(default_css)
f.close()
#============================================================================================
def make_environment_html(res):
"""Return a HTML string with details of test environment, taken from the
'res' TestResult object"""
html = """<table border=1>
<tr><th>Host name:</th> <td>%s</td> </tr>
<tr><th>Environment:</th> <td>%s</td> </tr>
<tr><th>Type of runner:</th> <td>%s</td> </tr>
</table>
""" % (res['host'], res['environment'], res['runner'])
return html
#============================================================================================
def make_detailed_html_file(basedir, name, fig1, fig2, fig3, fig4, last_num):
""" Create a detailed HTML report for the named test """
html = default_html_header
html += """<h1>Detailed report for %s</h1><br>""" % (name)
html += """<img src="%s" alt="runtime vs revision number (latest %d entries)" />\n""" % (fig1, last_num)
html += """<img src="%s" alt="runtime vs revision number" />\n""" % (fig2)
html += """<img src="%s" alt="memory vs revision number (latest %d entries)" />\n""" % (fig3, last_num)
html += """<img src="%s" alt="memory vs revision number" />\n""" % (fig4)
html += """<h3>Test Results</h3>"""
fields = ['revision', 'date', 'commitid', 'compare', 'status', 'runtime', 'cpu_fraction', 'memory_change', 'variables']
table_row_header = "<tr>"
for field in fields:
if field == "runtime": field = "Runtime/Iter."
if field == "memory_change": field = "Memory 'loss'"
field = field[0].upper() + field[1:]
table_row_header += "<th>%s</th>" % field
table_row_header += "</tr>"
html += """<table border="1">""" + table_row_header
table_html = ''
results = get_results(name, type='', where_clause='')
sorted = [(res["revision"], res["variables"], res["date"], res) for res in results]
sorted.sort(reverse=False)
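    # Rows are assembled oldest-first so each row can reference the previous
    # commit when building its GitHub compare link, then reversed further down
    # so the newest results appear at the top of the table.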
count = 0
last_rev = 0
commitid = ''
last_commitid = ''
row_class = ''
table_rows = []
for (rev, variable, date, res) in sorted:
table_row_html = ''
if (rev != last_rev):
# Changed SVN revision. Swap row color
if row_class == '':
row_class = "class=alternaterow"
else:
row_class = ''
last_rev = rev
if commitid != last_commitid:
last_commitid = commitid
if res["success"]:
table_row_html += "<tr %s>\n" % row_class
else:
table_row_html += "<tr class=failedrow>\n"
for field in fields:
val = ''
if field == 'compare':
# Comparison to previous commit, if anything can be done
if (last_commitid != ""):
val = """<a href="https://github.com/mantidproject/mantid/compare/%s...%s">diff</a>""" % (last_commitid, commitid)
else:
# Normal fields
val = res[field]
# Trim the fractional seconds
if field=="date":
val = str(val)[0:19]
# Add a trac link
if field=="commitid":
commitid = val
partial_commitid = val
if (len(partial_commitid) > 7): partial_commitid = partial_commitid[0:7];
val = """<a href="https://github.com/mantidproject/mantid/commit/%s">%s</a>""" % (commitid, partial_commitid)
if field=="runtime":
val = "%.3f" % (res["runtime"])
table_row_html += "<td>%s</td>" % val
table_row_html += "\n</tr>\n"
table_rows.append(table_row_html)
# Now print out all the rows in reverse order
table_rows.reverse()
for row in table_rows:
html += row
# # Add the row header every 30 entries
# count += 1
# if count % 30 == 0: html += table_row_header
# And one more at the end for good measure
html += table_row_header
html += "</table>"
if len(results)> 0:
html += """<h3>Environment</h3>
%s""" % make_environment_html(results[0])
html += default_html_footer
# last_date = sorted[-1][1]["date"]
# results = get_results(name, type='', get_log=False, where_clause=" date = '%s'" % last_date)
# if len(results)>0:
# html +=
f = open(os.path.join(basedir, "%s.htm" % name), "w")
html = html.replace("\n", os.linesep) # Fix line endings for windows
f.write(html)
f.close()
#============================================================================================
def how_long_ago(timestr):
"""Returns a string giving how long ago something happened,
in human-friendly way """
import time
now = datetime.datetime.now()
then = datetime.datetime.strptime(timestr, DATE_STR_FORMAT)
td = (now-then)
sec = td.seconds
min = int(sec / 60)
hours = int(min / 60)
days = td.days
weeks = int(days / 7)
sec = sec % 60
min = min % 60
hours = hours % 24
days = days % 7
if weeks > 0:
return "%dw%dd" % (weeks,days)
elif days > 0:
return "%dd%dh" % (days, hours)
elif hours > 0:
return "%dh%dm" % (hours, min)
elif min > 0:
return "%dm%ds" % (min, sec)
else:
return "%ds" % (sec)
return ""
#============================================================================================
def get_html_summary_table(test_names):
"""Returns a html string summarizing the tests with these names """
html = """
<table ><tr>
<th>Test Name</th>
<th>Type</th>
<th>Status</th>
<th>When?</th>
<th>Total runtime (s)</th>
<th>Memory 'loss'</th>
"""
for name in test_names:
res = sqlresults.get_latest_result(name)
if not res is None:
# Calculate how long ago
if not res["success"]:
html += """<tr class="failedrow">"""
else:
html += """<tr>"""
html += """<td><a href="%s.htm">%s</a></td>""" % (name, name)
html += """<td>%s</td>""" % res['type']
html += """<td>%s</td>""" % res['status']
# Friendly date
try:
date = datetime.datetime.strptime(res['date'], DATE_STR_FORMAT)
html += """<td>%s</td>""" % date.strftime("%b %d, %H:%M:%S")
except:
html += """<td></td>"""
html += """<td>%s</td>""" % res['runtime']
html += """<td>%s</td>""" % res['memory_change']
html += """</tr>"""
html += """</table>"""
return html
#============================================================================================
def generate_html_subproject_report(path, last_num, x_field='revision', starts_with=""):
""" HTML report for a subproject set of tests.
starts_with : the prefix of the test name
Returns: (filename saved, HTML for a page with ALL figures in it)
"""
basedir = os.path.abspath(path)
if not os.path.exists(basedir):
os.mkdir(basedir)
# Detect if you can do figures
dofigs = True
try:
figure()
rcParams['axes.titlesize'] = 'small'
except:
dofigs = False
# Start the HTML
overview_html = ""
# ------ Find the test names of interest ----------------
# Limit with only those tests that exist in the latest rev
latest_rev = sqlresults.get_latest_revison()
temp_names = list(sqlresults.get_all_test_names(" revision = %d" % latest_rev))
# Filter by their start
test_names = []
for name in temp_names:
if name.startswith(starts_with):
test_names.append(name)
test_names.sort()
# -------- Report for each test ------------------------
for name in test_names:
print("Plotting", name)
overview_html += """<hr><h2>%s</h2>\n""" % name
# Path to the figures
fig1 = "%s.runtime.v.revision.png" % name
fig2 = "%s.runtime.v.revision.ALL.png" % name
fig3 = "%s.memory.v.revision.png" % name
fig4 = "%s.memory.v.revision.ALL.png" % name
if dofigs:
# Only the latest X entries
plot_runtime(name=name,x_field=x_field,last_num=last_num)
savefig(os.path.join(basedir, fig1))
close()
# Plot all svn times
plot_runtime(name=name,x_field=x_field,last_num=-1)
savefig(os.path.join(basedir, fig2))
close()
# Only the latest X entries
plot_memory(name=name,x_field=x_field,y_field='memory_change',last_num=last_num)
savefig(os.path.join(basedir, fig3))
close()
# Plot all svn times
plot_memory(name=name,x_field=x_field,y_field='memory_change',last_num=-1)
savefig(os.path.join(basedir, fig4))
close()
overview_html += """<img src="%s" alt="runtime vs revision number" />""" % (fig1)
overview_html += """<img src="%s" alt="memory vs revision number" />\n""" % (fig3)
make_detailed_html_file(basedir, name, fig1, fig2, fig3, fig4, last_num)
detailed_html = """<br><a href="%s.htm">Detailed test report for %s</a>
<br><br>
""" % (name, name)
overview_html += detailed_html
filename = starts_with + ".htm"
return (filename, overview_html)
#============================================================================================
def generate_html_report(path, last_num, x_field='revision'):
"""Make a comprehensive HTML report of runtime history for all tests.
Parameters
----------
path :: base path to the report folder
last_num :: in the shorter plot, how many SVN revs to show?
x_field :: the field to use as the x-axis. 'revision' or 'date' make sense
"""
basedir = os.path.abspath(path)
if not os.path.exists(basedir):
os.mkdir(basedir)
# Make the CSS file to be used by all HTML
make_css_file(path)
# Detect if you can do figures
dofigs = True
try:
figure()
except:
dofigs = False
# --------- Start the HTML --------------
html = default_html_header
html += """<h1>Mantid System Tests Auto-Generated Report</h1>"""
html += """<p><a href="overview_plot.htm">See an overview of performance plots for all tests by clicking here.</a></p> """
if not dofigs:
html += """<p class="error">There was an error generating plots. No figures will be present in the report.</p>"""
html += """<h2>Run Environment</h2>
%s
""" % ( make_environment_html(sqlresults.get_latest_result()) )
overview_html = ""
# ------ Find the test names of interest ----------------
# Limit with only those tests that exist in the latest rev
latest_rev = sqlresults.get_latest_revison()
test_names = list(sqlresults.get_all_test_names(" revision = %d" % latest_rev))
test_names.sort()
# ------ Find a list of subproject names --------
subprojects = set()
for name in test_names:
n = name.find(".")
if n > 0:
subprojects.add( name[:n] )
subprojects = list(subprojects)
subprojects.sort()
html += """<h2>Test Subprojects</h2>
<big>
<table cellpadding="10"> """
for subproject in subprojects:
(filename, this_overview) = generate_html_subproject_report(path, last_num, x_field, subproject)
overview_html += this_overview
html += """<tr> <td> <a href="%s">%s</a> </td> </tr>
""" % (filename, subproject)
html += """</table></big>"""
# --------- Table with the summary of latest results --------
html += """<h2>Overall Results Summary</h2>"""
html += get_html_summary_table(test_names)
# -------- Overall success history graphs ------------
#if dofigs:
# # We report the overall success
# fig_path = "OverallSuccess.png"
# plot_success_count(type='',last_num=last_num, x_field=x_field)
# savefig(os.path.join(basedir, fig_path))
# close()
#
# fig_path2 = "OverallSuccess.ALL.png"
# plot_success_count(type='',last_num=-1, x_field=x_field)
# savefig(os.path.join(basedir, fig_path2))
# close()
#
# html += """<h2>Overall Success/Failure</h2>
# <img src="%s" />
# <img src="%s" />
# """ % (fig_path, fig_path2)
html += default_html_footer
f = open(os.path.join(basedir, "report.htm"), "w")
html = html.replace("\n", os.linesep) # Fix line endings for windows
f.write(html)
f.close()
# -------- Overview of plots ------------
f = open(os.path.join(basedir, "overview_plot.htm"), "w")
overview_html = overview_html.replace("\n", os.linesep) # Fix line endings for windows
f.write(overview_html)
f.close()
print("Report complete!")
#============================================================================================
if __name__ == "__main__":
sqlresults.set_database_filename("MyFakeData.db")
# Make up some test data
if 0:
if os.path.exists("MyFakeData.db"): os.remove("MyFakeData.db")
sqlresults.generate_fake_data(300)
generate_html_report("../Report", 50)
# plot_runtime(name='MyFakeTest', x_field='revision')
# plot_runtime(name='MyFakeTest', x_field='date')
# plot_success_count()
# show()
| gpl-3.0 |
Kitware/minerva | gaia_tasks/inputs.py | 1 | 7301 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc. and Epidemico Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import json
import os
import tempfile
from base64 import b64encode
import fiona
import geopandas
import requests
from six import StringIO
from gaia.core import GaiaException
from gaia.filters import filter_pandas
from gaia.inputs import GaiaIO
from gaia import formats, types
import girder_worker
from girder.constants import PACKAGE_DIR
import girder_client
class MinervaVectorIO(GaiaIO):
"""
Interface to Minerva item geojson
"""
type = types.VECTOR
default_output = formats.JSON
def __init__(self, item_id=None, token=None, name='gaia_result.json',
uri='', **kwargs):
"""
Read and write GeoJSON data to/from Girder
:param item_id: Item id to read/write from/to
:param uri: location of temporary file
:param kwargs: Other keyword arguments
"""
self.id = item_id
self.token = token
if uri:
self.uri = uri
else:
tmpdir = tempfile.mkdtemp()
self.uri = tempfile.mkstemp(suffix='.json', dir=tmpdir)[1]
self.filename = name
girderHost = None
girderPort = None
girderScheme = None
try:
girderHost = girder_worker.config.get('minerva', 'girder_host_name')
girderPort = girder_worker.config.get('minerva', 'girder_host_port')
girderScheme = girder_worker.config.get('minerva', 'girder_host_scheme')
except:
girderHost = 'localhost'
girderPort = '8080'
girderScheme = 'http'
client = girder_client.GirderClient(host=girderHost, port=girderPort, scheme=girderScheme)
client.token = token
self.client = client
self.meta = self.client.getItem(item_id)
super(MinervaVectorIO, self).__init__(uri=self.uri, **kwargs)
def save_geojson(self):
"""
Save GeoJSON from a Minerva item
TODO: Use caching like the girder_worker.girder_io plugin
TODO: Separate methods for saving geojson from different sources
TODO: Get geojson via WFS calls for local WMS vector layers
"""
minerva = self.meta['meta']['minerva']
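        # Two storage layouts are handled below: GeoJSON uploaded as a Girder
        # file, or GeoJSON embedded directly in the item's Minerva metadata.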
if 'geojson_file' in minerva:
# Uploaded GeoJSON is stored as a file in Girder
self.client.downloadFile(minerva['geojson_file']['_id'], self.uri)
elif 'geojson' in minerva:
# Mongo collection is stored in item meta
geojson = json.loads(minerva['geojson']['data'])
# TODO: Don't use mongo metadata for filename
with open(self.uri, 'w') as outjson:
json.dump(geojson, outjson)
# elif 'dataset_type' in minerva and minerva['dataset_type'] == 'wms':
# from girder.plugins.minerva.utility.minerva_utility import decryptCredentials
# servers = config.getConfig()['gaia_minerva_wms']['servers']
# if minerva['base_url'] in servers:
# params = 'srsName=EPSG:4326&typename={name}&outputFormat=json'\
# + '&version=1.0.0&service=WFS&request=GetFeature'
# url = '{base}?{params}'.format(
# base=minerva['base_url'].replace('/wms', '/wfs'),
# params=params.format(name=minerva['type_name'])
# )
# headers = {}
# if 'credentials' in minerva:
# credentials = (minerva['credentials'])
# basic_auth = 'Basic ' + b64encode(
# decryptCredentials(credentials))
# headers = {'Authorization': basic_auth}
# with open(self.uri, 'w') as outjson:
# r = requests.get(url, headers=headers)
# r.raise_for_status()
# json.dump(r.json(), outjson)
# else:
# raise GaiaException('This server {} is not supported. \n{}'.format(minerva))
else:
raise GaiaException('Unsupported data source. \n{}'.format(minerva))
def read(self, epsg=None, **kwargs):
"""
Read vector data from Girder
:param format: Format to return data in (default is GeoDataFrame)
:param epsg: EPSG code to reproject data to
:return: Data in GeoJSON
"""
if self.data is None:
self.save_geojson()
self.data = geopandas.read_file(self.uri)
if self.filters:
self.filter_data()
out_data = self.data
if epsg and self.get_epsg() != epsg:
out_data = geopandas.GeoDataFrame.copy(out_data)
out_data[out_data.geometry.name] = \
self.data.geometry.to_crs(epsg=epsg)
out_data.crs = fiona.crs.from_epsg(epsg)
        # Return GeoJSON text only when the caller explicitly requests it via
        # kwargs; by default the reprojected GeoDataFrame is returned.
        if kwargs.get('format') == formats.JSON:
            return out_data.to_json()
        else:
            return out_data
def write(self, filename=None, as_type='json'):
"""
Write data (assumed geopandas) to geojson or shapefile
:param filename: Base filename
:param as_type: json or memory
:return: file girder uri
"""
filedata = self.data.to_json()
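        # The GeoDataFrame is always serialized to GeoJSON; 'json' also writes
        # a local copy before uploading, while 'memory' skips the local file.
        # In both cases the result is uploaded back to the Girder item below.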
if not filename:
filename = self.filename
if as_type == 'json':
self.uri = self.uri.replace(os.path.basename(self.uri), filename)
self.create_output_dir(self.uri)
with open(self.uri, 'w') as outfile:
outfile.write(filedata)
elif as_type == 'memory':
pass
else:
raise NotImplementedError('{} not a valid type'.format(as_type))
fd = StringIO(filedata)
upload = self.client.uploadFile(parentId=self.id, stream=fd,
size=len(filedata), name=filename)
item_meta = self.client.getItem(self.id)['meta']
item_meta['minerva']['geojson_file'] = {
'_id': upload['_id'],
'name': upload['name']
}
item_meta['minerva']['geo_render'] = {
'type': 'geojson',
'file_id': upload['_id']
}
self.client.addMetadataToItem(self.id, item_meta)
return os.path.join(
self.client.urlBase, 'file', upload['_id'], 'download')
def filter_data(self):
"""
Apply filters to the dataset
:return:
"""
self.data = filter_pandas(self.data, self.filters)
PLUGIN_CLASS_EXPORTS = [
MinervaVectorIO
]
| apache-2.0 |
mkness/TheCannon | code/makeredclumpplot.py | 1 | 10984 | #!/usr/bin/python
import pyfits
import numpy as np
import pickle
from numpy import savetxt
import matplotlib
from matplotlib import pyplot
#a.close()
import scipy
from scipy import interpolate
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from matplotlib.ticker import ScalarFormatter
s = matplotlib.font_manager.FontProperties()
s.set_family('serif')
s.set_size(14)
from matplotlib import rc
rc('text', usetex=True)
rc('text', usetex=True)
rc('font', family='serif')
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from matplotlib import pyplot
from matplotlib.pyplot import *
import matplotlib
from matplotlib import pyplot
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
s = matplotlib.font_manager.FontProperties()
import matplotlib as mpl
mpl.rcParams['text.usetex']=True
mpl.rcParams['text.latex.unicode']=True
rcParams["xtick.labelsize"] = 14
rcParams["ytick.labelsize"] = 14
s = matplotlib.font_manager.FontProperties()
s.set_size(18)
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
s = matplotlib.font_manager.FontProperties()
majorLocator = MultipleLocator(5)
majorFormatter = FormatStrFormatter('%d')
minorLocator = MultipleLocator(5)
ymajorLocator1 = MultipleLocator(0.005)
ymajorLocator2 = MultipleLocator(0.1)
ymajorLocator3 = MultipleLocator(0.1)
xminorLocator = MultipleLocator(5)
yminorLocator = MultipleLocator(5)
ymajorLocator = MultipleLocator(50)
xmajorLocator = MultipleLocator(10)
######rcParams['figure.figsize'] = 22.0, 10.0
import pickle
a = open('coeffs_2nd_order_5.pickle', 'r') # was originally just coeffs_2nd_order_5.pickle but that is corrupted
bc = pickle.load(a)
coeffs = bc[4]
wl_star = bc[0][:,0,0]
a.close()
def makespec(t,g,feh,alpha,mass):
nlabels = 5
features_data = np.ones((nlabels, 1))
labels = [t,g,feh,alpha,log(mass)] # put all in
offsets = [4.85481344e+03, 2.58309295e+00, 8.87884779e-04 , 4.4598135e-02, 1]
labels = array(labels)
offsets = array(offsets)
features_data = np.hstack((1, labels - offsets))
newfeatures_data = np.array([np.outer(labels-offsets, labels-offsets)[np.triu_indices(nlabels)] ])
newfeatures2_data = array([( labels[1] - offsets[1])**3]).T
features_data_final = np.hstack((features_data, newfeatures_data.flatten(), newfeatures2_data.flatten()))
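    # The Cannon's quadratic label basis: a constant, the offset-subtracted
    # labels, their pairwise products (upper triangle), plus a cubic logg term;
    # dotting this with the trained coefficients gives the model spectrum.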
jj= 0
model_gen2 = np.dot(coeffs,features_data_final.T)
return model_gen2
#filein = './Jon/redclump_sample_A_updatedvalues_only.txt'
#a = open(filein, 'r')
#al = a.readlines()
##tvals,gvals,fehvals,alphavals, agevals, massvals = genfromtxt(filein, usecols = (5,6,7,8,9,10) , unpack =1)
#ids = []
#for each in al:
# ids.append(each.split()[0])
#plotstars(file1, wl_star,params1, "/Users/ness/new_laptop/TheCannon/TheCannon/documents/mass_and_age/plots/spectra_fits", cent_wl)
def plotstars(filein, wl, params,prefix,cent_wl1, cent_wl2,cent_wl3, cent_wl4,cent_wl5, cent_wl6, cent_wl7, cent_wl8, bw):
rcParams['figure.figsize'] = 14.0, 14.0
fig, temp = pyplot.subplots(4,2, sharex=False, sharey=False)
ax1 = temp[0,0]
ax2 = temp[1,0]
ax3 = temp[2,0]
ax4 = temp[3,0]
ax5 = temp[0,1]
ax6 = temp[1,1]
ax7 = temp[2,1]
ax8 = temp[3,1]
t1,g1,feh1,alpha1,mass1 = params[0], params[1],params[2], params[3],params[4]
param_str = " Teff="+str(np.int(t1))+", logg="+str(np.round(g1,1))+", [Fe/H]="+ str(np.round(feh1,2))+r", [$\alpha$/Fe]="+str(np.round(alpha1,2))+", Mass="+str(np.round(mass1,2))
def _plot_something(filein,params, ax, wl, indx, color, lw=1.0, label=""):
a = pyfits.open(filein)
data = a[1].data
sigma = a[2].data
bad = sigma > 0.1
data[bad] = None
model_aspcap = a[3].data
t1,g1,feh1,alpha1,mass1 = params[0], params[1],params[2], params[3],params[4]
model_tc = makespec(t1,g1,feh1,alpha1,mass1 )
#if indx == 0:
# ax.plot(wl,model_aspcap, color='r', lw=lw,alpha = 0.5)
lw2 = 0.1
lw1 = 2.
lw2 = 1.
if indx == 1:
ax.plot(wl,model_aspcap, color='gray', lw=lw1,alpha = 0.6, label= "ASPCAP model", linestyle = 'dashed')
ax.plot(wl,model_tc, color='r', lw=lw1,alpha = 0.6, label = 'The Cannon model', linestyle = '-')
ax.plot(wl,data , color='k', lw=lw2,label = 'data') #, label=label)
return None
axes = [ ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8]
for ax in axes:
_plot_something(filein, params,ax, wl_star, 1,'k')
#_plot_something(filein, params,ax1, wl_star, 1,'k')
#_plot_something(filein, params,ax3, wl_star,1, 'k')
#_plot_something(filein, params,ax2, wl_star, 1,'k')
#_plot_something(filein, params,ax4, wl_star,1, 'k')
leg = ax1.legend(numpoints=1, fontsize = 12,loc = 3, frameon = False)
ax5.text(cent_wl5 - 13, 0.72, param_str)
#for legobj in leg.legendHandles:
# legobj.set_linewidth(2.0)
# ax3.plot(wl_star, model_tc, 'b',alpha=0.5)
#fig.subplots_adjust(hspace=0)
fig.subplots_adjust(wspace=0.09)
fig.subplots_adjust(hspace=0.30)
axlist = [ax1,ax2,ax3,ax4, ax5, ax6, ax7, ax8]
axlist1 = [ax1,ax2,ax3,ax4]
axlist2 = [ax3,ax4]
fs = 16
for ax in axlist1:
ax.set_ylabel("Normalized Flux", fontsize = fs)
# Set each panel's wavelength window and shared x-axis labeling/formatting;
# panels ax1..ax8 are centered on cent_wl1..cent_wl8 respectively.
for ax, cent_wl in zip(axlist, [cent_wl1, cent_wl2, cent_wl3, cent_wl4, cent_wl5, cent_wl6, cent_wl7, cent_wl8]):
    ax.set_xlim(cent_wl - bw, cent_wl + bw)
    ax.set_xlabel("wavelength $\lambda$" + r" (\mbox{\AA})", fontsize = fs, labelpad = 5)
    ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
for ax in axlist:
ax.set_ylim(0.69,1.05) # logg1
ax1.set_ylim(0.49,1.05) # logg1
#ax1.set_xticklabels([])
#ax3.set_xticklabels([])
ax5.set_yticklabels([])
ax6.set_yticklabels([])
ax7.set_yticklabels([])
ax8.set_yticklabels([])
#savefig_mkn(fig, prefix, transparent=False, bbox_inches='tight', pad_inches=0.5)
return
def savefig_mkn(fig, prefix, **kwargs):
suffix1 = ".png"
print "writing %s" % (prefix + suffix1)
fig.savefig(prefix + suffix1, **kwargs)
fn = '/Users/ness/new_laptop/Apogee_ages/Jon/aspcapStar-r5-v603-'
nd = '.fits'
file1,file2,file3,file4,file5 = fn+'2M00000211+6327470'+nd, fn+'2M00000446+5854329'+nd , fn+'2M00001242+5524391'+nd , fn+'2M23591255+5724354'+nd , fn+'2M00001962+6502429'+nd
file6,file7 =fn+'2M23591931+6244094'+nd, fn+'2M00010744+6445170'+nd
t1,g1,feh1,alpha1,age1, mass1 = 4708.49983734, 2.17652168722, 0.0617198884138, 0.015425129037, 1.55488236009, 1.88141910392
t2,g2,feh2,alpha2,age2, mass2 = 4745.52159963, 2.35896450654, -0.0142214905547, 0.0355349312863, 1.09579636191, 2.10351779915
t3,g3,feh3,alpha3,age3,mass3 = 4634.56555856, 2.27026176286, 0.148285193614, 0.0544413463398, 3.25464515338, 1.48482119829
t4,g4,feh4,alpha4,age4,mass4 = 5065.9005193, 2.65343988317, -0.272045304817, 0.0425327062973, 0.463644013007, 2.73168129836
t5,g5, feh5, alpha5, age5, mass5 = 4547.8021233, 2.36446969168, 0.149600284589, 0.0625720823962, 1.47742666725, 1.94314195429
t6,g6,feh6,alpha6,age6,mass6 = 4592.01662402, 2.37847512277, -0.0995352783245, 0.0849492206682 ,5.81821837085, 1.17919262914
t7,g7,feh7,alpha7,age7,mass7 = 4843.4531731, 2.49841383705, -0.0586235260442, 0.0386181490951, 10.5928384269, 1.00701251325
params1 = [4708,2.17,0.06,0.015,1.88]
params4= [t4,g4,feh4,alpha4,mass4]
params7= [t7,g7,feh7,alpha7,mass7]
#logg
cent_wl1 = 15770 # log g max
cent_wl5 = 16810 # log g 2nd
# t
cent_wl2 = 15339 # teff 2nd max
cent_wl6 = 15720 # teff max
# feh,alpha
cent_wl3 = 15221.5 # highest feh
cent_wl7 = 16369 # highest alpha
# mass
cent_wl4 = 15241 # highest mass for _5 and _5 HWR
#cent_wl8 = 15332 # second highest mass for _5 (superseded by the value below)
cent_wl8 = 16191 # second highest mass for _5
# V
cent_wla = 15929 # highest mass for _5 and _5 HWR
cent_wlb = 16410.7 # second highest mass for _5
#plotdata('coeffs_2nd_order_5.pickle', wl3,100, "/Users/ness/new_laptop/TheCannon/TheCannon/documents/mass_and_age/plots/coeffs_t_3", cent_g1, cent_g2,0)
# feh,alpha
#cent_g1 = 15221.5 # highest feh
#cent_g2 = 16369 # highest alpha
#plotdata('coeffs_2nd_order_5.pickle', wl3,100, "/Users/ness/new_laptop/TheCannon/TheCannon/documents/mass_and_age/plots/coeffs_af_3", cent_g1, cent_g2,2)
# mass
#plotstars(file1, wl_star,params1, "/Users/ness/new_laptop/TheCannon/TheCannon/documents/mass_and_age/plots/spectra_fits_1", cent_wl1,cent_wl2,cent_wl3,cent_wl4, cent_wl5, cent_wl6, cent_wl7, cent_wl8, 14)
#plotstars(file7, wl_star,params7, "/Users/ness/new_laptop/TheCannon/TheCannon/documents/mass_and_age/plots/spectra_fits_7", cent_wl1,cent_wl2,cent_wl3,cent_wl4,cent_wl5, cent_wl6, cent_wl7,cent_wl8, 14)
plotstars(file1, wl_star,params1, "/Users/ness/new_laptop/TheCannon/TheCannon/documents/mass_and_age/plots/spectra_fits_1", cent_wl1,cent_wl2,cent_wl3,cent_wl4,cent_wl5, cent_wl6, cent_wl7,cent_wl8, 14)
#plotstars(file2, wl_star,params2, "/Users/ness/new_laptop/TheCannon/TheCannon/documents/mass_and_age/plots/spectra_fits_2", cent_wl1,cent_wl2, cent_wl3, cent_wl4, cent_wl5, cent_wl6, cent_wl7,cent_wl8,14)
plotstars(file7, wl_star,params7, "/Users/ness/new_laptop/TheCannon/TheCannon/documents/mass_and_age/plots/spectra_fits_7", cent_wl1,cent_wl2,cent_wl3,cent_wl4,cent_wl5, cent_wl6, cent_wl7,cent_wl8, 14)
#plotstars(file7, wl_star,params7, "/Users/ness/new_laptop/TheCannon/TheCannon/documents/mass_and_age/plots/spectra_fits_elem", cent_wl1,cent_wl2,cent_wl3,cent_wl4,cent_wl5, cent_wl6, cent_wla,cent_wlb, 14)
show()
| mit |
magnusax/ml-meta-wrapper | gazer/classifiers/random_forest.py | 1 | 2959 | from ..base import BaseClassifier
from scipy.stats import randint
from sklearn.ensemble import RandomForestClassifier
class MetaRandomForestClassifier(BaseClassifier):
"""
Implementation of random forest classifier:
http://scikit-learn.org/0.17/modules/generated/sklearn.ensemble.\
RandomForestClassifier.html#sklearn.ensemble.RandomForestClassifier
"""
def __init__(self, n_estimators=10, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,
min_weight_fraction_leaf=0.0, max_features='auto', max_leaf_nodes=None, bootstrap=True,
oob_score=False, n_jobs=1, random_state=None, verbose=0, warm_start=False, class_weight=None):
self.name = "random_forest"
self.max_n_iter = 1000
self.init_params = {}
self.init_params['n_estimators'] = n_estimators
self.init_params['criterion'] = criterion
self.init_params['max_depth'] = max_depth
self.init_params['min_samples_split'] = min_samples_split
self.init_params['min_samples_leaf'] = min_samples_leaf
self.init_params['min_weight_fraction_leaf'] = min_weight_fraction_leaf
self.init_params['max_features'] = max_features
self.init_params['max_leaf_nodes'] = max_leaf_nodes
self.init_params['bootstrap'] = bootstrap
self.init_params['oob_score'] = oob_score
self.init_params['random_state'] = random_state
self.init_params['warm_start'] = warm_start
self.init_params['class_weight'] = class_weight
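# Note: n_jobs and verbose are accepted by __init__ above but are not added to
# init_params, so they are never forwarded to the underlying sklearn estimator.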
# Initialize algorithm and make it available
self.estimator = self._get_clf()
# Initialize dictionary with trainable parameters
self.cv_params = self._set_cv_params()
# Initialize list which can be populated with params to tune
self.cv_params_to_tune = []
def _get_clf(self):
return RandomForestClassifier(**self.init_params)
def get_info(self):
return {'does_classification': True, 'does_multiclass': True,
'does_regression': False, 'predict_probas': hasattr(self.estimator, 'predict_proba')}
def adjust_params(self, d):
return super().adjust_params(d)
def set_tune_params(self, params, num_params=1, mode='random', keys=list()):
return super().set_tune_params(params, num_params, mode, keys)
def _set_cv_params(self):
"""
Trainable params available in self.cv_params[i].keys() for i in range(len(self.cv_params)).
"""
return [{
"max_depth": [None, 3, 5, 7],
"max_features": randint(1, 21),
"min_samples_split": randint(2, 21),
"min_samples_leaf": randint(1, 21),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"],
"n_estimators": [10, 64, 128, 512]},
] | mit |
chrisburr/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 45 | 2433 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate a large outer circle containing a smaller inner circle
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
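# transduction_ holds the label inferred for every sample, including the points
# left unlabeled (-1) above.
# Hedged addition, not part of the upstream example: a quick numeric check of
# agreement with the ground-truth labels returned by make_circles.
print("agreement with make_circles ground truth: %.3f" % np.mean(output_labels == y))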
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plt.scatter(X[labels == outer, 0], X[labels == outer, 1], color='navy',
marker='s', lw=0, label="outer labeled", s=10)
plt.scatter(X[labels == inner, 0], X[labels == inner, 1], color='c',
marker='s', lw=0, label='inner labeled', s=10)
plt.scatter(X[labels == -1, 0], X[labels == -1, 1], color='darkorange',
marker='.', label='unlabeled')
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Raw data (2 classes=outer and inner)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plt.scatter(X[outer_numbers, 0], X[outer_numbers, 1], color='navy',
marker='s', lw=0, s=10, label="outer learned")
plt.scatter(X[inner_numbers, 0], X[inner_numbers, 1], color='c',
marker='s', lw=0, s=10, label="inner learned")
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
dancingdan/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py | 30 | 70017 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
def _assert_metrics_in_range(keys, metrics):
epsilon = 0.00001 # Added for floating point edge cases.
for key in keys:
estimator_test_utils.assert_in_range(0.0 - epsilon, 1.0 + epsilon, key,
metrics)
class _CheckCallsHead(head_lib.Head):
"""Head that checks whether head_ops is called."""
def __init__(self):
self._head_ops_called_times = 0
@property
def logits_dimension(self):
return 1
def create_model_fn_ops(
self, mode, features, labels=None, train_op_fn=None, logits=None,
logits_input=None, scope=None):
"""See `_Head`."""
self._head_ops_called_times += 1
loss = losses.mean_squared_error(labels, logits)
return model_fn.ModelFnOps(
mode,
predictions={'loss': loss},
loss=loss,
train_op=train_op_fn(loss),
eval_metric_ops={'loss': loss})
@property
def head_ops_called_times(self):
return self._head_ops_called_times
class _StepCounterHook(session_run_hook.SessionRunHook):
"""Counts the number of training steps."""
def __init__(self):
self._steps = 0
def after_run(self, run_context, run_values):
del run_context, run_values
self._steps += 1
@property
def steps(self):
return self._steps
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'dnn_feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn_linear_combined._dnn_linear_combined_model_fn(features, labels,
model_fn.ModeKeys.TRAIN,
params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'dnn_feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep language embeddings constant, whereas wire
# embeddings will be trained.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
with ops.Graph().as_default():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
training_util.create_global_step()
model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, model_fn.ModeKeys.TRAIN, params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
language_initial_value = sess.run(language_var)
for _ in range(2):
_, language_value = sess.run([model_ops.train_op, language_var])
self.assertAllClose(language_value, language_initial_value)
# We could also test that wire_value changed, but that test would be flaky.
class DNNLinearCombinedEstimatorTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedEstimator)
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedEstimator(
head=_CheckCallsHead(),
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testCheckCallsHead(self):
"""Tests binary classification using matrix data as input."""
head = _CheckCallsHead()
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [feature_column.bucketized_column(
cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))]
estimator = dnn_linear_combined.DNNLinearCombinedEstimator(
head,
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
estimator.fit(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(1, head.head_ops_called_times)
estimator.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(2, head.head_ops_called_times)
estimator.predict(input_fn=test_data.iris_input_multiclass_fn)
self.assertEqual(3, head.head_ops_called_times)
class DNNLinearCombinedClassifierTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedClassifier)
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testNoDnnHiddenUnits(self):
def _input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
with self.assertRaisesRegexp(
ValueError,
'dnn_hidden_units must be defined when dnn_feature_columns is '
'specified'):
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[age, language])
classifier.fit(input_fn=_input_fn, steps=2)
def testSyncReplicasOptimizerUnsupported(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
sync_optimizer = sync_replicas_optimizer.SyncReplicasOptimizer(
opt=adagrad.AdagradOptimizer(learning_rate=0.1),
replicas_to_aggregate=1,
total_num_replicas=1)
sync_hook = sync_optimizer.make_session_run_hook(is_chief=True)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=sync_optimizer)
with self.assertRaisesRegexp(
ValueError,
'SyncReplicasOptimizer is not supported in DNNLinearCombined model'):
classifier.fit(
input_fn=test_data.iris_input_multiclass_fn, steps=100,
monitors=[sync_hook])
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[embedding_language],
dnn_hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
# Set num_ps_replicas to be 10 and the min slice size to be extremely small,
# so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
dnn_feature_columns=feature_columns,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertTrue(callable(classifier.params['input_layer_partitioner']))
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(
iris.data[:, i], dtype=dtypes.float32), [-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(
iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
feature_column.bucketized_column(cont_features[i],
test_data.get_quantile_based_buckets(
iris.data[:, i], 10))
for i in range(4)
]
linear_features.append(
feature_column.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testEstimatorWithCoreFeatureColumns(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(iris.data[:, i], dtype=dtypes.float32),
[-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [fc_core.numeric_column(str(i)) for i in range(4)]
linear_features = [
fc_core.bucketized_column(
cont_features[i],
sorted(set(test_data.get_quantile_based_buckets(
iris.data[:, i], 10)))) for i in range(4)
]
linear_features.append(
fc_core.categorical_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
embedding_features = [
feature_column.embedding_column(
sparse_features[0], dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=sparse_features,
dnn_feature_columns=embedding_features,
dnn_hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testMultiClass(self):
"""Tests multi-class classification using matrix data as input.
Please see testLogisticRegression_TensorData() for how to use Tensor
data as input instead.
"""
iris = base.load_iris()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=bucketized_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=[language_column],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
labels = constant_op.constant([[1], [0], [0], [0]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
self.assertAlmostEqual(0.562, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
self.assertAlmostEqual(1.06, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x).
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByObject(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByString(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer='Ftrl',
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad')
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByFunction(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
def _optimizer_exp_decay():
global_step = training_util.get_global_step()
learning_rate = learning_rate_decay.exponential_decay(
learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
return adagrad.AdagradOptimizer(learning_rate=learning_rate)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=_optimizer_exp_decay,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=_optimizer_exp_decay)
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testPredict(self):
"""Tests weight column in evaluation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32)}
return features, labels
def _input_fn_predict():
y = input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32), num_epochs=1)
features = {'x': y}
return features
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=100)
probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
classes = list(classifier.predict_classes(input_fn=_input_fn_predict))
self.assertListEqual([0] * 4, classes)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
# Test the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testVariableQuery(self):
"""Tests get_variable_names and get_variable_value."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=500)
var_names = classifier.get_variable_names()
self.assertGreater(len(var_names), 3)
for name in var_names:
classifier.get_variable_value(name)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[
feature_column.real_valued_column('age'),
language,
],
dnn_feature_columns=[
feature_column.embedding_column(
language, dimension=1),
],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
classifier.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=True)
classifier.fit(input_fn=_input_fn_train, steps=1000)
self.assertIn('binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
# logodds(0.75) = 1.09861228867
self.assertAlmostEqual(
1.0986,
float(classifier.get_variable_value(
'binary_logistic_head/centered_bias_weight')[0]),
places=2)
def testDisableCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=False)
classifier.fit(input_fn=_input_fn_train, steps=500)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testGlobalStepLinearOnly(self):
"""Tests global step update for linear-only model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNOnly(self):
"""Tests global step update for dnn-only model."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNLinearCombinedBug(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=False)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
global_step = classifier.get_variable_value('global_step')
if global_step == 100:
# Expected is 100, but because of the global step increment bug, is 50.
# Occasionally, step increments one more time due to a race condition,
# reaching 51 steps.
self.assertIn(step_counter.steps, [50, 51])
else:
# Occasionally, training stops when global_step == 102, due to a race
# condition. In addition, occasionally step increments one more time due
# to a race condition reaching 52 steps.
self.assertIn(step_counter.steps, [51, 52])
def testGlobalStepDNNLinearCombinedBugFixed(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=True)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testLinearOnly(self):
"""Tests that linear-only instantiation works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/age/weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/age/weight')))
self.assertEquals(
100, len(classifier.get_variable_value('linear/language/weights')))
def testLinearOnlyOneFeature(self):
"""Tests that linear-only instantiation works for one feature only."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 99)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/bias_weight')))
self.assertEquals(
99, len(classifier.get_variable_value('linear/language/weights')))
def testDNNOnly(self):
"""Tests that DNN-only instantiation works."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000)
classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
self.assertNotIn('linear/bias_weight', variable_names)
self.assertNotIn('linear/feature_BUCKETIZED/weight', variable_names)
def testDNNWeightsBiasesNames(self):
"""Tests the names of DNN weights and biases in the checkpoints."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=5)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
class DNNLinearCombinedRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10)
scores = regressor.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=1)
self.assertIn('loss', scores.keys())
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=10)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
regressor.predict_scores(input_fn=_input_fn, as_iterable=False)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor.predict_scores(input_fn=predict_input_fn, as_iterable=True)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testExport(self):
"""Tests export model for servo."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = _input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
regressor.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testTrainSaveLoad(self):
"""Tests regression with restarting training / evaluate."""
def _input_fn(num_epochs=None):
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {
'x':
input_lib.limit_epochs(
constant_op.constant([[100.], [3.], [2.], [2.]]),
num_epochs=num_epochs)
}
return features, labels
model_dir = tempfile.mkdtemp()
# pylint: disable=g-long-lambda
new_regressor = lambda: dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
model_dir=model_dir,
config=run_config.RunConfig(tf_random_seed=1))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor = new_regressor()
regressor.fit(input_fn=_input_fn, steps=10)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor = new_regressor()
predictions2 = list(regressor.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testLinearOnly(self):
"""Tests linear-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDNNOnly(self):
"""Tests DNN-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testNoneFeatureEngineeringFn(self):
def input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
def feature_engineering_fn(features, labels):
_, _ = features, labels
labels = constant_op.constant([[1000.], [30.], [20.], [20.]])
features = {'x': constant_op.constant([[1000.], [30.], [20.], [20.]])}
return features, labels
estimator_with_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1),
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=110)
estimator_without_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
estimator_without_fe_fn.fit(input_fn=input_fn, steps=110)
# predictions = y
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
if __name__ == '__main__':
test.main()
| apache-2.0 |
slabanja/ase | ase/io/png.py | 3 | 1100 | from ase.io.eps import EPS
class PNG(EPS):
def write_header(self):
from matplotlib.backends.backend_agg import RendererAgg
try:
from matplotlib.transforms import Value
except ImportError:
dpi = 72
else:
dpi = Value(72)
self.renderer = RendererAgg(self.w, self.h, dpi)
#self.gc = GraphicsContextBase()
#self.gc.set_linewidth(2)
def write_trailer(self):
renderer = self.renderer
if hasattr(renderer._renderer, 'write_png'):
# Old version of matplotlib:
renderer._renderer.write_png(self.filename)
else:
            buf = renderer._renderer.buffer_rgba(0, 0)
            from matplotlib import _png
            _png.write_png(buf, renderer.width, renderer.height,
                           self.filename, 72)
def write_png(filename, atoms, **parameters):
if isinstance(atoms, list):
assert len(atoms) == 1
atoms = atoms[0]
PNG(atoms, **parameters).write(filename)
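# Illustrative usage sketch (not part of the original module); the rough water
# geometry below is an arbitrary example:
#
# from ase import Atoms
# atoms = Atoms('H2O', positions=[(0.76, 0.59, 0.0),
# (-0.76, 0.59, 0.0),
# (0.0, 0.0, 0.0)])
# write_png('h2o.png', atoms)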
| gpl-2.0 |
MaxNoe/cta_event_viewer | windows/__init__.py | 1 | 1265 | import Tkinter as tk
import matplotlib
matplotlib.use('tkagg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from matplotlib.axes import Axes
from plots import CameraPlot
class TelescopeEventView(tk.Frame, object):
""" A frame showing the camera view of a single telescope """
def __init__(self, root, telescope, data=None, *args, **kwargs):
self.telescope = telescope
super(TelescopeEventView, self).__init__(root)
self.figure = Figure(figsize=(5, 5), facecolor='none')
self.ax = Axes(self.figure, [0, 0, 1, 1], aspect=1)
self.ax.set_axis_off()
self.figure.add_axes(self.ax)
self.camera_plot = CameraPlot(telescope, self.ax, data, *args, **kwargs)
self.canvas = FigureCanvasTkAgg(self.figure, master=self)
self.canvas.show()
self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.canvas._tkcanvas.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
self.canvas._tkcanvas.config(highlightthickness=0)
@property
def data(self):
return self.camera_plot.data
@data.setter
def data(self, value):
self.camera_plot.data = value
self.canvas.draw()
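# Illustrative usage sketch (not from the original source): `telescope` and
# `sample` are assumed to be provided by the surrounding application -- a
# telescope description understood by CameraPlot and a per-pixel data array.
#
# root = tk.Tk()
# view = TelescopeEventView(root, telescope)
# view.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
# view.data = sample # assigning to `data` redraws the canvas
# root.mainloop()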
| mit |
arjoly/scikit-learn | sklearn/utils/tests/test_testing.py | 107 | 4210 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
        # same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LinearDiscriminantAnalysis()
tree = DecisionTreeClassifier()
# Linear Discriminant Analysis doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
#`clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
didmar/blokus3d-python | src/blokus3d/interface.py | 1 | 4989 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
# XXX if wrong coords are passed, you get 0 variations to choose from and it breaks!
from numpy.core.numeric import array
from numpy.core.fromnumeric import sort
from numpy.lib.arraysetops import unique
from matplotlib.pylab import flatten
from blokus3d.block import blockToASCII, blockVarToASCII, \
blockNames, blocks, blocksVar
from blokus3d.move import moveToASCII
from blokus3d.utils import fst, snd, third, unik
from itertools import takewhile
def findMove(gs, askApply=True):
moves = gs.legalMoves()
    if len(moves)==1:
        move = moves[0]
        print "Only one move possible :\n", moveToASCII(move)
else:
ok = False
while not ok:
# First, pick a block
blkId = findBlock(gs,candidates=unique(map(snd,moves)))
            assert blkId is not None # since we checked that len(moves) was > 0
# Filter the moves that have the selected block id
moves = filter(lambda m : m[1]==blkId, moves)
# Then, find the coordinates on the board
coords = findCoords(gs,candidates=unik(map(fst,moves)))
# Filter the moves that have the selected coordinates
moves = filter(lambda m : (m[0]==coords).all(), moves)
# Finally, find its variation
blkVarId = findVariation(gs,blkId, \
candidates=unique(map(third,moves)))
move = (coords,blkId,blkVarId)
print "You have selected :\n", moveToASCII(moves[0])
print "Is this the move you wanted ? [Y/n]"
if raw_input("") in ["n","N"]:
# Will start again with all legal moves possibles
moves = gs.legalMoves()
else:
ok = True
if askApply:
print "Do you want to play this move over the current gamestate ?",\
" [Y/n]"
if raw_input("") not in ["n","N"]:
gs.playMove(move)
return move
def findBlock(gs, candidates=None):
    if candidates is None:
candidates = gs.playerBlocks[gs.nextPlayer]
if len(candidates)==0:
return None
print "Which block ?"
for blkId in candidates:
print "%d) %s" % (blkId+1, blockNames[blkId])
print blockToASCII(blocks[blkId])
return input("> ")-1
def findCoords(gs, candidates=None):
    if candidates is None:
candidates=[]
# List all the possible z-level (heights)
zRange = list(takewhile(lambda x : x < gs.boardSize[2], \
sort(unique(flatten(gs.heightMap())))))
if zRange==[]:
print "Board is full, cannot find legal coordinates !"
return None
else:
zRange = sort(unique(map(third,candidates)))
# Do we have a choice on the z-level ?
if len(zRange)==1:
z = zRange[0]
else:
print "\n",gs.boardToASCII(markedCubes=candidates)
# Discard the z height max
if zRange[-1]==gs.boardSize[2]:
zRange = zRange[:-1]
z = -1+input("Which z-level ? (%d-%d)\n> " \
% (zRange[0]+1,zRange[-1]+1))
candidates = filter(lambda c: c[2]==z, candidates)
if len(candidates)>1:
# Display the z-level with xy coordinates as letter-number
print ' '+''.join(chr(97+x) for x in xrange(gs.boardSize[0]))
print ' +'+'-'*gs.boardSize[0]
lines = gs.boardToASCII(zRange=[z],markedCubes=candidates)\
.split('\n')
for y in xrange(gs.boardSize[1]):
print '%s |%s' % (str(y+1).zfill(2),lines[y])
print "\n"
xy = raw_input("Which xy coordinates ?\n> ")
return array([ord(xy[0])-97,int(xy[1:])-1,z])
else:
return candidates[0]
def findVariation(gs, blkId, candidates=None):
assert blkId in gs.playerBlocks[gs.nextPlayer]
# If no candidates are given,
# then any variation id might be selected
    if candidates is None:
candidates = range(blocksVar[blkId].shape[2])
# XXX doesn't work, fix me !!
# # Associate each candidate variation id with its height
# varHeights = map(lambda blkVarId:
# blockHeight(blocksVar[blkId][:,:,blkVarId]),\
# candidates)
# print "varHeights = ",varHeights
# hRange = sort(unique(varHeights))
# print "hRange = ",hRange
# if len(hRange) > 1:
# h = input("What height does have the variation ? (%s)\n> " \
# % ','.join(map(str,hRange)))
# else:
# h = hRange[0]
# print "h = ",h
# candidates = [candidates[i] for i in find(array(varHeights)==h)]
if len(candidates) == 1:
return candidates[0]
print "Which variation ?"
for num, blkVarId in enumerate(candidates):
print "%d) [varId:%d]" % (num+1, blkVarId)
print blockVarToASCII(blkId, blkVarId, showOrigin=True)
i = input("> ") - 1
return candidates[i]
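# Illustrative usage sketch (not from the original source): `gs` is assumed to
# be a blokus3d game state exposing legalMoves(), playMove(), playerBlocks,
# nextPlayer, boardSize, heightMap() and boardToASCII() as used above.
#
# move = findMove(gs, askApply=False) # interactive prompt picks a legal move
# gs.playMove(move)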
| mit |
openelections/openelections-data-ca | src/parse_special_primary_2015.py | 2 | 9037 | import io
import pandas as pd
import requests
import tempfile
import zipfile
import PyPDF2
from tabula import read_pdf
candidates = {'JOHN M. W. MOORLACH': 'John M. W. Moorlach',
'DONALD P. WAGNER': 'Donald P. Wagner',
'NAZ NAMAZI': 'Naz Namazi',
'SHARON RUNNER': 'Sharon Runner',
'JOSHUA C. CHANDLER': 'Joshua C. Chandler',
'JOSHUA CONAWAY': 'Joshua Conaway',
'STEVE HILL': 'Steve Hill',
'JERRY J. LAWS': 'Jerry J. Laws',
'RICHARD E. MACIAS': 'Richard E. Macias',
'JASON ZINK': 'Jason Zink'}
output_columns = ['county', 'precinct', 'office',
'district', 'party', 'candidate', 'votes']
p = {'Sharon Runner': 'REP',
'Terry Kremin': 'DEM',
'Susan Bonilla': 'DEM',
'Joan Buchanan': 'DEM',
'Michaela M. Hertle': 'REP',
'Steve Glazer': 'DEM',
'Joshua Conaway': 'DEM',
'Steve Hill': 'DEM',
'Richard E. Macias': 'DEM',
'Jerry J. Laws': 'REP',
'Joshua C. Chandler': 'NPP',
'Jason Zink': 'NPP'}
def prepare_output(df, county, district, cand):
df.columns = ['precinct'] + cand
df = pd.melt(df, id_vars='precinct',
value_vars=cand, var_name='candidate', value_name='votes').dropna()
df['party'] = df.candidate.apply(lambda x: p[x])
df = df.assign(county=county,
office='State Senate',
district=district)
df.votes = df.votes.astype(int)
df = df.groupby(
output_columns[:-1]).sum().reset_index()[output_columns]
return df
def parse_alameda():
sovc_xls = 'https://www.acgov.org/rov/elections/20150317/documents/sovc.xls'
primary = pd.read_excel(sovc_xls, sheetname='Sheet1')
# Select only contests of interest and the important columns
primary = primary.loc[(primary.index > 3) & (primary.index < 385)][
['Alameda County', 'Unnamed: 7', 'Unnamed: 8', 'Unnamed: 10', 'Unnamed: 11', 'Unnamed: 12']]
table = prepare_output(primary, 'Alameda', 7,
['Terry Kremin', 'Susan Bonilla', 'Joan Buchanan', 'Michaela M. Hertle', 'Steve Glazer'])
for x in ['candidate', 'district', 'office', 'precinct', 'county']:
table = table.sort_values(by=x, kind='mergesort')
table.to_csv(
'2015/20150317__ca__special__primary__alameda__precinct.csv', header=output_columns, index=False)
def parse_contra_costa():
sovc_xls = 'http://www.cocovote.us/wp-content/uploads/031715_ResultsByPct.xlsx'
primary = pd.read_excel(sovc_xls, sheetname=2)
primary = primary.loc[(primary.index > 2) & (primary.index < 458)][
['Return to table of content', 'Unnamed: 4', 'Unnamed: 7', 'Unnamed: 10', 'Unnamed: 13', 'Unnamed: 16']]
table = prepare_output(primary, 'Contra Costa', 7,
['Terry Kremin', 'Susan Bonilla', 'Joan Buchanan', 'Michaela M. Hertle', 'Steve Glazer'])
for x in ['candidate', 'district', 'office', 'precinct', 'county']:
table = table.sort_values(by=x, kind='mergesort')
table.to_csv(
'2015/20150317__ca__special__primary__contra_costa__precinct.csv', header=output_columns, index=False)
def parse_orange():
sovc_zip_url = 'http://www.ocvote.com/fileadmin/live/37sd2015/media.zip'
sovc_zip = requests.get(sovc_zip_url)
if sovc_zip.status_code != 200:
return
f = tempfile.NamedTemporaryFile()
f.write(sovc_zip.content)
sovc_zf = zipfile.ZipFile(f.name)
table = pd.read_csv(sovc_zf.open('contest_table.txt'))
table['votes'] = table[
['Absentee_votes', 'Early_votes', 'Election_Votes']].sum(axis=1)
table.Choice_party.fillna('W/I', inplace=True)
table = table.rename(
columns={'Precinct_Name': 'precinct',
'Candidate_name': 'candidate',
'Choice_party': 'party'}).assign(county='Orange',
office='State Senate',
district='37').replace(
{'candidate': candidates})
for x in ['candidate', 'district', 'office', 'precinct', 'county']:
table = table.sort_values(by=x, kind='mergesort')
table[output_columns].to_csv(
'2015/20150317__ca__special__primary__orange__precinct.csv', header=output_columns, index=False)
def los_angeles_sovc():
sovc_zip_url = 'https://www.lavote.net/documents/SVC/960_SVC_Excel.zip'
sovc_zip = requests.get(sovc_zip_url)
sovc_zip.raise_for_status()
f = tempfile.NamedTemporaryFile()
f.write(sovc_zip.content)
sovc_zf = zipfile.ZipFile(f.name)
df = pd.read_excel(sovc_zf.open(
'21ST_STATE_SENATE_U-T_03-17-15_Voter_Nominated_by_Precinct_960-3760.xls'))
df.columns = df.loc[1]
df = df[df.TYPE == 'TOTAL']
return pd.melt(df, id_vars=['PRECINCT'], value_vars=df.columns.tolist()[
8:-1], var_name='candidate', value_name='votes')
def los_angeles_writeins():
writeins_url = 'https://www.lavote.net/Documents/Election_Info/03172015_Certificate-of-Write-in-Votes_FINAL.pdf'
writeins_req = requests.get(writeins_url)
writeins_req.raise_for_status()
writeins_pdf = PyPDF2.PdfFileReader(io.BytesIO(writeins_req.content))
cands = writeins_pdf.getPage(0).extractText().split('\n')[58:82:2]
return [{'candidate': c, 'votes': int(v), 'PRECINCT': 'Write-In'} for
c, v in zip(cands[::2], cands[1::2])]
def parse_los_angeles():
output_columns = ['county', 'precinct', 'office',
'district', 'party', 'candidate', 'votes']
table = los_angeles_sovc().append(los_angeles_writeins())
table = table.assign(county='Los Angeles', office='State Senate', district='21').rename(
columns={'PRECINCT': 'precinct'}).replace({'candidate': candidates})
parties = {k: 'W/I' for k in candidates.values()}
parties.update(p)
table['party'] = table.candidate.apply(lambda x: parties[x])
for x in ['candidate', 'district', 'office', 'precinct', 'county']:
table = table.sort_values(by=x, kind='mergesort')
table[output_columns].to_csv(
'2015/20150317__ca__special__primary__los_angeles__precinct.csv', index=False)
def san_bernardino_sovc():
sovc_url = 'http://www.sbcountyelections.com/Portals/9/Elections/2015/0317/SOV%20Book.pdf'
primary = read_pdf(sovc_url, pages="1-6",
pandas_options={'error_bad_lines': False})
first = primary.loc[((primary.index > 2) & (primary.index < 64))][
['Unnamed: 0', 'Unnamed: 5']].rename(columns={'Unnamed: 5': 'votes'})
second = primary.loc[((primary.index > 67) & (primary.index < 129)) |
((primary.index > 132) & (primary.index < 194)) |
((primary.index > 197) & (primary.index < 243))][
['Unnamed: 0', 'Unnamed: 4']].rename(columns={'Unnamed: 4': 'votes'})
primary = read_pdf(sovc_url, pages="3,5",
pandas_options={'error_bad_lines': False})
third = primary.loc[primary.index > 2][
['Unnamed: 0', 'Unnamed: 5']].rename(columns={'Unnamed: 5': 'votes'})
pcand = 'SHARON RUNNER'
merged = first.append(second).append(third)
merged['precinct'] = merged['Unnamed: 0'].apply(
lambda x: x[len(pcand):][:7] if str(
x).startswith(pcand) else str(x)[:7])
return prepare_output(merged[['precinct', 'votes']],
'San Bernardino', 21,
['Sharon Runner'])
def san_bernardino_writeins():
writeins_url = 'http://www.sbcountyelections.com/Portals/9/Elections/2015/0317/PublicNotice_QualifiedWrite-InResults_FINAL.pdf'
writeins_req = requests.get(writeins_url)
writeins_req.raise_for_status()
writeins_pdf = PyPDF2.PdfFileReader(io.BytesIO(writeins_req.content))
cands = writeins_pdf.getPage(0).extractText().split('\n')[35:70:2]
return [{'candidate': c, 'votes': int(v), 'PRECINCT': 'Write-In'} for
c, v in zip(cands[::3], cands[2::3])]
def parse_san_bernardino():
output_columns = ['county', 'precinct', 'office',
'district', 'party', 'candidate', 'votes']
table = san_bernardino_sovc().append(san_bernardino_writeins())
table = table.assign(county='San Bernardino', office='State Senate',
district='21').replace({'candidate': candidates})
parties = {k: 'W/I' for k in candidates.values()}
parties.update(p)
table['party'] = table.candidate.apply(lambda x: parties[x])
for x in ['candidate', 'district', 'office', 'precinct', 'county']:
table = table.sort_values(by=x, kind='mergesort')
table[output_columns].to_csv(
'2015/20150317__ca__special__primary__san_bernardino__precinct.csv', index=False)
def main():
parse_alameda()
parse_contra_costa()
parse_orange()
parse_los_angeles()
parse_san_bernardino()
if __name__ == "__main__":
main()
| mit |
azukov/py-orbit | py/orbit/matching/matching.py | 2 | 9703 | import os
import string
import sys
from numpy import *
from scipy.optimize import fsolve
from scipy.optimize import root
from scipy.integrate import odeint
from scipy.constants import c
from matplotlib.pyplot import *
from orbit.teapot import TEAPOT_MATRIX_Lattice
class Twiss:
# Create a simple MAD-like twiss object:
def __init__(self):
self.data = { 'keyword': '',
's': 0.0,
'L': 0.0,
'alfx': 0.0,
'alfy': 0.0,
'betx': 0.0,
'bety': 0.0,
'mux' : 0.0,
'muy' : 0.0,
'Dx': 0.0,
'Dpx': 0.0,
'angle': 0.0,
'k1': 0.0 }
class Optics:
# An container class for twiss objects:
def __init__(self):
self.line = []
def __len__(self):
return len(self.line)
def __getitem__(self,j):
return self.line[j]
def __setitem__(self,j,x):
self.line[j]=x
def add(self, x):
self.line.append(x)
def print_line(self):
for j in xrange(0,len(self.line)):
print j, self.line[j].data['keyword'], "s:", self.line[j].data['s'], "L:", self.line[j].data['L'], 360.0*self.line[j].data['mux'],self.line[j].data['bety'],self.line[j].data['alfy']
def get_element(self, s):
Nb=len(self.line)
if self.line[0].data['s'] >= s and s >= 0.0:
return 0
for j in xrange(1,Nb):
if self.line[j-1].data['s'] < s and self.line[j].data['s'] >=s :
return j
if self.line[Nb-1].data['s'] < s :
return 0
if s < 0.0 :
return Nb-1
else:
print "error: s not in range"
print "STOP."
sys.exit(1)
def get_length(self):
Nb=len(self.line)
return self.line[Nb-1].data['s']
def readtwiss_teapot(self,lattice, bunch):
beamline=Optics()
matrix_lattice = TEAPOT_MATRIX_Lattice(lattice,bunch)
(arrmuX, arrPosAlphaX, arrPosBetaX) = matrix_lattice.getRingTwissDataX()
(arrmuY, arrPosAlphaY, arrPosBetaY) = matrix_lattice.getRingTwissDataY()
(DispersionX, DispersionXP) = matrix_lattice.getRingDispersionDataX()
(DispersionY, DispersionYP) = matrix_lattice.getRingDispersionDataY()
nodes = lattice.getNodes()
for node in nodes:
for j in range(len(arrPosBetaX)):
if (round(lattice.getNodePositionsDict()[node][1],4)==round(arrPosBetaX[j][0],4)):
muX = arrmuX[j][1]
betaX = arrPosBetaX[j][1]
alphaX = arrPosAlphaX[j][1]
dx = DispersionX[j][1]
dmux = DispersionXP[j][1]
muY = arrmuY[j][1]
betaY = arrPosBetaY[j][1]
alphaY = arrPosAlphaY[j][1]
dmuy = DispersionYP[j][1]
if node.getType() == "quad teapot":
k1l = node.getParam("kq")*node.getLength()
else:
k1l = 0.0
if node.getType() == "bend teapot":
angle = node.getParam("theta")
else:
angle = 0.0
beamline.add(1)
j=len(beamline)-1
beamline[j]=Twiss()
beamline[j].data['keyword']=node.getName()
beamline[j].data['marker']=node.getType()
beamline[j].data['s']=round(lattice.getNodePositionsDict()[node][1],4)
beamline[j].data['L']=node.getLength()
beamline[j].data['alfx']=alphaX
beamline[j].data['alfy']=alphaY
beamline[j].data['betx']=betaX
beamline[j].data['bety']=betaY
beamline[j].data['Dx']=dx
beamline[j].data['Dpx']=dmux
beamline[j].data['mux']=muX
beamline[j].data['muy']=muY
beamline[j].data['angle']=angle
beamline[j].data['k1']=k1l
return beamline
#------------------------------------------------------
# Read MADX TFS file
#-------------------------------------------------------
#------------------------------------------------------
# Envelope solver:
# x0, xs0, y0, ys0: initial values
# emitx/y: rms emittance
# Ksc: space charge perveance
#-------------------------------------------------------
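# The state vector integrated below is, following the naming convention above,
# y = [envx, envx', envy, envy', Dx, Dx']: the rms envelopes, their slopes and
# the horizontal dispersion. The 0.5*Ksc/(envx+envy) terms are the usual
# KV-type space-charge defocusing contribution (this reading is inferred from
# the variable names; it is not stated explicitly in the source).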
class EnvelopeSolver:
def __init__(self,beamline):
self.beamline = beamline
def func_odeint(self,y,s,emitx,emity,sigma_p,Ksc):
jb=self.beamline.get_element(s)
k1=self.beamline[jb].data['k1']
lj=self.beamline[jb].data['L']
anglej=self.beamline[jb].data['angle']
f0=y[1]
f1=-(k1/lj+(anglej/lj)**2)*y[0]+emitx**2/y[0]**3+0.5*Ksc/(y[0]+y[2])+y[4]*sigma_p**2*anglej/(y[0]*lj)
f2=y[3]
f3=(k1/lj)*y[2]+emity**2/y[2]**3+0.5*Ksc/(y[0]+y[2]) # -
f4=y[5]
f5=-(k1/lj+(anglej/lj)**2)*y[4]+0.5*Ksc/(y[0]*(y[0]+y[2]))*y[4]+anglej/lj
return [f0,f1,f2,f3,f4,f5]
def Dfunc_odeint(self,y,s,emitx,emity,sigma_p,Ksc):
jb=self.beamline.get_element(s)
k1=self.beamline[jb].data['k1']
lj=self.beamline[jb].data['L']
anglej=self.beamline[jb].data['angle']
a0=-(k1/lj+(anglej/lj)**2)*y[0]+emitx**2/y[0]**3+0.5*Ksc/(y[0]+y[2])+y[4]*sigma_p**2*anglej/(y[0]*lj)
a1=-(k1/lj+(anglej/lj)**2)*y[1]-3.0*y[1]*emitx**2/y[0]**4-0.5*Ksc*(y[1]+y[3])/(y[0]+y[2])**2+y[5]*sigma_p**2*anglej/(y[0]*lj)-y[4]*y[1]*sigma_p**2*anglej/(y[0]**2*lj)
a2=(k1/lj)*y[2]+emity**2/y[2]**3+0.5*Ksc/(y[0]+y[2]) # -
a3=(k1/lj)*y[3]-3.0*y[3]*emity**2/y[2]**4-0.5*Ksc*(y[1]+y[3])/(y[0]+y[2])**2 # -
a4=-(k1/lj+(anglej/lj)**2)*y[4]+0.5*Ksc/(y[0]*(y[0]+y[2]))*y[4]+anglej/lj
a5=-(k1/lj+(anglej/lj)**2)*y[5]+0.5*Ksc/(y[0]*(y[0]+y[2]))*y[5]-0.5*Ksc/(y[0]*(y[0]+y[2]))**2*y[4]*(y[1]*(y[0]+y[2])+y[0]*(y[1]+y[3]) )
return [a0,a1,a2,a3,a4,a5]
def envelope_odeint(self, emitx, emity, sigma_p, Ksc, x0, xs0, y0, ys0, Dx0, Dxs0):
Np=1000
Nb=len(self.beamline)
Lb=self.beamline[Nb-1].data['s']
s=linspace(0.0,Lb,num=Np)
sol=odeint(self.func_odeint,[x0,xs0,y0,ys0,Dx0,Dxs0],s,args=(emitx,emity,sigma_p,Ksc),Dfun=self.Dfunc_odeint,rtol=1.0e-12,atol=1.0e-12)
envx=sol[:,0]
envxs=sol[:,1]
envy=sol[:,2]
envys=sol[:,3]
Dx=sol[:,4]
Dxs=sol[:,5]
return envx,envxs,envy,envys,Dx,Dxs,s
#------------------------------------------------------
# Match: Periodic solution starting from MADX result
#-------------------------------------------------------
# this is the function for the root searching routine (fsolve)
def func_fsolve(self,x,emitx,emity,sigma_p,Ksc):
envx,envxs,envy,envys,Dx,Dxs,s = self.envelope_odeint(emitx,emity,sigma_p,Ksc,x[0],x[1],x[2],x[3],x[4],x[5])
Nb=len(envx)
return [envx[Nb-1]-x[0],envxs[Nb-1]-x[1],envy[Nb-1]-x[2],envys[Nb-1]-x[3],Dx[Nb-1]-x[4],Dxs[Nb-1]-x[5]]
# root searching using fsolve and initial values from MADX
# returns matched envelopes
def match_root(self, emitx, emity, sigma_p, Ksc):
Nb=len(self.beamline)
# start values
x0=sqrt(self.beamline[Nb-1].data['betx']*emitx)
gamx=(1.0+(self.beamline[Nb-1].data['alfx'])**2)/self.beamline[Nb-1].data['betx']
xs0=-copysign(sqrt(gamx*emitx),self.beamline[Nb-1].data['alfx'])
y0=sqrt(self.beamline[Nb-1].data['bety']*emity)
gamy=(1.0+(self.beamline[Nb-1].data['alfy'])**2)/self.beamline[Nb-1].data['bety']
ys0=-copysign(sqrt(gamy*emity),self.beamline[Nb-1].data['alfy'])
Dx0=self.beamline[Nb-1].data['Dx']
Dxs0=self.beamline[Nb-1].data['Dpx']
# solver
sol = root(self.func_fsolve, [x0,xs0,y0,ys0,Dx0,Dxs0], args=(emitx,emity,sigma_p,Ksc),method='hybr')
x0=sol.x[0]
xs0=sol.x[1]
y0=sol.x[2]
ys0=sol.x[3]
Dx0=sol.x[4]
Dxs0=sol.x[5]
envx,envxs,envy,envys,Dx,Dxs,s = self.envelope_odeint(emitx,emity,sigma_p,Ksc,x0,xs0,y0,ys0,Dx0,Dxs0)
return envx, envxs, envy, envys, Dx, Dxs, s
    # returns the matched twiss parameters at cell entrance
def match_twiss(self, emitx, emity, sigma_p, Ksc):
Nb=len(self.beamline)
# start values
x0=sqrt(self.beamline[Nb-1].data['betx']*emitx)
gamx=(1.0+(self.beamline[Nb-1].data['alfx'])**2)/self.beamline[Nb-1].data['betx']
xs0=-copysign(sqrt(gamx*emitx),self.beamline[Nb-1].data['alfx'])
y0=sqrt(self.beamline[Nb-1].data['bety']*emity)
gamy=(1.0+(self.beamline[Nb-1].data['alfy'])**2)/self.beamline[Nb-1].data['bety']
ys0=-copysign(sqrt(gamy*emity),self.beamline[Nb-1].data['alfy'])
Dx0=self.beamline[Nb-1].data['Dx']
Dxs0=self.beamline[Nb-1].data['Dpx']
# solver
        sol = root(self.func_fsolve, [x0,xs0,y0,ys0,Dx0,Dxs0], args=(emitx,emity,sigma_p,Ksc),method='hybr')
x0=sol.x[0]
xs0=sol.x[1]
y0=sol.x[2]
ys0=sol.x[3]
Dx0=sol.x[4]
Dxs0=sol.x[5]
return x0**2/emitx,y0**2/emity,-copysign(sqrt(x0**2*xs0**2/emitx**2),xs0),-copysign(sqrt(y0**2*ys0**2/emity**2),ys0), Dx0, Dxs0
#------------------------------------------------------
# Smooth focusing
#-------------------------------------------------------
def func_smooth(self,x,phase0x,phase0y,length,emitx,emity,Ksc):
kx=(phase0x/length)**2
ky=(phase0y/length)**2
return[emitx**2/x[0]**3-kx*x[0]+0.5*Ksc/(x[0]+x[1]),emity**2/x[1]**3-ky*x[1]+0.5*Ksc/(x[0]+x[1])]
def match_smooth(self,phase0x,phase0y,length,emitx,emity,Ksc):
kx=(phase0x/length)**2
ky=(phase0y/length)**2
x0=(emitx**2/kx)**(1.0/4.0)
y0=(emity**2/ky)**(1.0/4.0)
sol = root(self.func_smooth,[x0,y0],args=(phase0x,phase0y,length,emitx,emity,Ksc),method='hybr')
return sol.x[0]**2/emitx,sol.x[1]**2/emity # beta functions
#------------------------------------------------------
# Calculate phase advance for given envelopes
#-------------------------------------------------------
def phase_advance(self,envx,envy,Dx,emitx,emity,sigma_p,s):
Np=len(s)
phasex=0.0
phasey=0.0
ds=s[1]-s[0]
for j in xrange(0,Np):
phasex+=ds*emitx/(envx[j]**2-(Dx[j]*sigma_p)**2)
phasey+=ds*emity/envy[j]**2
return phasex, phasey
# analytic phase advance depression
# lc: length of the cell
def phase_analytic(self,emitx,emity,Ksc,lc):
return 0.5*Ksc*lc/(4.0*emitx), 0.5*Ksc*lc/(4.0*emity)
#------------------------------------------------------
# Entropy growth rate: pre-factor
#-------------------------------------------------------
def entropy_rate(self,envx,envy,emitx,emity,s,beta0):
Np=len(s)
ratet=0.0
ds=s[1]-s[0]
for j in xrange(0,Np):
Tx=envx[j]**2/emitx**2
Ty=envy[j]**2/emity**2
ratet+=ds/(beta0*c)*0.5*(Tx-Ty)**2/(Tx*Ty)
return ratet
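#------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module): a toy
# FODO-like cell assembled by hand from Twiss elements; all numbers are
# arbitrary assumptions chosen only to exercise the API.
#-------------------------------------------------------
# beamline = Optics()
# s_pos = 0.0
# for name, L, k1l in [('qf', 0.5, 0.3), ('d1', 2.0, 0.0),
# ('qd', 0.5, -0.3), ('d2', 2.0, 0.0)]:
# s_pos += L
# elem = Twiss()
# elem.data.update({'keyword': name, 's': s_pos, 'L': L, 'k1': k1l,
# 'betx': 10.0, 'alfx': 0.0, 'bety': 10.0, 'alfy': 0.0,
# 'Dx': 0.0, 'Dpx': 0.0, 'angle': 0.0})
# beamline.add(elem)
# solver = EnvelopeSolver(beamline)
# envx, envxs, envy, envys, Dx, Dxs, s = solver.match_root(
# emitx=1e-6, emity=1e-6, sigma_p=1e-3, Ksc=0.0)
# phx, phy = solver.phase_advance(envx, envy, Dx, 1e-6, 1e-6, 1e-3, s)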
| mit |
siutanwong/scikit-learn | examples/svm/plot_rbf_parameters.py | 57 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radius Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameters can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
ishanic/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
jacob-ruth/RuthNote | load_dataset.py | 1 | 3007 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 27 01:08:03 2017
@author: jacob
"""
import numpy as np
from generate_dataset import Song
import matplotlib.pyplot as plt
max_tempo = 1500000
max_beats = 1500
num_notes = 128
def load_item(value):
return value[0].toarray()
def unique_notes(value):
notes = np.transpose(load_item(value) != 0)
return np.vstack({tuple(row) for row in notes}).shape[1]
def num_beats(song):
num_ticks = song.piano_roll.shape[1]
return num_ticks/song.ticks_per_beat
def validate(song):
return max_tempo > song.tempo and num_beats(song) < max_beats
def downsample_to(song, ticks_per_beat, errors=False, sample_many=False):
num_ticks = song.piano_roll.shape[1]
num_beats = num_ticks / song.ticks_per_beat
old_roll = song.piano_roll.toarray()
if(sample_many):
downsample_slice_options = map(lambda start: np.round(np.linspace(start, num_ticks - 1 , num_beats * ticks_per_beat)).astype(int), range(0, ticks_per_beat))
def score_slice(slices):
new_roll = old_roll[:,slices]
return np.count_nonzero(np.sum(new_roll[:,:-1] != new_roll[:,1:], axis=0))
optimal_slice = max(downsample_slice_options, key=score_slice)
else:
optimal_slice = np.round(np.linspace(0, num_ticks - 1 , num_beats * ticks_per_beat)).astype(int)
if errors:
new_roll = old_roll[:, optimal_slice]
num_difference_new = np.count_nonzero(np.sum(new_roll[:,:-1] != new_roll[:,1:], axis=0))
num_difference_old = np.count_nonzero(np.sum(old_roll[:,:-1] != old_roll[:,1:], axis=0))
return Song(old_roll[:,optimal_slice], ticks_per_beat, song.filepath, song.tempo), num_difference_new, num_difference_old
else:
return Song(old_roll[:,optimal_slice], ticks_per_beat, song.filepath, song.tempo)
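# Illustrative call (assumes `some_song` is one Song object loaded from dataset.npy):
# smaller = downsample_to(some_song, 16)
# smaller, new_changes, old_changes = downsample_to(some_song, 16, errors=True,
# sample_many=True)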
dataset = list(map(lambda song: downsample_to(song, 16, errors=True), filter(validate, np.load('dataset.npy'))))
num_beats_in_songs = list(map(lambda song: num_beats(song[0]), dataset))
plt.hist(num_beats_in_songs, alpha=0.75, label=['Beats'])
plt.title('beats in songs')
plt.show()
#See how much information we lost
normal_changes = np.fromiter(map(lambda x: x[1], dataset), np.float)
old_changes = np.fromiter(map(lambda x: x[2], dataset), np.float)
data = np.divide(normal_changes, old_changes)
plt.hist(data, alpha=0.75, label=['Accuracy'])
plt.legend(loc='upper right')
plt.title('niave start')
plt.show()
print('Average Accuracy:', np.mean(data))
dataset = list(map(lambda song: downsample_to(song, 16, errors=True, sample_many=True), filter(validate, np.load('dataset.npy'))))
#See how much information we lost
normal_changes = np.fromiter(map(lambda x: x[1], dataset), np.float)
old_changes = np.fromiter(map(lambda x: x[2], dataset), np.float)
data = np.divide(normal_changes, old_changes)
plt.hist(data, alpha=0.75, label=['Accuracy'])
plt.legend(loc='upper right')
plt.title('smarter start')
plt.show()
print('Average Accuracy:', np.mean(data))
| mit |
bikong2/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
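# Optional check (not needed for the figure): LabelSpreading also exposes the
# per-sample class probabilities it converged to, e.g.
#
#     probs = label_spread.label_distributions_   # shape (n_samples, n_classes)
#
# outer-ring points should carry most of their probability mass on the "outer"
# class and inner-ring points on the "inner" class.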
| bsd-3-clause |
nkmk/python-snippets | notebook/pandas_str_combine.py | 1 | 3607 | import pandas as pd
df = pd.read_csv('data/src/sample_pandas_normal.csv').head(3)
print(df)
# name age state point
# 0 Alice 24 NY 64
# 1 Bob 42 CA 92
# 2 Charlie 18 CA 70
print(df['name'].str.cat(df['state']))
# 0 AliceNY
# 1 BobCA
# 2 CharlieCA
# Name: name, dtype: object
print(df['name'].str.cat(df['state'], sep=' in '))
# 0 Alice in NY
# 1 Bob in CA
# 2 Charlie in CA
# Name: name, dtype: object
print(df['name'].str.cat(['X', 'Y', 'Z'], sep=' in '))
# 0 Alice in X
# 1 Bob in Y
# 2 Charlie in Z
# Name: name, dtype: object
print(df['name'].str.cat([df['state'], ['X', 'Y', 'Z']], sep='-'))
# 0 Alice-NY-X
# 1 Bob-CA-Y
# 2 Charlie-CA-Z
# Name: name, dtype: object
# print(df['name'].str.cat('X', sep='-'))
# ValueError: Did you mean to supply a `sep` keyword?
print(df['name'] + df['state'])
# 0 AliceNY
# 1 BobCA
# 2 CharlieCA
# dtype: object
print(df['name'] + ' in ' + df['state'])
# 0 Alice in NY
# 1 Bob in CA
# 2 Charlie in CA
# dtype: object
print(df['name'] + ' in ' + df['state'] + ' - ' + ['X', 'Y', 'Z'])
# 0 Alice in NY - X
# 1 Bob in CA - Y
# 2 Charlie in CA - Z
# dtype: object
df['col_NaN'] = ['X', pd.np.nan, 'Z']
print(df)
# name age state point col_NaN
# 0 Alice 24 NY 64 X
# 1 Bob 42 CA 92 NaN
# 2 Charlie 18 CA 70 Z
print(df['name'].str.cat(df['col_NaN'], sep='-'))
# 0 Alice-X
# 1 NaN
# 2 Charlie-Z
# Name: name, dtype: object
print(df['name'].str.cat(df['col_NaN'], sep='-', na_rep='No Data'))
# 0 Alice-X
# 1 Bob-No Data
# 2 Charlie-Z
# Name: name, dtype: object
print(df['name'] + '-' + df['col_NaN'])
# 0 Alice-X
# 1 NaN
# 2 Charlie-Z
# dtype: object
print(df['name'] + '-' + df['col_NaN'].fillna('No Data'))
# 0 Alice-X
# 1 Bob-No Data
# 2 Charlie-Z
# dtype: object
# print(df['name'].str.cat(df['age'], sep='-'))
# TypeError: sequence item 1: expected str instance, int found
print(df['name'].str.cat(df['age'].astype(str), sep='-'))
# 0 Alice-24
# 1 Bob-42
# 2 Charlie-18
# Name: name, dtype: object
# print(df['name'] + '-' + df['age'])
# TypeError: can only concatenate str (not "int") to str
print(df['name'] + '-' + df['age'].astype(str))
# 0 Alice-24
# 1 Bob-42
# 2 Charlie-18
# dtype: object
df['name_state'] = df['name'].str.cat(df['state'], sep=' in ')
print(df)
# name age state point col_NaN name_state
# 0 Alice 24 NY 64 X Alice in NY
# 1 Bob 42 CA 92 NaN Bob in CA
# 2 Charlie 18 CA 70 Z Charlie in CA
print(df.drop(columns=['name', 'state']))
# age point col_NaN name_state
# 0 24 64 X Alice in NY
# 1 42 92 NaN Bob in CA
# 2 18 70 Z Charlie in CA
df = pd.read_csv('data/src/sample_pandas_normal.csv').head(3)
print(df)
# name age state point
# 0 Alice 24 NY 64
# 1 Bob 42 CA 92
# 2 Charlie 18 CA 70
print(df.assign(name_state=df['name'] + ' in ' + df['state']))
# name age state point name_state
# 0 Alice 24 NY 64 Alice in NY
# 1 Bob 42 CA 92 Bob in CA
# 2 Charlie 18 CA 70 Charlie in CA
print(df.assign(name_state=df['name'] + ' in ' + df['state']).drop(columns=['name', 'state']))
# age point name_state
# 0 24 64 Alice in NY
# 1 42 92 Bob in CA
# 2 18 70 Charlie in CA
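# A row-wise join is another way to combine several string columns at once
# (this assumes, as in the examples above, plain string columns with no NaN):
print(df[['name', 'state']].apply(' in '.join, axis=1))
# 0      Alice in NY
# 1        Bob in CA
# 2    Charlie in CA
# dtype: object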
| mit |
kpeiruza/incubator-spot | spot-oa/oa/flow/flow_oa.py | 1 | 18676 |
import logging
import shutil
import os
import sys
import json
import numpy as np
import linecache, bisect
import csv
import pandas as pd
from collections import OrderedDict
from multiprocessing import Process
from utils import Util,ProgressBar
from components.data.data import Data
from components.geoloc.geoloc import GeoLocalization
from components.reputation.gti import gti
import time
class OA(object):
def __init__(self,date,limit=500,logger=None):
self._initialize_members(date,limit,logger)
def _initialize_members(self,date,limit,logger):
# get logger if exists. if not, create new instance.
self._logger = logging.getLogger('OA.Flow') if logger else Util.get_logger('OA.Flow',create_file=False)
# initialize required parameters.
        self._script_path = os.path.dirname(os.path.abspath(__file__))
self._date = date
self._table_name = "flow"
self._flow_results = []
self._limit = limit
self._data_path = None
self._ipynb_path = None
self._ingest_summary_path = None
self._flow_scores = []
self._results_delimiter = '\t'
# get app configuration.
self._spot_conf = Util.get_spot_conf()
# get scores fields conf
        conf_file = "{0}/flow_conf.json".format(self._script_path)
self._conf = json.loads(open (conf_file).read(),object_pairs_hook=OrderedDict)
# initialize data engine
self._db = self._spot_conf.get('conf', 'DBNAME').replace("'", "").replace('"', '')
self._engine = Data(self._db, self._table_name,self._logger)
def start(self):
####################
start = time.time()
####################
self._create_folder_structure()
self._add_ipynb()
self._get_flow_results()
self._add_network_context()
self._add_geo_localization()
self._add_reputation()
self._create_flow_scores_csv()
self._get_oa_details()
self._ingest_summary()
##################
end = time.time()
print(end - start)
##################
def _create_folder_structure(self):
# create date folder structure if it does not exist.
self._logger.info("Creating folder structure for OA (data and ipynb)")
self._data_path,self._ingest_summary_path,self._ipynb_path = Util.create_oa_folders("flow",self._date)
def _add_ipynb(self):
if os.path.isdir(self._ipynb_path):
self._logger.info("Adding edge investigation IPython Notebook")
shutil.copy("{0}/ipynb_templates/Edge_Investigation_master.ipynb".format(self._scrtip_path),"{0}/Edge_Investigation.ipynb".format(self._ipynb_path))
self._logger.info("Adding threat investigation IPython Notebook")
shutil.copy("{0}/ipynb_templates/Threat_Investigation_master.ipynb".format(self._scrtip_path),"{0}/Threat_Investigation.ipynb".format(self._ipynb_path))
else:
self._logger.error("There was a problem adding the IPython Notebooks, please check the directory exists.")
def _get_flow_results(self):
self._logger.info("Getting {0} Machine Learning Results from HDFS".format(self._date))
flow_results = "{0}/flow_results.csv".format(self._data_path)
# get hdfs path from conf file
HUSER = self._spot_conf.get('conf', 'HUSER').replace("'", "").replace('"', '')
hdfs_path = "{0}/flow/scored_results/{1}/scores/flow_results.csv".format(HUSER,self._date)
# get results file from hdfs
get_command = Util.get_ml_results_form_hdfs(hdfs_path,self._data_path)
self._logger.info("{0}".format(get_command))
        # validate the results file exists
if os.path.isfile(flow_results):
# read number of results based in the limit specified.
self._logger.info("Reading {0} flow results file: {1}".format(self._date,flow_results))
self._flow_results = Util.read_results(flow_results,self._limit,self._results_delimiter)
            if len(self._flow_results) == 0: self._logger.error("There are no flow results.");sys.exit(1)
else:
self._logger.error("There was an error getting ML results from HDFS")
sys.exit(1)
# add headers.
self._logger.info("Adding headers based on configuration file: score_fields.json")
self._flow_scores = [ [ str(key) for (key,value) in self._conf['flow_score_fields'].items()] ]
# filter results add sev and rank.
self._logger.info("Filtering required columns based on configuration")
self._flow_scores.extend([ [0] + [ conn[i] for i in self._conf['column_indexes_filter'] ] + [n] for n, conn in enumerate(self._flow_results) ])
def _create_flow_scores_csv(self):
flow_scores_csv = "{0}/flow_scores.csv".format(self._data_path)
Util.create_csv_file(flow_scores_csv,self._flow_scores)
# create bk file
flow_scores_bu_csv = "{0}/flow_scores_bu.csv".format(self._data_path)
Util.create_csv_file(flow_scores_bu_csv,self._flow_scores)
def _add_network_context(self):
        # use ipranges.csv to determine whether the IPs are internal.
ip_ranges_file = "{0}/context/ipranges.csv".format(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
# add new headers (srcIpInternal/destIpInternal).
self._logger.info("Adding network context headers")
flow_headers = self._flow_scores[0]
flow_headers.extend(["srcIpInternal","destIpInternal"])
# add values to srcIpInternal and destIpInternal.
flow_scores = iter(self._flow_scores)
next(flow_scores)
if os.path.isfile(ip_ranges_file):
self._logger.info("Start adding network context...")
# get ranges from configuration file.
self._logger.info("Reading network context file: {0}".format(ip_ranges_file))
with open(ip_ranges_file, 'rb') as f:
nc_ranges = [ map(Util.ip_to_int,line.strip('\n').split(',')) for line in f ]
# get src and dst IPs
src_ip_index = self._conf["flow_score_fields"]["srcIP"]
dst_ip_index = self._conf["flow_score_fields"]["dstIP"]
# add networkcontext per connection.
ip_internal_ranges = filter(None,nc_ranges)
self._logger.info("Adding networkcontext to suspicious connections.")
self._flow_scores = [ conn + [ self._is_ip_internal(conn[src_ip_index],ip_internal_ranges)]+[ self._is_ip_internal(conn[dst_ip_index],ip_internal_ranges)] for conn in flow_scores]
else:
self._flow_scores = [ conn + ["",""] for conn in flow_scores ]
self._logger.info("WARNING: Network context was not added because the file ipranges.csv does not exist.")
self._flow_scores.insert(0,flow_headers)
def _is_ip_internal(self,ip, ranges):
result = 0
for row in ranges:
if Util.ip_to_int(ip) >= row[0] and Util.ip_to_int(ip) <= row[1]:
result = 1
break
return result
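    # Expected format of `ranges` (parsed from ipranges.csv above): a list of
    # [start_int, end_int] pairs produced with Util.ip_to_int, so a CSV row such as
    # "10.0.0.0,10.255.255.255" (an illustrative example, not a shipped value) marks
    # every address in that block as internal and _is_ip_internal returns 1 for it.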
def _add_geo_localization(self):
        # use iploc.csv to geo-localize the source and destination IPs.
iploc_file = "{0}/context/iploc.csv".format(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
        # add new headers (srcGeo/dstGeo/srcDomain/dstDomain).
self._logger.info("Adding geo localization headers")
flow_headers = self._flow_scores[0]
flow_headers.extend(["srcGeo","dstGeo","srcDomain","dstDomain"])
        # add values to the geo localization columns.
flow_scores = iter(self._flow_scores)
next(flow_scores)
if os.path.isfile(iploc_file):
self._logger.info("Initializing geo localization component")
geo = GeoLocalization(iploc_file,self._logger)
src_ip_index = self._conf["flow_score_fields"]["srcIP"]
dst_ip_index = self._conf["flow_score_fields"]["dstIP"]
self._logger.info("Adding geo localization...")
self._flow_scores = []
for conn in flow_scores:
                # get geo localization for src ip
self._logger.debug("Searching geo for src ip {0}".format(conn[src_ip_index]))
src_geo_dict = geo.get_ip_geo_localization(conn[src_ip_index])
                # get geo localization for dst ip.
self._logger.debug("Searching geo for dst ip {0}".format(conn[dst_ip_index]))
dst_geo_dict = geo.get_ip_geo_localization(conn[dst_ip_index])
# adding columns to the current connection list.
conn.extend([src_geo_dict["geo_loc"],dst_geo_dict["geo_loc"],src_geo_dict["domain"],dst_geo_dict["domain"]])
self._flow_scores.extend([conn])
else:
self._flow_scores = [ conn + ["","","",""] for conn in flow_scores ]
self._logger.info("WARNING: IP location was not added because the file {0} does not exist.".format(iploc_file))
self._flow_scores.insert(0,flow_headers)
def _add_reputation(self):
reputation_conf_file = "{0}/components/reputation/reputation_config.json".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        # add new headers (srcIP_rep/dstIP_rep).
self._logger.info("Adding reputation headers")
flow_headers_rep = self._flow_scores[0]
flow_headers_rep.extend(["srcIP_rep","dstIP_rep"])
# read configuration.
self._logger.info("Reading reputation configuration file: {0}".format(reputation_conf_file))
rep_conf = json.loads(open(reputation_conf_file).read())
if "gti" in rep_conf and os.path.isfile(rep_conf['gti']['refclient']):
rep_conf = rep_conf['gti']
# initialize gti module.
self._logger.info("Initializing GTI component")
flow_gti = gti.Reputation(rep_conf,self._logger)
# get all src ips.
src_ip_index = self._conf["flow_score_fields"]["srcIP"]
dst_ip_index = self._conf["flow_score_fields"]["dstIP"]
self._logger.info("Getting GTI reputation for src IPs")
flow_scores_src = iter(self._flow_scores)
next(flow_scores_src)
# getting reputation for src IPs
src_ips = [ conn[src_ip_index] for conn in flow_scores_src ]
src_rep_results = flow_gti.check(src_ips)
self._logger.info("Getting GTI reputation for dst IPs")
flow_scores_dst = iter(self._flow_scores)
next(flow_scores_dst)
# getting reputation for dst IPs
dst_ips = [ conn[dst_ip_index] for conn in flow_scores_dst ]
dst_rep_results = flow_gti.check(dst_ips)
flow_scores_final = iter(self._flow_scores)
next(flow_scores_final)
self._flow_scores = []
flow_scores = [conn + [src_rep_results[conn[src_ip_index]]] + [dst_rep_results[conn[dst_ip_index]]] for conn in flow_scores_final ]
self._flow_scores = flow_scores
else:
            # add empty values to srcIP_rep and dstIP_rep.
flow_scores = iter(self._flow_scores)
next(flow_scores)
self._flow_scores = [ conn + ["",""] for conn in flow_scores ]
self._logger.info("WARNING: IP reputation was not added. No refclient configured")
self._flow_scores.insert(0,flow_headers_rep)
def _get_oa_details(self):
self._logger.info("Getting OA Flow suspicious details/chord diagram")
# start suspicious connects details process.
p_sp = Process(target=self._get_suspicious_details)
p_sp.start()
# start chord diagram process.
p_ch = Process(target=self._get_chord_details)
p_ch.start()
p_sp.join()
p_ch.join()
def _get_suspicious_details(self,bar=None):
# skip header
sp_connections = iter(self._flow_scores)
next(sp_connections)
# loop connections.
connections_added = []
for conn in sp_connections:
# validate if the connection's details are not already extracted.
if conn in connections_added:
continue
else:
connections_added.append(conn)
src_ip_index = self._conf["flow_score_fields"]["srcIP"]
dst_ip_index = self._conf["flow_score_fields"]["dstIP"]
# get src ip
sip = conn[src_ip_index]
# get dst ip
dip = conn[dst_ip_index]
# get hour and date (i.e. 2014-07-08 10:10:40)
date_array = conn[1].split(' ')
date_array_1 = date_array[0].split('-')
date_array_2 = date_array[1].split(':')
yr = date_array_1[0]
dy = date_array_1[2]
mh = date_array_1[1]
hr = date_array_2[0]
mm = date_array_2[1]
# connection details query.
sp_query = ("SELECT treceived as tstart,sip as srcip,dip as dstip,sport as sport,dport as dport,proto as proto,flag as flags,stos as TOS,ibyt as ibytes,ipkt as ipkts,input as input, output as output,rip as rip, obyt as obytes, opkt as opkts from {0}.{1} where ((sip='{2}' AND dip='{3}') or (sip='{3}' AND dip='{2}')) AND y={8} AND m={4} AND d={5} AND h={6} AND trminute={7} order by tstart limit 100")
# sp query.
sp_query = sp_query.format(self._db,self._table_name,sip,dip,mh,dy,hr,mm,yr)
# output file.
edge_file = "{0}/edge-{1}-{2}-{3}-{4}.tsv".format(self._data_path,sip.replace(".","_"),dip.replace(".","_"),hr,mm)
# execute query
self._engine.query(sp_query,output_file=edge_file,delimiter="\\t")
def _get_chord_details(self,bar=None):
# skip header
sp_connections = iter(self._flow_scores)
next(sp_connections)
src_ip_index = self._conf["flow_score_fields"]["srcIP"]
dst_ip_index = self._conf["flow_score_fields"]["dstIP"]
# get date parameters.
yr = self._date[:4]
mn = self._date[4:6]
dy = self._date[6:]
# get number of times each IP appears.
srcdict = {}
for conn in sp_connections:
if conn[src_ip_index] in srcdict:srcdict[conn[src_ip_index]] += 1
else:srcdict[conn[src_ip_index]] = 1
if conn[dst_ip_index] in srcdict:srcdict[conn[dst_ip_index]] += 1
else:srcdict[conn[dst_ip_index]] = 1
for (ip,n) in srcdict.items():
if n > 1:
ip_list = []
sp_connections = iter(self._flow_scores)
next(sp_connections)
for row in sp_connections:
if ip == row[2] : ip_list.append(row[3])
if ip == row[3] :ip_list.append(row[2])
ips = list(set(ip_list))
if len(ips) > 1:
ips_filter = (",".join(str("'{0}'".format(ip)) for ip in ips))
chord_file = "{0}/chord-{1}.tsv".format(self._data_path,ip.replace(".","_"))
ch_query = ("SELECT sip as srcip, dip as dstip, SUM(ibyt) as ibytes, SUM(ipkt) as ipkts from {0}.{1} where y={2} and m={3} \
and d={4} and ( (sip='{5}' and dip IN({6})) or (sip IN({6}) and dip='{5}') ) group by sip,dip")
self._engine.query(ch_query.format(self._db,self._table_name,yr,mn,dy,ip,ips_filter),chord_file,delimiter="\\t")
def _ingest_summary(self):
# get date parameters.
yr = self._date[:4]
mn = self._date[4:6]
dy = self._date[6:]
self._logger.info("Getting ingest summary data for the day")
ingest_summary_cols = ["date","total"]
result_rows = []
df_filtered = pd.DataFrame()
ingest_summary_file = "{0}/is_{1}{2}.csv".format(self._ingest_summary_path,yr,mn)
ingest_summary_tmp = "{0}.tmp".format(ingest_summary_file)
if os.path.isfile(ingest_summary_file):
df = pd.read_csv(ingest_summary_file, delimiter=',',names=ingest_summary_cols, skiprows=1)
df_filtered = df[df['date'].str.contains("{0}-{1}-{2}".format(yr, mn, dy)) == False]
else:
df = pd.DataFrame()
# get ingest summary.
ingest_summary_qry = ("SELECT tryear, trmonth, trday, trhour, trminute, COUNT(*) total"
" FROM {0}.{1} "
" WHERE "
" y={2} "
" AND m={3} "
" AND d={4} "
" AND unix_tstamp IS NOT NULL AND sip IS NOT NULL "
" AND sport IS NOT NULL AND dip IS NOT NULL "
" AND dport IS NOT NULL AND ibyt IS NOT NULL "
" AND ipkt IS NOT NULL "
" GROUP BY tryear, trmonth, trday, trhour, trminute;")
ingest_summary_qry = ingest_summary_qry.format(self._db,self._table_name, yr, mn, dy)
results_file = "{0}/results_{1}.csv".format(self._ingest_summary_path,self._date)
self._engine.query(ingest_summary_qry,output_file=results_file,delimiter=",")
if os.path.isfile(results_file):
result_rows = pd.read_csv(results_file, delimiter=',')
df_new = pd.DataFrame([["{0}-{1}-{2} {3}:{4}".format(yr, mn, dy, str(val['trhour']).zfill(2), str(val['trminute']).zfill(2)), int(val[5])] for key,val in result_rows.iterrows()],columns = ingest_summary_cols)
df_filtered = df_filtered.append(df_new, ignore_index=True)
df_filtered.to_csv(ingest_summary_tmp,sep=',', index=False)
os.remove(results_file)
os.rename(ingest_summary_tmp,ingest_summary_file)
else:
self._logger.info("No data found for the ingest summary")
| apache-2.0 |
varenius/salsa | USRP/usrp_gnuradio_dev/plot_array_files_sigref_vel.py | 1 | 2832 | import matplotlib.pyplot as plt
import numpy as np
import sys
import time
import scipy.signal as sig
from scipy.constants import c
nu0 = 1420.40575177 # MHz
sigfile = sys.argv[1]
reffile = sys.argv[2]
files = [sigfile, reffile]
result = []
for infile in files:
indata = np.load(infile)
spec = indata[0]
samp_rate = indata[1]
fftsize = indata[2]
center_freq = indata[3] # MHz
halffft = int(0.5*fftsize)
freqs = 0.5*samp_rate*np.array(range(-halffft,halffft))/(halffft)
delta_nu = samp_rate/fftsize
RFI = [[1419.4-0.210, 0.02],
[1419.4-1.937, 0.015],
[1419.4-4.4, 0.015],
[1419.4+3.0, 0.01],
[center_freq, 4*delta_nu], # remove dip in the center of band, always about 4 fft points wide. Use 8, else errors
[1416.4-0.8, 0.04],
[1420.4-2, 0.01],
[1425, 0.01],
[1424.4-1.8, 0.01],
[1424.4+0.5845, 0.01],
[1424.4+0.483, 0.005],
]
for item in RFI:
RFI_freq = item[0]
RFI_width = item[1]
ch0_freq = center_freq - 0.5*samp_rate
ind_low = int(np.floor((RFI_freq-0.5*RFI_width - ch0_freq)/delta_nu))
ind_high = int(np.ceil((RFI_freq+0.5*RFI_width - ch0_freq)/delta_nu))
if ind_low>0 and ind_high<len(spec):
margin = min(ind_high-ind_low, ind_low, len(spec)-ind_high)
RFI_part = spec[ind_low-margin:ind_high+margin]
xdata = np.arange(len(RFI_part))
weights = np.ones_like(RFI_part)
weights[margin:-margin] = 0.0 # Ignore RFI when fitting
pf = np.polyfit(xdata, RFI_part, deg=1, w=weights)
interpdata = np.polyval(pf, xdata)
spec[ind_low:ind_high] = interpdata[margin:-margin]
else:
print 'Ignoring', item
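    # The loop above blanks each listed RFI line by linear interpolation: the affected
    # channels (ind_low:ind_high) plus a margin of clean channels on either side are
    # fitted with a 1st-degree polynomial, giving zero weight to the RFI channels
    # themselves, and the fitted values then replace the contaminated channels.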
calspec = spec*230 # Determined from comparing with LAB survey at glong 80, glat 0
result.append(calspec[:])
specres = result[0]-result[1]
plt.figure()
indata = np.load(sigfile)
samp_rate = indata[1]
fftsize = indata[2]
center_freq = indata[3] # MHz
halffft = int(0.5*fftsize)
freqs = 0.5*samp_rate*np.array(range(-halffft,halffft))/(halffft)
vels = -1*1e-3* (freqs+center_freq -nu0)*c/nu0 # km/s, -1 to get sign right compared to LAB
plt.xlabel('Offset from ' + str(round(center_freq, 2)) + 'MHz')
plt.plot(freqs, result[0])
plt.plot(freqs, result[1])
plt.legend(['Sig', 'Ref'])
plt.figure()
plt.xlabel('km/s')
print vels
plt.plot(vels, result[0])
plt.plot(vels, result[1])
plt.legend(['Sig', 'Ref'])
plt.figure()
plt.plot(vels, specres)
plt.title('Signal - ref * [Approx. LAB comparision at 80,0] ')
l = len(specres)
lind = 0.1*l
hind = 0.9*l
plt.figure()
nout = 1024
dec = sig.decimate(specres, fftsize/nout, axis=0)
plt.plot(dec)
plt.title('Decimated to ' +str(nout) + 'ch.')
plt.show()
| mit |
brentp/pedagree | scripts/extract-1kg.py | 2 | 2267 | import numpy as np
from cyvcf2 import VCF
from scipy.stats import chi2_contingency
def get_hwe_likelihood(obs_hom_ref, obs_het, obs_hom_alt, aaf):
"""
    Test for deviation from Hardy-Weinberg equilibrium: compare the observed
    genotype counts with the counts expected under HWE for the given alternate
    allele frequency (aaf), and return the full scipy chi2_contingency result
    (chi2 statistic, p-value, degrees of freedom, expected counts).
"""
# Bail out if aaf is undefined. This occurs
# when there are multiple alternate alleles
q = aaf
p = 1 - aaf
obs = [obs_hom_ref, obs_het, obs_hom_alt]
n = sum(obs)
exp = [p ** 2, 2 * p * q, q ** 2]
exp = [n * v for v in exp]
return chi2_contingency(np.array([exp, obs]))
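# Worked example of the return value (hypothetical counts, for illustration only):
# with (obs_hom_ref, obs_het, obs_hom_alt) = (120, 60, 20) and aaf = 0.25, n = 200
# and the HWE-expected counts are [0.75**2, 2*0.75*0.25, 0.25**2] * 200
# = [112.5, 75.0, 12.5]. chi2_contingency returns a 4-tuple
# (chi2 statistic, p-value, dof, expected table), so x[1] in the loop below is the
# p-value that gets thresholded (sites with x[1] < 0.04 are skipped).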
if __name__ == "__main__":
import sys
#sites = [x.strip().split(":") for x in open('/uufs/chpc.utah.edu/common/home/u6000771/purcell5k.intervals')]
sites = [x.strip().split(":") for x in open(sys.argv[1])]
for s in sites:
s[1] = int(s[1])
hwes = []
a = {}
vcf = VCF('/uufs/chpc.utah.edu/common/home/u6000294/isilon/1kg_p3/up/ALL.phase3.autosome.vcf.gz', gts012=True)
for i, (chrom, pos) in enumerate(sites):
k = 0
for v in vcf('%s:%s-%s' % (chrom, pos, pos)):
if len(v.ALT) != 1: continue
if len(v.REF) != 1: continue
if '>' in v.ALT[0]: continue
if v.call_rate < 0.8: continue
if not (0.04 < v.aaf < 0.99): continue
key = (v.CHROM, v.POS, v.REF, v.ALT[0])
x = get_hwe_likelihood(v.num_hom_ref, v.num_het, v.num_hom_alt, v.aaf)
if x[1] < 0.04: continue
sites[i].extend((v.REF, v.ALT[0]))
a[key] = np.array(v.gt_types)
if k > 0:
print sites[i]
k += 1
hwes.append(x[1])
if len(hwes) % 500 == 0:
print sites[i], len(hwes)
print sites[i], len(hwes)
from matplotlib import pyplot as plt
plt.hist(hwes, 50)
plt.show()
a = np.array([a[tuple(s)] for s in sites if len(s) == 4], dtype=np.uint8)
sites = [tuple(s) for s in sites if len(s) == 4]
print(len(sites))
print(a.shape)
with open('1kg.sites.%d.%d.bin' % a.shape, 'w') as fh:
fh.write(a.tostring())
with open('1kg.sites', 'w') as fh:
fh.write("\n".join("%s:%d:%s:%s" % tuple(s) for s in sites))
fh.write("\n")
| mit |
psychopy/psychopy | psychopy/app/themes/_themes.py | 1 | 46279 | import os
import subprocess
import sys
import wx
import wx.lib.agw.aui as aui
import wx.stc as stc
import wx.richtext
from psychopy.localization import _translate
from wx import py
import numpy
import keyword
import builtins
from pathlib import Path
from psychopy import prefs
from psychopy import logging
import psychopy
from ...experiment import components
import json
if sys.platform=='win32':
from matplotlib import font_manager
fm = font_manager.FontManager()
thisFolder = Path(__file__).parent
iconsPath = Path(prefs.paths['resources'])
try:
FileNotFoundError
except NameError:
# Py2 has no FileNotFoundError
FileNotFoundError = IOError
allCompons = components.getAllComponents() # ensures that the icons get checked
# Create library of "on brand" colours
cLib = {
'none': [127, 127, 127, 0],
'black': [0, 0, 0],
'grey': [102, 102, 110],
'white': [242, 242, 242],
'red': [242, 84, 91],
'green': [108, 204, 116],
'blue': [2, 169, 234],
'yellow': [241, 211, 2],
'orange': [236, 151, 3],
'purple': [195, 190, 247],
'darker': {},
'lighter': {},
'very': {'lighter': {},
'darker': {}}
}
# Create light and dark variants of each colour by +-15 to each value
for c in cLib:
if not c in ['darker', 'lighter', 'none', 'very']:
cLib['darker'][c] = [max(0, n-15) for n in cLib[c]]
cLib['lighter'][c] = [min(255, n+15) for n in cLib[c]]
# Create very light and very dark variants of each colour by a further +-30 to each value
for c in cLib['lighter']:
cLib['very']['lighter'][c] = [min(255, n+30) for n in cLib['lighter'][c]]
for c in cLib['darker']:
cLib['very']['darker'][c] = [max(0, n-30) for n in cLib['darker'][c]]
class ThemeMixin:
lexers = {
stc.STC_LEX_PYTHON: "python",
stc.STC_LEX_CPP: "c++",
stc.STC_LEX_R: "R"
#stc.STC_LEX_JSON: "json"
}
# these are populated and modified by PsychoPyApp.theme.setter
spec = None
codetheme = 'PsychopyLight'
mode = 'light'
icons = 'light'
codeColors = {}
appColors = {}
appIcons = {'components': {},
'resources': {}}
def loadThemeSpec(self, themeName):
"""Load a spec file from disk"""
# a theme spec contains the spec for the *code* theme as well as a mode
# that determines which colorscheme to load for the app (separate)
themesPath = Path(prefs.paths['themes'])
# first load the *theme* which contains the mode name for the app
try:
with open(str(themesPath / (themeName+".json")), "rb") as fp:
ThemeMixin.spec = themeSpec = json.load(fp)
except FileNotFoundError:
with open(str(themesPath / "PsychopyLight.json"), "rb") as fp:
ThemeMixin.spec = themeSpec = json.load(fp)
appColorMode = themeSpec['app']
# Get app spec
try:
with open(str(themesPath / "app/{}.json".format(appColorMode)), "rb") as fp:
ThemeMixin.spec = appColors = json.load(fp)
except FileNotFoundError:
with open(str(themesPath / "app/light.json"), "rb") as fp:
ThemeMixin.spec = appColors = json.load(fp)
# Set app theme
ThemeMixin.mode = appColorMode
self._setAppColors(appColors)
# Set app icons
if 'icons' in themeSpec:
ThemeMixin.icons = themeSpec['icons']
else:
ThemeMixin.icons = themeSpec['app']
# Set coder theme
codertheme = themeSpec
ThemeMixin.codetheme = themeName
self._setCodeColors(codertheme)
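    # For reference, a minimal theme file accepted by this loader looks roughly like
    # the sketch below (keys inferred from this method and from ThemeSwitcher; the
    # colour values and font are placeholders, not shipped defaults):
    #
    #     {
    #       "base":  {"bg": "#1e1e1e", "fg": "#f2f2f2", "font": "JetBrains Mono"},
    #       "app":   "dark",
    #       "icons": "dark",
    #       "info":  "short description shown in the theme menu"
    #     }
    #
    # "app" selects themes/app/<name>.json for the app colours; "icons" is optional
    # and falls back to the value of "app".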
def _applyAppTheme(self, target=None):
"""Applies colorScheme recursively to the target and its children
Parameters
----------
colorScheme: the new color spec being applied (dict)
target: the wx object to which being applied
depth: depth in the tree of wx objects
"""
# Define subfunctions to handle different object types
def applyToToolbar(target):
target.SetBackgroundColour(ThemeMixin.appColors['frame_bg'])
# Clear tools
target.ClearTools()
# Redraw tools
target.makeTools()
def applyToStatusBar(target):
target.SetBackgroundColour(cLib['white'])
def applyToFrame(target):
target.SetBackgroundColour(ThemeMixin.appColors['frame_bg'])
target.SetForegroundColour(ThemeMixin.appColors['text'])
if hasattr(target, 'GetAuiManager'):
target.GetAuiManager().SetArtProvider(PsychopyDockArt())
target.GetAuiManager().Update()
for menu in target.GetMenuBar().GetMenus():
for submenu in menu[0].MenuItems:
if isinstance(submenu.SubMenu, ThemeSwitcher):
submenu.SubMenu._applyAppTheme()
def applyToPanel(target):
target.SetBackgroundColour(ThemeMixin.appColors['panel_bg'])
target.SetForegroundColour(ThemeMixin.appColors['text'])
def applyToNotebook(target):
# Dict of icons to apply to specific tabs
tabIcons = {
"Structure": "coderclass16.png",
"FileBrowser": "folder-open16.png",
"PythonShell": "coderpython16.png",
"ConsoleOutput": "stdout.png",
}
target.SetArtProvider(PsychopyTabArt())
target.GetAuiManager().SetArtProvider(PsychopyDockArt())
for index in range(target.GetPageCount()):
page = target.GetPage(index)
page.SetBackgroundColour(ThemeMixin.appColors['panel_bg'])
if page.GetName() in tabIcons:
bmp = IconCache.getBitmap(IconCache(), tabIcons[page.GetName()])
target.SetPageBitmap(index, bmp)
page._applyAppTheme()
def applyToCodeEditor(target):
spec = ThemeMixin.codeColors.copy()
base = spec['base']
# Set margin size according to text size
if not isinstance(target, wx.py.shell.Shell):
target.SetMarginWidth(0, 4 * prefs.coder['codeFontSize'])
# Override base font with user spec if present
prefkey = 'outputFont' if isinstance(target, wx.py.shell.Shell) else 'codeFont'
if prefs.coder[prefkey].lower() != "From Theme...".lower():
for key in spec:
if 'font' in spec[key]:
spec[key]['font'] = prefs.coder[prefkey] if spec[key]['font'] == base['font'] \
else base['font']
base['font'] = prefs.coder[prefkey]
# Check that key is in tag list
invalid = []
for key in spec:
if key not in self.tags:
invalid += [key]
for key in invalid:
del spec[key]
# Check for language specific spec
if target.GetLexer() in target.lexers:
lexer = target.lexers[target.GetLexer()]
else:
lexer = 'invlex'
if lexer in spec:
# If there is lang specific spec, delete subkey...
                lang = spec[lexer] # ...and append spec to root, overriding any generic spec
spec.update({key: lang[key] for key in lang})
else:
lang = {}
# Set style for undefined lexers
for key in [getattr(wx._stc, item) for item in dir(wx._stc) if item.startswith("STC_LEX")]:
target.StyleSetBackground(key, base['bg'])
target.StyleSetForeground(key, base['fg'])
target.StyleSetSpec(key, "face:%(font)s,size:%(size)d" % base)
# Set style from universal data
for key in spec:
if target.tags[key] is not None:
target.StyleSetBackground(target.tags[key], spec[key]['bg'])
target.StyleSetForeground(target.tags[key], spec[key]['fg'])
target.StyleSetSpec(target.tags[key], "face:%(font)s,size:%(size)d" % spec[key])
# Apply keywords
for level, val in target.lexkw.items():
target.SetKeyWords(level, " ".join(val))
# Set margin
target.SetFoldMarginColour(True, spec['margin']['bg'])
target.SetFoldMarginHiColour(True, spec['margin']['bg'])
# Set caret colour
target.SetCaretForeground(spec['caret']['fg'])
target.SetCaretLineBackground(spec['caret']['bg'])
target.SetCaretWidth(1 + ('bold' in spec['caret']['font']))
# Set selection colour
target.SetSelForeground(True, spec['select']['fg'])
target.SetSelBackground(True, spec['select']['bg'])
# Set wrap point
target.edgeGuideColumn = target.prefs['edgeGuideColumn']
target.edgeGuideVisible = target.edgeGuideColumn > 0
# Set line spacing
spacing = min(int(target.prefs['lineSpacing'] / 2), 64) # Max out at 64
target.SetExtraAscent(spacing)
target.SetExtraDescent(spacing)
def applyToRichText(target):
base = ThemeMixin.codeColors['base']
# todo: Add element-specific styling (it must be possible...)
# If dealing with a StdOut, set background from base
target.SetBackgroundColour(
self.hex2rgb(base['bg'], base['bg']))
# Then construct default styles
bold = wx.FONTWEIGHT_BOLD if "bold" in base['font'] else wx.FONTWEIGHT_NORMAL
italic = wx.FONTSTYLE_ITALIC if "italic" in base['font'] else wx.FONTSTYLE_NORMAL
# Override base font with user spec if present
if prefs.coder['outputFont'].lower() == "From Theme...".lower():
fontName = base['font'].replace("bold", "").replace("italic", "").replace(",", "")
else:
fontName = prefs.coder['outputFont']
_font = wx.Font(
int(prefs.coder['outputFontSize']),
wx.FONTFAMILY_TELETYPE, italic,
bold, False,
faceName=fontName
)
_style = wx.TextAttr(
colText=wx.Colour(
self.hex2rgb(base['fg'], base['fg'])),
colBack=wx.Colour(
self.hex2rgb(base['bg'], base['bg'])),
font=_font)
# Then style all text as base
i = 0
for ln in range(target.GetNumberOfLines()):
i += target.GetLineLength(
ln) + 1 # +1 as \n is not included in character count
target.SetStyle(0, i, _style)
def applyToTextCtrl(target):
base = ThemeMixin.codeColors['base']
target.SetForegroundColour(base['fg'])
target.SetBackgroundColour(base['bg'])
# Define dict linking object types to subfunctions
handlers = {
wx.Frame: applyToFrame,
wx.Panel: applyToPanel,
aui.AuiNotebook: applyToNotebook,
psychopy.app.coder.coder.BaseCodeEditor: applyToCodeEditor,
wx.richtext.RichTextCtrl: applyToRichText,
wx.py.shell.Shell: applyToCodeEditor,
wx.ToolBar: applyToToolbar,
wx.StatusBar: applyToStatusBar,
wx.TextCtrl: applyToTextCtrl
}
# If no target supplied, default to using self
if target is None:
target = self
if not hasattr(self, '_recursionDepth'):
self._recursionDepth = 0
else:
self._recursionDepth += 1
appCS = ThemeMixin.appColors
base = ThemeMixin.codeColors['base']
# Abort if target is immune
if hasattr(target, 'immune'):
return
# Style target
isHandled = False
for thisType in handlers:
if isinstance(target, thisType):
handlers[thisType](target)
isHandled = True
if not isHandled:
# try and set colors for target
try:
target.SetBackgroundColour(ThemeMixin.appColors['panel_bg'])
target.SetForegroundColour(ThemeMixin.appColors['text'])
except AttributeError:
pass
# search for children (set in a second step)
if isinstance(target, wx.Sizer):
sizer = target
children = sizer.Children
else:
children = []
if hasattr(target, 'Children'):
children.extend(target.Children)
elif hasattr(target, 'immune'):
pass
elif hasattr(target, 'paneManager'):
for pane in target.paneManager.AllPanes:
children.append(pane.window)
elif hasattr(target, 'Sizer') and target.Sizer:
children.append(target.Sizer)
if hasattr(self, 'btnHandles'):
for thisBtn in self.btnHandles:
pass
# then apply to all children as well
for c in children:
if hasattr(c, '_applyAppTheme'):
# if the object understands themes then request that
c._applyAppTheme()
elif self._recursionDepth>10:
return
else:
# if not then use our own recursive method to search
if hasattr(c, 'Window') and c.Window is not None:
ThemeMixin._applyAppTheme(c.Window)
elif hasattr(c, 'Sizer') and c.Sizer is not None:
ThemeMixin._applyAppTheme(c.Sizer)
# and then apply
# try:
# ThemeMixin._applyAppTheme(c)
# except AttributeError:
# pass
if hasattr(target, 'Refresh'):
target.Refresh()
if hasattr(target, '_mgr'):
target._mgr.Update()
@property
def lexkw(self):
baseC = {
0: ['typedef', 'if', 'else', 'return', 'struct', 'for', 'while', 'do',
'using', 'namespace', 'union', 'break', 'enum', 'new', 'case',
'switch', 'continue', 'volatile', 'finally', 'throw', 'try',
'delete', 'typeof', 'sizeof', 'class', 'volatile', 'int',
'float', 'double', 'char', 'short', 'byte', 'void', 'const',
'unsigned', 'signed', 'NULL', 'true', 'false', 'bool', 'size_t',
'long', 'long long'],
1: []
}
if self.GetLexer() == stc.STC_LEX_PYTHON:
# Python
keywords = {
0: keyword.kwlist + ['cdef', 'ctypedef', 'extern', 'cimport', 'cpdef', 'include'],
1: dir(builtins) + ['self']
}
elif self.GetLexer() == stc.STC_LEX_R:
# R
keywords = {
1: ['function', 'for', 'repeat', 'while', 'if', 'else',
'break', 'local', 'global'],
0: ['NA']
}
elif self.GetLexer() == stc.STC_LEX_CPP:
# C/C++
keywords = baseC.copy()
if hasattr(self, 'filename'):
if self.filename.endswith('.js'):
# JavaScript
keywords = {
0: ['var', 'const', 'let', 'import', 'function', 'if',
'else', 'return', 'struct', 'for', 'while', 'do',
'finally', 'throw', 'try', 'switch', 'case',
'break'],
1: ['null', 'false', 'true']
}
elif any([self.filename.lower().endswith(ext) for ext in (
'.glsl', '.vert', '.frag')]):
# keywords
keywords[0] += [
'invariant', 'precision', 'highp', 'mediump', 'lowp',
'coherent', 'sampler', 'sampler2D', 'layout', 'out',
'in', 'varying', 'uniform', 'attribute']
# types
keywords[0] += [
'vec2', 'vec3', 'vec4', 'mat2', 'mat3', 'mat4',
'ivec2', 'ivec3', 'ivec4', 'imat2', 'imat3', 'imat4',
'bvec2', 'bvec3', 'bvec4', 'bmat2', 'bmat3', 'bmat4',
'dvec2', 'dvec3', 'dvec4', 'dmat2', 'dmat3', 'dmat4']
# reserved
keywords[1] += [
'gl_Position', 'gl_LightSourceParameters',
'gl_MaterialParameters', 'gl_LightModelProducts',
'gl_FrontLightProduct', 'gl_BackLightProduct',
'gl_FrontMaterial', 'gl_BackMaterial', 'gl_FragColor',
'gl_ModelViewMatrix', 'gl_ModelViewProjectionMatrix',
'gl_Vertex', 'gl_NormalMatrix', 'gl_Normal',
'gl_ProjectionMatrix', 'gl_LightSource']
# elif self.GetLexer() == stc.STC_LEX_ARDUINO:
# # Arduino
# keywords = {
# 0: baseC[0],
# 1: baseC[1] + [
# 'BIN', 'HEX', 'OCT', 'DEC', 'INPUT', 'OUTPUT', 'HIGH', 'LOW',
# 'INPUT_PULLUP', 'LED_BUILTIN', 'string', 'array']
# }
# elif self.GetLexer() == stc.STC_LEX_GLSL:
# # GLSL
# glslTypes = []
# baseType = ['', 'i', 'b', 'd']
# dim = ['2', '3', '4']
# name = ['vec', 'mat']
# for i in baseType:
# for j in name:
# for k in dim:
# glslTypes.append(i + j + k)
# keywords = {
# 0: baseC[0] + ['invariant', 'precision', 'highp', 'mediump', 'lowp', 'coherent',
# 'sampler', 'sampler2D'],
# 1: baseC[1]
# }
else:
keywords = {
0: [],
1: []
}
return keywords
@property
def tags(self):
tags = {
"base": stc.STC_STYLE_DEFAULT,
"margin": stc.STC_STYLE_LINENUMBER,
"caret": None,
"select": None,
"indent": stc.STC_STYLE_INDENTGUIDE,
"brace": stc.STC_STYLE_BRACELIGHT,
"controlchar": stc.STC_STYLE_CONTROLCHAR
}
if self.GetLexer() == stc.STC_LEX_PYTHON:
# Python
tags.update({
"operator": stc.STC_P_OPERATOR,
"keyword": stc.STC_P_WORD,
"keyword2": stc.STC_P_WORD2,
"id": stc.STC_P_IDENTIFIER,
"num": stc.STC_P_NUMBER,
"char": stc.STC_P_CHARACTER,
"str": stc.STC_P_STRING,
"openstr": stc.STC_P_STRINGEOL,
"decorator": stc.STC_P_DECORATOR,
"def": stc.STC_P_DEFNAME,
"class": stc.STC_P_CLASSNAME,
"comment": stc.STC_P_COMMENTLINE,
"commentblock": stc.STC_P_COMMENTBLOCK,
"documentation": stc.STC_P_TRIPLE,
"documentation2": stc.STC_P_TRIPLEDOUBLE,
"whitespace": stc.STC_P_DEFAULT
})
elif self.GetLexer() == stc.STC_LEX_R:
# R
tags.update({
"operator": stc.STC_R_OPERATOR,
"keyword": stc.STC_R_BASEKWORD,
"keyword2": stc.STC_R_KWORD,
"id": stc.STC_R_IDENTIFIER,
"num": stc.STC_R_NUMBER,
"char": stc.STC_R_STRING2,
"str": stc.STC_R_STRING,
"infix": stc.STC_R_INFIX,
"openinfix": stc.STC_R_INFIXEOL,
"comment": stc.STC_R_COMMENT,
"whitespace": stc.STC_R_DEFAULT
})
elif self.GetLexer() == stc.STC_LEX_CPP:
# C/C++
tags.update({
"operator": stc.STC_C_OPERATOR,
"keyword": stc.STC_C_WORD,
"keyword2": stc.STC_C_WORD2,
"id": stc.STC_C_IDENTIFIER,
"num": stc.STC_C_NUMBER,
"char": stc.STC_C_CHARACTER,
"str": stc.STC_C_STRING,
"openstr": stc.STC_C_STRINGEOL,
"class": stc.STC_C_GLOBALCLASS,
"comment": stc.STC_C_COMMENT,
"commentblock": stc.STC_C_COMMENTLINE,
"commentkw": stc.STC_C_COMMENTDOCKEYWORD,
"commenterror": stc.STC_C_COMMENTDOCKEYWORDERROR,
"documentation": stc.STC_C_COMMENTLINEDOC,
"documentation2": stc.STC_C_COMMENTDOC,
"whitespace": stc.STC_C_DEFAULT,
"preprocessor": stc.STC_C_PREPROCESSOR,
"preprocessorcomment": stc.STC_C_PREPROCESSORCOMMENT
})
# elif self.GetLexer() == stc.STC_LEX_JSON:
# # JSON
# tags.update({
# "operator": stc.STC_JSON_OPERATOR,
# "keyword": stc.STC_JSON_KEYWORD,
# "uri": stc.STC_JSON_URI,
# "compactiri": stc.STC_JSON_COMPACTIRI,
# "error": stc.STC_JSON_ERROR,
# "espacesequence": stc.STC_JSON_ESCAPESEQUENCE,
# "propertyname": stc.STC_JSON_PROPERTYNAME,
# "ldkeyword": stc.STC_JSON_LDKEYWORD,
# "num": stc.STC_JSON_NUMBER,
# "str": stc.STC_JSON_STRING,
# "openstr": stc.STC_JSON_STRINGEOL,
# "comment": stc.STC_JSON_LINECOMMENT,
# "commentblock": stc.STC_JSON_BLOCKCOMMENT,
# "whitespace": stc.STC_JSON_DEFAULT
# })
return tags
def hex2rgb(self, hex, base=(0, 0, 0, 255)):
if not isinstance(hex, str):
return base
# Make hex code case irrelevant
hex = hex.lower()
# dict of hex -> int conversions
hexkeys = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9,
'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15,
'#': None}
# Check that hex is a hex code
if not all(c in hexkeys.keys() for c in hex) or not len(hex) == 7:
# Default to transparent if not
return wx.Colour(base)
# Convert to rgb
r = hexkeys[hex[1]] * 16 + hexkeys[hex[2]]
g = hexkeys[hex[3]] * 16 + hexkeys[hex[4]]
b = hexkeys[hex[5]] * 16 + hexkeys[hex[6]]
return wx.Colour(r, g, b, 255)
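    # e.g. hex2rgb("#02a9ea") -> wx.Colour(2, 169, 234, 255); anything that is not a
    # 7-character hex string (including named colours such as "red") falls back to
    # wx.Colour(base).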
def shiftColour(self, col, offset=15):
"""Shift colour up or down by a set amount"""
if not isinstance(col, wx.Colour):
return
if col.GetLuminance() < 0.5:
newCol = wx.Colour(
[c+offset for c in col.Get()]
)
else:
newCol = wx.Colour(
[c - offset for c in col.Get()]
)
return newCol
def extractFont(self, fontList, base=[]):
"""Extract specified font from theme spec"""
# Convert to list if not already
if isinstance(base, str):
base = base.split(",")
base = base if isinstance(base, list) else [base]
if isinstance(fontList, str):
fontList = fontList.split(",")
fontList = fontList if isinstance(fontList, list) else [fontList]
# Extract styles
bold, italic = [], []
if "bold" in fontList:
bold = [fontList.pop(fontList.index("bold"))]
if "italic" in fontList:
italic = [fontList.pop(fontList.index("italic"))]
# Extract styles from base, if needed
if "bold" in base:
bold = [base.pop(base.index("bold"))]
if "italic" in base:
italic = [base.pop(base.index("italic"))]
# Append base and default fonts
fontList.extend(base+["JetBrains Mono"])
if "" in fontList:
del fontList[fontList.index("")]
# Set starting font in case none are found
try:
finalFont = [wx.SystemSettings.GetFont(wx.SYS_ANSI_FIXED_FONT).GetFaceName()]
except wx._core.wxAssertionError:
finalFont = [wx.Font(10, wx.FONTFAMILY_TELETYPE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL).GetFaceName()]
# Cycle through font names, stop at first valid font
if sys.platform == 'win32':
for font in fontList:
if font in wx.FontEnumerator().GetFacenames():
finalFont = [font] + bold + italic
break
return ','.join(finalFont)
def _setCodeColors(self, spec):
"""To be called from _psychopyApp only"""
#if not self.GetTopWindow() == self:
# psychopy.logging.warning("This function should only be called from _psychopyApp")
base = spec['base']
base['font'] = self.extractFont(base['font'])
# Make sure there's some spec for margins
if 'margin' not in spec:
spec['margin'] = base
# Make sure there's some spec for caret
if 'caret' not in spec:
spec['caret'] = base
# Make sure there's some spec for selection
if 'select' not in spec:
spec['select'] = base
spec['select']['bg'] = self.shiftColour(base['bg'], 30)
# Pythonise the universal data (hex -> rgb, tag -> wx int)
invalid = []
for key in spec:
# Check that full spec is defined, discard if not
if all(subkey in spec[key] for subkey in ['bg', 'fg', 'font']):
spec[key]['bg'] = self.hex2rgb(spec[key]['bg'], base['bg'])
spec[key]['fg'] = self.hex2rgb(spec[key]['fg'], base['fg'])
spec[key]['font'] = self.extractFont(spec[key]['font'], base['font'])
spec[key]['size'] = int(prefs.coder['codeFontSize'])
elif key in ['app', 'icons']:
pass
else:
invalid += [key]
for key in invalid:
del spec[key]
# we have a valid theme so continue
for key in spec:
ThemeMixin.codeColors[key] = spec[key] # class attribute for all mixin subclasses
def _setAppColors(self, spec):
hexchars = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'a', 'b', 'c', 'd', 'e', 'f']
formats = {
"hex|named": [str],
"subnamed1": [str, str],
"subnamed2": [str, str, str],
"hex|named_opacity1": [str, int],
"subnamed1_opacity1": [str, str, int],
"subnamed2_opacity1": [str, str, str, int],
"hex|named_opacity2": [str, float],
"subnamed1_opacity2": [str, str, float],
"subnamed2_opacity2": [str, str, str, float]
}
# Cycle through all values
for key in spec:
# if key not in output:
# continue
val = spec[key]
color = ['invalid']
# Make sure every value is a list
if not isinstance(val, list):
val = [val]
# Figure out what format current spec is in
types = [type(v) for v in val]
format = "invalid"
for f in formats:
if formats[f] == types:
format = f
# Pop out opacity so that it can be assumed not present
if "_opacity" in format:
opacity = round(val.pop(-1))
format = format.replace("_opacity", "")
else:
opacity = 255
# Tell the difference between hex and single named values
if "hex|named" in format:
if val[0] in cLib:
# Extract named colour
color = cLib[val[0]]
format = format.replace("hex|", "")
elif len(val[0]) == 7:
hex = val[0]
if hex[0] == "#" and all([h in hexchars for h in hex[1:].lower()]):
# Convert hex colour
format = format.replace("|named", "")
wxcolor = ThemeMixin.hex2rgb(None, hex)
color = list(wxcolor[:3])
else:
format = "invalid"
else:
format = "invalid"
if "subnamed" in format:
if len(val) == 2 and all([v in cLib for v in val]):
color = cLib[val[0]][val[1]]
elif len(val) == 3 and all([v in cLib for v in val]):
color = cLib[val[0]][val[1]][val[2]]
else:
format = "invalid"
if format == "invalid" \
or "color" not in locals() \
or "opacity" not in locals() \
or "invalid" in color:
raise Exception("Invalid app colour spec")
else:
ThemeMixin.appColors[key] = wx.Colour(color + [opacity])
def getBitmap(name, theme, size=None,
emblem=None, emblemPos='bottom_right'):
"""Retrieves the wx.Bitmap based on name, theme, size and emblem"""
global _allIcons
return _allIcons.getBitmap(name, theme, size, emblem, emblemPos)
class IconCache:
"""A class to load icons and store them just once as a dict of wx.Bitmap
objects according to theme"""
_theme = ThemeMixin
_bitmaps = {}
_buttons = [] # a list of all created buttons
_lastBGColor = None
_lastIcons = None
# def _loadComponentIcons(self, folderList=(), theme=None, forceReload=False):
# """load the icons for all the components
# """
# if theme is None:
# theme = _IconCache.iconTheme
# if forceReload or len(self)==0:
# compons = experiment.getAllComponents(folderList)
# _allIcons = {}
# for thisName, thisCompon in compons.items():
# if thisName in components.iconFiles:
# # darkmode paths
# if "base.png" not in components.iconFiles[thisName]:
# iconFolder = theme
# components.iconFiles[thisName] = join(
# dirname(components.iconFiles[thisName]),
# iconFolder,
# basename(components.iconFiles[thisName])
# )
# _allIcons[thisName] = self._loadIcons(
# components.iconFiles[thisName])
# else:
# _allIcons[thisName] = self._loadIcons(None)
# return _allIcons
# else:
# return _allIcons
def _findImageFile(self, name, theme, emblem=None, size=None):
"""Tries to find a valid icon in a range of places with and without a
size suffix"""
orig = Path(name)
if not orig.suffix: # check we have an image suffix
orig = Path(name+'.png')
if emblem: # add the emblem into the name
orig = orig.with_name(
"{}_{}{}".format(orig.stem, emblem, orig.suffix))
nameAndSize = orig.with_name(orig.stem+str(size)+orig.suffix)
nameAndDouble = orig.with_name(orig.stem+str(size)+"@2x"+orig.suffix)
for filename in [nameAndSize, orig, nameAndDouble]:
# components with no themes folders (themes were added in 2020.2)
if filename.exists():
return str(filename)
# components with theme folders
# try using the theme name (or 'light' as a default name)
for themeName in [theme, 'light']:
thisPath = filename.parent / themeName / filename.name
if thisPath.exists():
return str(thisPath)
# try in the app icons folder (e.g. for "run.png")
thisPath = iconsPath / theme / filename
if thisPath.exists():
return str(thisPath)
# and in the root of the app icons
thisPath = iconsPath / filename
if thisPath.exists():
return str(thisPath)
        # still haven't returned any path. Out of ideas!
logging.warning("Failed to find icon name={}, theme={}, "
"size={}, emblem={}"
.format(name, theme, size, emblem))
def _loadBitmap(self, name, theme, size=None, emblem=None):
"""Creates wxBitmaps based on the image.
png files work best, but anything that wx.Image can load should be fine
Doesn't return the icons, just stores them in the dict
"""
filename = self._findImageFile(name, theme, emblem, size)
if not filename:
filename = self._findImageFile('unknown.png', theme, emblem, size)
# load image with wx.LogNull() to stop libpng complaining about sRGB
nologging = wx.LogNull()
try:
im = wx.Image(filename)
except TypeError:
raise FileNotFoundError("Failed to find icon name={}, theme={}, "
"size={}, emblem={}"
.format(name, theme, size, emblem))
del nologging # turns logging back on
pix = im.GetSize()[0]
if pix > size:
im = im.Scale(pix, pix)
nameMain = _getIdentifier(name, theme, emblem, size)
self._bitmaps[nameMain] = wx.Bitmap(im)
if pix > 24: # for bigger images lets create a 1/2 size one too
nameSmall = _getIdentifier(name, theme, emblem, pix//2)
self._bitmaps[nameSmall] = wx.Bitmap(im.Scale(pix//2, pix//2))
def getBitmap(self, name, theme=None, size=None, emblem=None, beta=False):
"""Retrieves an icon based on its name, theme, size and emblem
either from the cache or loading from file as needed"""
if theme is None:
theme = ThemeMixin.icons
if size is None:
size = 48
identifier = _getIdentifier(name, theme=theme, emblem=emblem, size=size)
# find/load the bitmaps first
if identifier not in IconCache._bitmaps:
# load all size icons for this name
self._loadBitmap(name, theme, emblem=emblem, size=size)
if beta:
# If needed, append beta tag
betaID = _getIdentifier("beta", theme=theme, emblem=emblem, size=size)
if betaID not in IconCache._bitmaps:
self._loadBitmap("beta", theme, emblem=emblem, size=size)
# Get base icon and beta overlay
betaImg = IconCache._bitmaps[betaID].ConvertToImage()
baseImg = IconCache._bitmaps[identifier].ConvertToImage()
# Get color data and alphas
betaData = numpy.array(betaImg.GetData())
betaAlpha = numpy.array(betaImg.GetAlpha(), dtype=int)
baseData = numpy.array(baseImg.GetData())
baseAlpha = numpy.array(baseImg.GetAlpha(), dtype=int)
# Overlay colors
combinedData = baseData
r = numpy.where(betaAlpha > 0)[0] * 3
g = numpy.where(betaAlpha > 0)[0] * 3 + 1
b = numpy.where(betaAlpha > 0)[0] * 3 + 2
combinedData[r] = betaData[r]
combinedData[g] = betaData[g]
combinedData[b] = betaData[b]
# Combine alphas
combinedAlpha = numpy.add(baseAlpha, betaAlpha)
combinedAlpha[combinedAlpha > 255] = 255
combinedAlpha = numpy.uint8(combinedAlpha)
# Set these back to the base image
combined = betaImg
combined.SetData(combinedData)
combined.SetAlpha(combinedAlpha)
# Replace icon
identifier += "_beta"
IconCache._bitmaps[identifier] = combined.ConvertToBitmap()
return IconCache._bitmaps[identifier]
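    # The beta branch above composites the "beta" ribbon onto the base icon by hand:
    # wherever the ribbon's alpha is non-zero its RGB values overwrite the base pixels,
    # and the two alpha channels are summed (clipped at 255) before the combined
    # wx.Image is converted back to a bitmap cached under "<identifier>_beta".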
def makeBitmapButton(self, parent, filename,
name="", # name of Component e.g. TextComponent
label="", # label on the button, often short name
emblem=None,
toolbar=None, tip=None, size=None,
tbKind=wx.ITEM_NORMAL, theme=None):
if theme is None:
theme = ThemeMixin.icons
bmp = self.getBitmap(filename, theme, size, emblem)
if toolbar:
if 'phoenix' in wx.PlatformInfo:
button = toolbar.AddTool(wx.ID_ANY, label=label,
bitmap=bmp, shortHelp=tip,
kind=tbKind)
else:
button = toolbar.AddSimpleTool(wx.ID_ANY, label=label,
bitmap=bmp, shortHelp=tip,
kind=tbKind)
else:
button = wx.Button(parent, wx.ID_ANY,
label=label, name=name, style=wx.NO_BORDER)
button.SetBitmap(bmp)
button.SetBitmapPosition(wx.TOP)
button.SetBackgroundColour(ThemeMixin.appColors['frame_bg'])
# just for regular buttons (not toolbar objects) we can re-use
buttonInfo = {'btn': button,
'filename': filename,
'size': size,
'emblem': emblem,
'theme': theme}
self._buttons.append(buttonInfo)
if tip:
button.SetToolTip(wx.ToolTip(tip))
return button
def getComponentButton(self, parent, name, label,
theme=None, size=None, emblem=None,
tip=""):
"""Checks in the experiment.components.iconFiles for filename and
loads it into a wx.Bitmap"""
if name in components.iconFiles:
filename = components.iconFiles[name]
btn = self.makeBitmapButton(
parent=parent,
filename=filename, name=name, label=label,
tip=tip, size=size)
return btn
def getComponentBitmap(self, name, size=None):
"""Checks in the experiment.components.iconFiles for filename and
loads it into a wx.Bitmap"""
if type(name) != str: # got a class instead of a name?
name = name.getType()
if name in components.iconFiles:
filename = components.iconFiles[name]
bmp = self.getBitmap(name=filename, size=size)
return bmp
else:
print(components.iconFiles)
raise ValueError("Failed to find '{}' in components.iconFiles"
.format(name))
def setTheme(self, theme):
if theme.icons != IconCache._lastIcons:
for thisBtn in IconCache._buttons:
if thisBtn['btn']: # Check that button hasn't been deleted
newBmp = self.getBitmap(name=thisBtn['filename'],
size=thisBtn['size'],
theme=theme.icons,
emblem=thisBtn['emblem'])
thisBtn['btn'].SetBitmap(newBmp)
thisBtn['btn'].SetBitmapCurrent(newBmp)
thisBtn['btn'].SetBitmapPressed(newBmp)
thisBtn['btn'].SetBitmapFocus(newBmp)
thisBtn['btn'].SetBitmapDisabled(newBmp)
thisBtn['btn'].SetBitmapPosition(wx.TOP)
IconCache._lastIcons = theme.icons
if theme.appColors['frame_bg'] != IconCache._lastBGColor:
for thisBtn in IconCache._buttons:
try:
thisBtn['btn'].SetBackgroundColour(
theme.appColors['frame_bg'])
except RuntimeError:
pass
IconCache._lastBGColor = theme
def _getIdentifier(name, theme, emblem, size=None):
if size is None:
return "{}_{}_{}".format(name, theme, emblem)
else:
return "{}_{}_{}_{}".format(name, theme, emblem, size)
class PsychopyTabArt(aui.AuiDefaultTabArt, ThemeMixin):
def __init__(self):
aui.AuiDefaultTabArt.__init__(self)
self.SetDefaultColours()
self.SetAGWFlags(aui.AUI_NB_NO_TAB_FOCUS)
def SetDefaultColours(self):
"""
        Sets the default colours for the tab art, derived from the current
        ThemeMixin.appColors spec (no base colour argument is needed).
"""
cs = ThemeMixin.appColors
self.SetBaseColour( wx.Colour(cs['tab_bg']) )
self._background_top_colour = wx.Colour(cs['panel_bg'])
self._background_bottom_colour = wx.Colour(cs['panel_bg'])
self._tab_text_colour = lambda page: cs['text']
self._tab_top_colour = wx.Colour(cs['tab_bg'])
self._tab_bottom_colour = wx.Colour(cs['tab_bg'])
self._tab_gradient_highlight_colour = wx.Colour(cs['tab_bg'])
self._border_colour = wx.Colour(cs['tab_bg'])
self._border_pen = wx.Pen(self._border_colour)
self._tab_disabled_text_colour = cs['text']
self._tab_inactive_top_colour = wx.Colour(cs['panel_bg'])
self._tab_inactive_bottom_colour = wx.Colour(cs['panel_bg'])
def DrawTab(self, dc, wnd, page, in_rect, close_button_state, paint_control=False):
"""
Extends AuiDefaultTabArt.DrawTab to add a transparent border to inactive tabs
"""
if page.active:
self._border_pen = wx.Pen(self._border_colour)
else:
self._border_pen = wx.TRANSPARENT_PEN
out_tab_rect, out_button_rect, x_extent = aui.AuiDefaultTabArt.DrawTab(self, dc, wnd, page, in_rect, close_button_state, paint_control)
return out_tab_rect, out_button_rect, x_extent
class PsychopyDockArt(aui.AuiDefaultDockArt):
def __init__(self):
aui.AuiDefaultDockArt.__init__(self)
cs = ThemeMixin.appColors
# Gradient
self._gradient_type = aui.AUI_GRADIENT_NONE
# Background
self._background_colour = wx.Colour(cs['frame_bg'])
self._background_gradient_colour = wx.Colour(cs['frame_bg'])
self._background_brush = wx.Brush(self._background_colour)
# Border
self._border_size = 0
self._border_pen = wx.Pen(cs['frame_bg'])
# Sash
self._draw_sash = True
self._sash_size = 5
self._sash_brush = wx.Brush(cs['frame_bg'])
# Gripper
self._gripper_brush = wx.Brush(cs['frame_bg'])
self._gripper_pen1 = wx.Pen(cs['frame_bg'])
self._gripper_pen2 = wx.Pen(cs['frame_bg'])
self._gripper_pen3 = wx.Pen(cs['frame_bg'])
self._gripper_size = 0
# Hint
self._hint_background_colour = wx.Colour(cs['frame_bg'])
# Caption bar
self._inactive_caption_colour = wx.Colour(cs['docker_bg'])
self._inactive_caption_gradient_colour = wx.Colour(cs['docker_bg'])
self._inactive_caption_text_colour = wx.Colour(cs['docker_fg'])
self._active_caption_colour = wx.Colour(cs['docker_bg'])
self._active_caption_gradient_colour = wx.Colour(cs['docker_bg'])
self._active_caption_text_colour = wx.Colour(cs['docker_fg'])
# self._caption_font
self._caption_size = 25
self._button_size = 20
class ThemeSwitcher(wx.Menu):
"""Class to make a submenu for switching theme, meaning that the menu will
always be the same across frames."""
def __init__(self, frame):
# Get list of themes
themePath = Path(prefs.paths['themes'])
themeList = {}
for themeFile in themePath.glob("*.json"):
try:
with open(themeFile, "rb") as fp:
theme = json.load(fp)
# Add themes to list only if min spec is defined
base = theme['base']
if all(key in base for key in ['bg', 'fg', 'font']):
themeList[themeFile.stem] = theme['info'] if "info" in theme else ""
except (FileNotFoundError, IsADirectoryError):
pass
# Make menu
wx.Menu.__init__(self)
# Make priority theme buttons
priority = ["PsychopyDark", "PsychopyLight", "ClassicDark", "Classic"]
for theme in priority:
tooltip = themeList.pop(theme)
item = self.AppendRadioItem(wx.ID_ANY, _translate(theme), tooltip)
# Bind to theme change method
frame.Bind(wx.EVT_MENU, frame.app.onThemeChange, item)
# Make other theme buttons
for theme in themeList:
item = self.AppendRadioItem(wx.ID_ANY, _translate(theme), help=themeList[theme])
frame.Bind(wx.EVT_MENU, frame.app.onThemeChange, item)
self.AppendSeparator()
# Add Theme Folder button
item = self.Append(wx.ID_ANY, _translate("Open theme folder"))
frame.Bind(wx.EVT_MENU, self.openThemeFolder, item)
def openThemeFolder(self, event):
# Choose a command according to OS
if sys.platform in ['win32']:
comm = "explorer"
elif sys.platform in ['darwin']:
comm = "open"
elif sys.platform in ['linux', 'linux2']:
comm = "dolphin"
# Use command to open themes folder
subprocess.call(f"{comm} {prefs.paths['themes']}", shell=True)
def _applyAppTheme(self):
for item in self.GetMenuItems():
if item.IsRadio(): # This means it will not attempt to check the separator
item.Check(item.ItemLabel.lower() == ThemeMixin.codetheme.lower())
| gpl-3.0 |
tbs1980/Kaggle_DecMeg2014 | source/StackedGeneralisation.py | 1 | 11066 | # method described in http://arxiv.org/abs/1404.4175
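# Rough sketch of the two-layer flow implemented by the class below (added
# summary; the variable names in the snippet are illustrative only, not part
# of the original code):
#   1. fit one first-layer classifier per training subject,
#   2. stack each classifier's predictions on every subject column-wise to
#      build a new feature matrix,
#   3. fit a second-layer (meta) classifier on that prediction matrix, e.g.
#        layer1 = [LogisticRegression().fit(Xs, ys) for Xs, ys in subjects]
#        meta_X = np.column_stack([clf.predict(X_all) for clf in layer1])
#        meta_clf = LogisticRegression().fit(meta_X, y_all)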
import numpy as np
import Classify
import RBMPipeline
import Utils
import sys
from scipy.io import loadmat
from sklearn.cross_validation import cross_val_score
from sklearn.linear_model import LogisticRegression
import pywt
from statsmodels.robust import stand_mad
class StackedGeneralisation:
def __init__(self,path_to_data):
self._Path2Data=path_to_data
self._subjects_train=range(1,13)
self._subjects_train_testing=range(12,17)
self._subjects_test=range(17,24)
self._tmin = 0.0
self._tmax = 0.5
self._first_layer_classifiers=[]
self._first_layer_classifiers_1=[]
self._data_X=[]
self._data_y=[]
self._data_layer_1_X=[]
self._data_layer_1_y=[]
self._clfr2=[]
self._data_X_testing=[]
self._data_y_testing=[]
self._data_layer_1_X_testing=[]
self._data_layer_1_y_testing=[]
self._sample_weights=[]
self._ids_test=[]
self._ypred_test=[]
self._acc_thr=0.2
self._sensor_scores=[]
self._top_channels=[]
def ApplyTimeWindow(self,XX, tmin, tmax, sfreq, tmin_original=-0.5):
"""
A function to apply the desired time window.
@param XX a matrix of the shape [trial x channel x time]
@param tmin start point of the time window
@param tmax end point of the time window
@param tmin_original original start point of the time window
"""
#print "Applying the desired time window."
#print
beginning = np.round((tmin - tmin_original) * sfreq).astype(np.int)
end = np.round((tmax - tmin_original) * sfreq).astype(np.int)
XX = XX[:, :, beginning:300].copy()
#if np.shape(XX)[2] != 128 :
#raise ValueError("the time series should have 128 elements")
return XX
def ComputeScores(self,X,y):
print "Computing cross-validated accuracy for each channel."
clf = LogisticRegression(random_state=0)
score_channel = np.zeros(X.shape[1])
cv=5
for channel in range(X.shape[1]):
X_channel = X[:,channel,:].copy()
#X_channel -= X_channel.mean(0)
#X_channel = np.nan_to_num(X_channel / X_channel.std(0))
scores = cross_val_score(clf, X_channel, y, cv=cv, scoring='accuracy')
score_channel[channel] = scores.mean()
#print "Channel :" ,channel," score: ",score_channel[channel]
return score_channel
def ApplyZnorm(self,XX):
"""
Apply normalisation.
@param XX a matrix of the shape [trial x channel x time]
"""
#print "Features Normalization."
#print
XX -= XX.mean(0)
XX = np.nan_to_num(XX / XX.std(0))
return XX
def ApplySVD(self,XX,num_components):
"""
        Apply SVD to each trial and keep only the most important components
        @param XX a matrix of the shape [trial x channel x time]
        @param num_components number of components to keep in the reduction
"""
#print "appling svd with componetns",num_components
#print
for i in range(np.shape(XX)[0]):
mat=XX[i,:,:]
u,s,v=np.linalg.svd(mat,full_matrices=False)
snew=np.zeros(np.shape(s))
if int(num_components) > snew.size-1 or num_components < 0:
print "input num_components ",num_components
print "changin to ",snew.size-1
num_components=snew.size-1
snew[0:int(num_components)]=s[0:int(num_components)]
S=np.diag(snew)
XX[i,:,:]=np.dot(u,np.dot(S,v))
return XX
def ApplyWaveletTransform(self,XX):
XXret = np.zeros( [np.shape(XX)[0],np.shape(XX)[1],188] )
for i in range(np.shape(XX)[0]):
for j in range(np.shape(XX)[1]):
#print "before ",np.mean(XX[i,j,0:128])
sig = XX[i,j,:]
wcfs = pywt.wavedec(sig,wavelet='db2')
#if i==0 and j==0 :
#print "shape of wcfs= ",np.shape(wcfs)
#wcfs[0] = np.zeros(np.shape(wcfs[0]))
wcfs = np.concatenate(wcfs)
XXret[i,j,:] = wcfs
#sigma = stand_mad(wcfs[-1])
#uthresh = sigma*np.sqrt(2*np.log(len(sig)))
#dewcfs = wcfs[:]
#dewcfs[1:] = (pywt.thresholding.soft(k, value=uthresh) for k in dewcfs[1:])
#dewcfs = np.concatenate(dewcfs)
#XXret[i,j,:] = dewcfs
#print "after ",np.mean(XX[i,j,0:128])
return XXret
def ApplySVDToFeatures(self,mat,num_components):
u,s,v=np.linalg.svd(mat,full_matrices=False)
snew=np.zeros(np.shape(s))
if int(num_components) > snew.size-1 or num_components < 0:
print "input num_components ",num_components
print "changin to ",snew.size-1
num_components=snew.size-1
snew[0:int(num_components)]=s[0:int(num_components)]
S=np.diag(snew)
return np.dot(u,np.dot(S,v))
def ReshapeToFeaturesVector(self,XX):
"""
Reshape the matrix to a set of features by concatenating
the 306 timeseries of each trial in one long vector.
@param XX a matrix of the shape [trial x channel x time]
"""
#print "2D Reshaping: concatenating all 306 timeseries."
#print
XX = XX.reshape(XX.shape[0], XX.shape[1] * XX.shape[2])
return XX
def ProcessData(self,XX,sfreq,tmin_original):
"""
Process data
@param XX a matrix of the shape [trial x channel x time]
@param sfreq Frequency of the sampling
@param tmin_original Original start point of the window.
"""
XX=self.ApplyTimeWindow(XX, self._tmin, self._tmax, sfreq,tmin_original)
#XX=self.ApplySVD(XX,100)
XX=self.ApplyZnorm(XX)
#XX = self.ApplyWaveletTransform(XX)
XX = self.ReshapeToFeaturesVector(XX)
#XX = self.ApplySVDToFeatures(XX,570)
return XX
#now take only the good top channels
#XXtop=XX[:,self._top_channels[:],:]
#XXtop=XX[:,self._top_channels[0:200],:]
#return self.ReshapeToFeaturesVector(XXtop)
def GetTrainData(self,filename):
print "Loading", filename
data = loadmat(filename, squeeze_me=True)
XX = data['X']
yy = data['y']
sfreq = data['sfreq']
tmin_original = data['tmin']
XX=self.ProcessData(XX,sfreq,tmin_original)
return (XX,yy)
def GetTestData(self,filename):
print "Loading", filename
data = loadmat(filename, squeeze_me=True)
XX = data['X']
ids = data['Id']
sfreq = data['sfreq']
tmin_original = data['tmin']
XX=self.ProcessData(XX,sfreq,tmin_original)
return (XX,ids)
def FindTheBestChannels(self):
for subject in self._subjects_train:
filename = self._Path2Data+'/train_subject%02d.mat' % subject
data = loadmat(filename, squeeze_me=True)
XX = data['X']
yy = data['y']
sfreq = data['sfreq']
tmin_original = data['tmin']
XX=self.ApplyTimeWindow(XX, self._tmin, self._tmax, sfreq,tmin_original)
XX=self.ApplyZnorm(XX)
scr=self.ComputeScores(XX,yy)
#print scr
self._sensor_scores.append(scr)
print "shape of secore= ",np.shape(self._sensor_scores)
self._sensor_scores=np.vstack(self._sensor_scores)
scr_means=np.zeros(np.shape(self._sensor_scores)[1])
for i in range(np.shape(self._sensor_scores)[1]):
scr_means[i]=np.mean(self._sensor_scores[:,i])
self._top_channels=np.argsort(scr_means)
def CreateFistLayer(self):
for subject in self._subjects_train:
filename = self._Path2Data+'/train_subject%02d.mat' % subject
X,y=self.GetTrainData(filename)
self._data_layer_1_y.append(y)
self._data_X.append(X)
self._data_y.append(y)
clfr = Classify.LogisticRegression(X, y,None, None)
#clfr = Classify.QuadraticDiscriminantAnalysis(X, y,None, None)
#clfr_1 = Classify.SupportVectorMachine(X, y,None, None)
#clfr = RBMPipeline.LogRegWithRBMFeatures(X, y,None, None)
self._first_layer_classifiers.append(clfr)
#self._first_layer_classifiers_1.append(clfr_1)
#make all the predictions into one vector
self._data_layer_1_y=np.concatenate(self._data_layer_1_y)
#return
#now create first layer of predictions
for i in range(len(self._subjects_train)):
ypred_1=[]
#ypred_1_c1=[]
for j in range(len(self._subjects_train)):
ypred=self._first_layer_classifiers[i].predict(self._data_X[j])
#ypredc1=self._first_layer_classifiers_1[i].predict(self._data_X[j])
print "error of classifer 0 " ,i,"for data ",j,"=", float(sum(abs(ypred-self._data_y[j])))/float(len(self._data_y[j]))*100,"%"
#print "error of classifer 1 " ,i,"for data ",j,"=", float(sum(abs(ypredc1-self._data_y[j])))/float(len(self._data_y[j]))*100,"%"
ypred_1.append(ypred)
#ypred_1_c1.append(ypredc1)
#concatenate all the predictions into a feature vector
ypred_1 = np.concatenate(ypred_1)
#ypred_1_c1 = np.concatenate(ypred_1_c1)
self._data_layer_1_X.append(ypred_1)
#self._data_layer_1_X.append(ypred_1_c1)
self._data_layer_1_X=np.vstack(self._data_layer_1_X).T
#now the second layer
print
print "creating the second layer"
print
print "shape =",np.shape(self._data_layer_1_X),np.shape(self._data_layer_1_y)
self._clfr2=Classify.LogisticRegression(self._data_layer_1_X, self._data_layer_1_y,None, None)
#self._clfr2=Classify.RandomForest(self._data_layer_1_X, self._data_layer_1_y,None, None)
#self._clfr2=RBMPipeline.LogRegWithRBMFeatures(self._data_layer_1_X, self._data_layer_1_y,None, None)
def TestSeconLayer(self):
for subject in self._subjects_train_testing:
filename = self._Path2Data+'/train_subject%02d.mat' % subject
X,y=self.GetTrainData(filename)
self._data_layer_1_y_testing.append(y)
self._data_X_testing.append(X)
self._data_y_testing.append(y)
self._data_layer_1_y_testing=np.concatenate(self._data_layer_1_y_testing)
#now create first layer of predictions
for i in range(len(self._subjects_train)):#since we have subjects_train number of features here
ypred_1=[]
#ypred_1_c1=[]
for j in range(len(self._subjects_train_testing)):
ypred=self._first_layer_classifiers[i].predict(self._data_X_testing[j])
#ypredc1=self._first_layer_classifiers_1[i].predict(self._data_X_testing[j])
ypred_1.append(ypred)
#ypred_1_c1.append(ypredc1)
ypred_1 = np.concatenate(ypred_1)
#ypred_1_c1 = np.concatenate(ypred_1_c1)
self._data_layer_1_X_testing.append(ypred_1)
#self._data_layer_1_X_testing.append(ypred_1_c1)
self._data_layer_1_X_testing=np.vstack(self._data_layer_1_X_testing).T
ypred_test = self._clfr2.predict(self._data_layer_1_X_testing)
print "error for 2nd classifer =", float(sum(abs(ypred_test-self._data_layer_1_y_testing)))/float(len(self._data_layer_1_y_testing))*100,"%"
def PredictTestData(self):
print "prediction"
for subject in self._subjects_test:
filename = self._Path2Data+'/test_subject%02d.mat' % subject
X,ids=self.GetTestData(filename)
self._data_X_testing.append(X)
self._ids_test.append(ids)
self._ids_test = np.concatenate(self._ids_test)
#now create first layer of predictions
for i in range(len(self._subjects_train)):#since we have subjects_train number of features here
ypred_1=[]
for j in range(len(self._subjects_test)):
ypred=self._first_layer_classifiers[i].predict(self._data_X_testing[j])
ypred_1.append(ypred)
ypred_1 = np.concatenate(ypred_1)
self._data_layer_1_X_testing.append(ypred_1)
self._data_layer_1_X_testing=np.vstack(self._data_layer_1_X_testing).T
self._ypred_test = self._clfr2.predict(self._data_layer_1_X_testing)
return (self._ypred_test,self._ids_test)
| gpl-2.0 |
makersauce/stocks | strategy3.py | 1 | 5995 | # Strategy file
### INTERSECTING WATERFALL STRATEGY
### When the 5-, 10- and 30-day moving averages intersect and then fan out into a triangle
## with the 30-day on top, the 10-day in the middle and the 5-day on the bottom,
## buy when the percent increase from the market price to the 30-day average reaches a threshold, UNLESS the 5-day average exceeds the 10-day or 30-day average.
## Sell when the 5-day average starts to decrease.
import os
import random
import datetime
from stock import Stock, piggy
from sys import argv
from sym2 import symbols
def strategy(symbol):
stock = Stock(symbol)
stock.update_history()
stock.analyze()
buy_dates = []
buy_prices = []
sell_dates = []
sell_prices = []
wiggly = piggy(sim=True,holdings=300)
buy_flag = False
staleness = 0
sell_flag = False
earn_flag = False
last_bought = 0
for itx, date in enumerate(stock.history_data['Date']):
ptrn_lookahead = 5
prox_thres = .02 * float(stock.history_data['Close'][itx])
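        # Pattern test below: roughly ptrn_lookahead days back, the 5/10/30-day
        # averages were all within prox_thres (2% of the close) of each other
        # (the "intersection"), and today they have fanned out with the 30-day
        # on top, the 10-day in the middle and the 5-day on the bottom.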
if float(stock.history_data['30_Day'][itx-ptrn_lookahead]) - prox_thres <= float(stock.history_data['10_Day'][itx-ptrn_lookahead]) <= float(stock.history_data['30_Day'][itx-ptrn_lookahead]) + prox_thres\
and float(stock.history_data['30_Day'][itx-ptrn_lookahead]) - prox_thres <= float(stock.history_data['5_Day'][itx-ptrn_lookahead]) <= float(stock.history_data['30_Day'][itx-ptrn_lookahead]) + prox_thres\
and float(stock.history_data['30_Day'][itx]) > float(stock.history_data['10_Day'][itx]) > float(stock.history_data['5_Day'][itx]):
buy_flag = True
staleness = 0
gains_thres = 1.1
buy_slop_err = 0.001
if buy_flag \
and float(stock.history_data['5_Day'][itx]) * gains_thres < float(stock.history_data['30_Day'][itx]) \
            and float(stock.history_data['5_Day'][itx]) > float(stock.history_data['5_Day'][itx-1]) * (1+buy_slop_err): ##Once there is enough margin between the 5 day and 30 day, buy
buy_dates.append(date)
buy_prices.append(float(stock.history_data['Close'][itx]))
buy_flag = False
staleness = 0
earn_flag = True
num = int(wiggly.holdings * .5 / float(stock.history_data['Close'][itx]))
wiggly.buy(stock,num,date=date)
last_bought = itx
if (buy_flag
and staleness > 20
or (float(stock.history_data['5_Day'][itx]) > float(stock.history_data['10_Day'][itx]))):
buy_flag = False
staleness = 0
earn_thres = 1.2
if (earn_flag
and float(stock.history_data['10_Day'][itx]) > float(stock.history_data['10_Day'][itx-1]) * (1+buy_slop_err)
and float(stock.history_data['Close'][itx]) > float(stock.history_data['5_Day'][last_bought]) * earn_thres): ## and the 5 Day is increasing, then throw the EARNING flag
earn_flag = False
sell_flag = True
ceiling = .5
if (sell_flag
and float(stock.history_data['5_Day'][itx]) < float(stock.history_data['5_Day'][itx-1])) \
or (sell_flag and float(stock.history_data['5_Day'][itx]) > float(stock.history_data['5_Day'][itx]) * (ceiling + 1)):
sell_flag = False
wiggly.sell(stock,-1,date=date)
sell_dates.append(date)
sell_prices.append(float(stock.history_data['Close'][itx]))
staleness += 1
if wiggly.current_stock[stock.symbol] > 0:
print "\n\n#####Closing Out######"
wiggly.sell(stock,-1,date=date)
##Make a plot
import matplotlib.pyplot as plt
import matplotlib.dates as plotdate
import matplotlib.lines as line
import numpy as np
    months = plotdate.MonthLocator()  # tick every month
    days = plotdate.DayLocator()  # tick every day
monthsFmt = plotdate.DateFormatter('%m %d')
fig, ax = plt.subplots()
#ax2 = ax.twinx()
t = [datetime.datetime.strptime(date,'%Y-%m-%d') for date in stock.history_data['Date']]
ax.axis('auto')
# format the ticks
ax.xaxis.set_major_locator(months)
ax.xaxis.set_major_formatter(monthsFmt)
ax.xaxis.set_minor_locator(days)
fig.autofmt_xdate()
ax.plot(t, stock.history_data['5_Day'], '#0000FF')
ax.plot(t, stock.history_data['10_Day'], '#5555FF')
ax.plot(t, stock.history_data['30_Day'], '#9999FF')
#ax.plot(t, stock.history_data['80_Day'], '#AAAAFF')
#ax2.plot(t, stock.history_data['Volume'], '#CCFFCC')
#ax2.plot(t, stock.history_data['10_Day_Vol'], '#88AA88')
buy_dates = [datetime.datetime.strptime(date,'%Y-%m-%d') for date in buy_dates]
ax.plot(buy_dates,buy_prices, 'g|',ms=100)
sell_dates = [datetime.datetime.strptime(date,'%Y-%m-%d') for date in sell_dates]
ax.plot(sell_dates,sell_prices, 'b|',ms=100)
ax.plot(t, stock.history_data['Close'], 'r-')
plt.title(stock.symbol)
#ax.text(t[12], 250, 'hello')
plt.show()
return {'gains': wiggly.gains, 'symbol': stock.symbol, 'initial_value':stock.history_data['Open'][1]}
if __name__ == '__main__':
if len(argv) > 1:
for arg in argv[1:]:
try:
strategy(arg)
except:
exit()
else:
final_earnings = []
rando = random.randint(0,len(symbols['nasdaq'])-20)
for symbol in symbols['nasdaq'][rando:rando+20]:
try:
final_earnings.append(strategy(symbol))
except:
pass
i = 0
while os.path.exists("results/test%s.csv" % i):
i += 1
f = open('results/test%s.csv' % i,'w+')
f.write('Test Results\n')
f.write('Symbol\tInitial\tGains\n')
for earnings in final_earnings:
f.write(earnings['symbol'] + '\t')
f.write(str(earnings['initial_value']) + '\t')
f.write(str(earnings['gains']) + '\t\n')
f.close()
| mit |
dkushner/zipline | tests/test_sources.py | 17 | 7041 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytz
import numpy as np
from six import integer_types
from unittest import TestCase
import zipline.utils.factory as factory
from zipline.sources import (DataFrameSource,
DataPanelSource,
RandomWalkSource)
from zipline.utils import tradingcalendar as calendar_nyse
from zipline.assets import AssetFinder
class TestDataFrameSource(TestCase):
def test_df_source(self):
source, df = factory.create_test_df_source()
assert isinstance(source.start, pd.lib.Timestamp)
assert isinstance(source.end, pd.lib.Timestamp)
for expected_dt, expected_price in df.iterrows():
sid0 = next(source)
assert expected_dt == sid0.dt
assert expected_price[0] == sid0.price
def test_df_sid_filtering(self):
_, df = factory.create_test_df_source()
source = DataFrameSource(df)
assert 1 not in [event.sid for event in source], \
"DataFrameSource should only stream selected sid 0, not sid 1."
def test_panel_source(self):
source, panel = factory.create_test_panel_source(source_type=5)
assert isinstance(source.start, pd.lib.Timestamp)
assert isinstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertTrue('sid' in event)
self.assertTrue('arbitrary' in event)
self.assertTrue('type' in event)
self.assertTrue(hasattr(event, 'volume'))
self.assertTrue(hasattr(event, 'price'))
self.assertEquals(event['type'], 5)
self.assertEquals(event['arbitrary'], 1.)
self.assertEquals(event['sid'], 0)
self.assertTrue(isinstance(event['volume'], int))
self.assertTrue(isinstance(event['arbitrary'], float))
def test_yahoo_bars_to_panel_source(self):
finder = AssetFinder()
stocks = ['AAPL', 'GE']
start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
data = factory.load_bars_from_yahoo(stocks=stocks,
indexes={},
start=start,
end=end)
check_fields = ['sid', 'open', 'high', 'low', 'close',
'volume', 'price']
copy_panel = data.copy()
sids = finder.map_identifier_index_to_sids(
data.items, data.major_axis[0]
)
copy_panel.items = sids
source = DataPanelSource(copy_panel)
for event in source:
for check_field in check_fields:
self.assertIn(check_field, event)
self.assertTrue(isinstance(event['volume'], (integer_types)))
self.assertTrue(event['sid'] in sids)
def test_nan_filter_dataframe(self):
dates = pd.date_range('1/1/2000', periods=2, freq='B', tz='UTC')
df = pd.DataFrame(np.random.randn(2, 2),
index=dates,
columns=[4, 5])
# should be filtered
df.loc[dates[0], 4] = np.nan
# should not be filtered, should have been ffilled
df.loc[dates[1], 5] = np.nan
source = DataFrameSource(df)
event = next(source)
self.assertEqual(5, event.sid)
event = next(source)
self.assertEqual(4, event.sid)
event = next(source)
self.assertEqual(5, event.sid)
self.assertFalse(np.isnan(event.price))
def test_nan_filter_panel(self):
dates = pd.date_range('1/1/2000', periods=2, freq='B', tz='UTC')
df = pd.Panel(np.random.randn(2, 2, 2),
major_axis=dates,
items=[4, 5],
minor_axis=['price', 'volume'])
# should be filtered
df.loc[4, dates[0], 'price'] = np.nan
# should not be filtered, should have been ffilled
df.loc[5, dates[1], 'price'] = np.nan
source = DataPanelSource(df)
event = next(source)
self.assertEqual(5, event.sid)
event = next(source)
self.assertEqual(4, event.sid)
event = next(source)
self.assertEqual(5, event.sid)
self.assertFalse(np.isnan(event.price))
class TestRandomWalkSource(TestCase):
def test_minute(self):
np.random.seed(123)
start_prices = {0: 100,
1: 500}
start = pd.Timestamp('1990-01-01', tz='UTC')
end = pd.Timestamp('1991-01-01', tz='UTC')
source = RandomWalkSource(start_prices=start_prices,
calendar=calendar_nyse, start=start,
end=end)
self.assertIsInstance(source.start, pd.lib.Timestamp)
self.assertIsInstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertIn(event.sid, start_prices.keys())
self.assertIn(event.dt.replace(minute=0, hour=0),
calendar_nyse.trading_days)
self.assertGreater(event.dt, start)
self.assertLess(event.dt, end)
self.assertGreater(event.price, 0,
"price should never go negative.")
self.assertTrue(13 <= event.dt.hour <= 21,
"event.dt.hour == %i, not during market \
hours." % event.dt.hour)
def test_day(self):
np.random.seed(123)
start_prices = {0: 100,
1: 500}
start = pd.Timestamp('1990-01-01', tz='UTC')
end = pd.Timestamp('1992-01-01', tz='UTC')
source = RandomWalkSource(start_prices=start_prices,
calendar=calendar_nyse, start=start,
end=end, freq='daily')
self.assertIsInstance(source.start, pd.lib.Timestamp)
self.assertIsInstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertIn(event.sid, start_prices.keys())
self.assertIn(event.dt.replace(minute=0, hour=0),
calendar_nyse.trading_days)
self.assertGreater(event.dt, start)
self.assertLess(event.dt, end)
self.assertGreater(event.price, 0,
"price should never go negative.")
self.assertEqual(event.dt.hour, 0)
| apache-2.0 |
cjayb/mne-python | examples/time_frequency/plot_time_frequency_simulated.py | 18 | 8475 | """
======================================================================
Time-frequency on simulated data (Multitaper vs. Morlet vs. Stockwell)
======================================================================
This example demonstrates the different time-frequency estimation methods
on simulated data. It shows the time-frequency resolution trade-off
and the problem of estimation variance. In addition it highlights
alternative functions for generating TFRs without averaging across
trials, or by operating on numpy arrays.
"""
# Authors: Hari Bharadwaj <[email protected]>
# Denis Engemann <[email protected]>
# Chris Holdgraf <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from matplotlib import pyplot as plt
from mne import create_info, EpochsArray
from mne.baseline import rescale
from mne.time_frequency import (tfr_multitaper, tfr_stockwell, tfr_morlet,
tfr_array_morlet)
from mne.viz import centers_to_edges
print(__doc__)
###############################################################################
# Simulate data
# -------------
#
# We'll simulate data with a known spectro-temporal structure.
sfreq = 1000.0
ch_names = ['SIM0001', 'SIM0002']
ch_types = ['grad', 'grad']
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
n_times = 1024 # Just over 1 second epochs
n_epochs = 40
seed = 42
rng = np.random.RandomState(seed)
noise = rng.randn(n_epochs, len(ch_names), n_times)
# Add a 50 Hz sinusoidal burst to the noise and ramp it.
t = np.arange(n_times, dtype=np.float64) / sfreq
signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal
signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
signal[on_time] *= np.hanning(on_time.sum()) # Ramping
data = noise + signal
reject = dict(grad=4000)
events = np.empty((n_epochs, 3), dtype=int)
first_event_sample = 100
event_id = dict(sin50hz=1)
for k in range(n_epochs):
events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']
epochs = EpochsArray(data=data, info=info, events=events, event_id=event_id,
reject=reject)
epochs.average().plot()
###############################################################################
# Calculate a time-frequency representation (TFR)
# -----------------------------------------------
#
# Below we'll demonstrate the output of several TFR functions in MNE:
#
# * :func:`mne.time_frequency.tfr_multitaper`
# * :func:`mne.time_frequency.tfr_stockwell`
# * :func:`mne.time_frequency.tfr_morlet`
#
# Multitaper transform
# ====================
# First we'll use the multitaper method for calculating the TFR.
# This creates several orthogonal tapering windows in the TFR estimation,
# which reduces variance. We'll also show some of the parameters that can be
# tweaked (e.g., ``time_bandwidth``) that will result in different multitaper
# properties, and thus a different TFR. You can trade time resolution or
# frequency resolution or both in order to get a reduction in variance.
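# (For reference: ``tfr_multitaper`` derives the number of DPSS tapers from
# ``time_bandwidth`` -- approximately floor(time_bandwidth - 1) -- which is why
# the settings below correspond to 1, 3 and 7 tapers respectively.)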
freqs = np.arange(5., 100., 3.)
vmin, vmax = -3., 3. # Define our color limits.
###############################################################################
# **(1) Least smoothing (most variance/background fluctuations).**
n_cycles = freqs / 2.
time_bandwidth = 2.0 # Least possible frequency-smoothing (1 taper)
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Least smoothing, most variance')
###############################################################################
# **(2) Less frequency smoothing, more time smoothing.**
n_cycles = freqs # Increase time-window length to 1 second.
time_bandwidth = 4.0 # Same frequency-smoothing as (1) 3 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Less frequency smoothing, more time smoothing')
###############################################################################
# **(3) Less time smoothing, more frequency smoothing.**
n_cycles = freqs / 2.
time_bandwidth = 8.0 # Same time-smoothing as (1), 7 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Less time smoothing, more frequency smoothing')
##############################################################################
# Stockwell (S) transform
# =======================
#
# Stockwell uses a Gaussian window to balance temporal and spectral resolution.
# Importantly, frequency bands are phase-normalized, hence strictly comparable
# with regard to timing, and the input signal can be recovered from the
# transform in a lossless way if we disregard numerical errors. In this case,
# we control the spectral / temporal resolution by specifying different widths
# of the gaussian window using the ``width`` parameter.
fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
fmin, fmax = freqs[[0, -1]]
for width, ax in zip((0.2, .7, 3.0), axs):
power = tfr_stockwell(epochs, fmin=fmin, fmax=fmax, width=width)
power.plot([0], baseline=(0., 0.1), mode='mean', axes=ax, show=False,
colorbar=False)
ax.set_title('Sim: Using S transform, width = {:0.1f}'.format(width))
plt.tight_layout()
###############################################################################
# Morlet Wavelets
# ===============
#
# Finally, show the TFR using morlet wavelets, which are a sinusoidal wave
# with a gaussian envelope. We can control the balance between spectral and
# temporal resolution with the ``n_cycles`` parameter, which defines the
# number of cycles to include in the window.
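# (For a Morlet wavelet the temporal extent scales as n_cycles / freq, so a
# fixed n_cycles gives shorter windows -- better time resolution but coarser
# frequency resolution -- at higher frequencies.)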
fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
all_n_cycles = [1, 3, freqs / 2.]
for n_cycles, ax in zip(all_n_cycles, axs):
power = tfr_morlet(epochs, freqs=freqs,
n_cycles=n_cycles, return_itc=False)
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
axes=ax, show=False, colorbar=False)
n_cycles = 'scaled by freqs' if not isinstance(n_cycles, int) else n_cycles
ax.set_title('Sim: Using Morlet wavelet, n_cycles = %s' % n_cycles)
plt.tight_layout()
###############################################################################
# Calculating a TFR without averaging over epochs
# -----------------------------------------------
#
# It is also possible to calculate a TFR without averaging across trials.
# We can do this by using ``average=False``. In this case, an instance of
# :class:`mne.time_frequency.EpochsTFR` is returned.
n_cycles = freqs / 2.
power = tfr_morlet(epochs, freqs=freqs,
n_cycles=n_cycles, return_itc=False, average=False)
print(type(power))
avgpower = power.average()
avgpower.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Using Morlet wavelets and EpochsTFR', show=False)
###############################################################################
# Operating on arrays
# -------------------
#
# MNE also has versions of the functions above which operate on numpy arrays
# instead of MNE objects. They expect inputs of the shape
# ``(n_epochs, n_channels, n_times)``. They will also return a numpy array
# of shape ``(n_epochs, n_channels, n_freqs, n_times)``.
power = tfr_array_morlet(epochs.get_data(), sfreq=epochs.info['sfreq'],
freqs=freqs, n_cycles=n_cycles,
output='avg_power')
# Baseline the output
rescale(power, epochs.times, (0., 0.1), mode='mean', copy=False)
fig, ax = plt.subplots()
x, y = centers_to_edges(epochs.times * 1000, freqs)
mesh = ax.pcolormesh(x, y, power[0], cmap='RdBu_r', vmin=vmin, vmax=vmax)
ax.set_title('TFR calculated on a numpy array')
ax.set(ylim=freqs[[0, -1]], xlabel='Time (ms)')
fig.colorbar(mesh)
plt.tight_layout()
plt.show()
| bsd-3-clause |
jpautom/scikit-learn | sklearn/decomposition/pca.py | 20 | 23579 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
    The dataset is assumed to be embedded in Gaussian noise of shape
    (n_samples, n_features) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data. The components are sorted by
explained_variance_.
explained_variance_ : array, [n_components]
The amount of variance explained by each of the selected components.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0.
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
        Equal to `X.mean(axis=0)`.
n_components_ : int
The estimated number of components. Relevant when `n_components` is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
computed the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_) # doctest: +ELLIPSIS
[ 6.6162... 0.05038...]
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < min(n_features, n_samples):
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
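        # With W = components_ and sigma2 = noise_variance_, the model
        # covariance is C = W.T @ diag(exp_var_diff) @ W + sigma2 * I, and the
        # Woodbury identity gives
        #   C^-1 = I / sigma2
        #        - W.T @ inv(diag(1 / exp_var_diff) + W @ W.T / sigma2) @ W / sigma2**2
        # so only an n_components x n_components matrix has to be inverted.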
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space using `n_components_`.
Returns an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components. X represents
data from the projection on to the principal components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 2 by default.
.. versionchanged:: 0.18
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
k is not set then all components are stored and the sum of explained
variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=2, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=2,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
| bsd-3-clause |
borisz264/mod_seq | structure_ROC_curves/roc_curves_compare_annotations.py | 1 | 5995 | __author__ = 'boris'
"""
THIS IS FOR TROUBLESHOOTING AND COMPARING DIFFERENT TRUE POSITIVE AND TRUE NEGATIVE ANNOTATIONS
5'e end data is a pickled dict of form srt_dict[strand][chrom][position] = counts at position
take the 5' end data from count_reads_and_mismatches.py, as well as any number of files output by
compute_true_positive_negative.py
and compute:
1) 90% windorize the input data (All data above 95th percentile set to 95th percentile)
2) normalize to highest position in rRNA (should just stick to the rRNA of interest, which will be 18S for my initial test)
3) slide a cutoff from 0 to 1 in ~10,000 steps, computing % of true positives and true negatives called positive at each step
4) plot these two percentages against each other for each step, also output these values as a spreadsheet
also plot y=x for reference
"""
import sys, mod_utils, os
import numpy
from scipy.stats.mstats import winsorize
import matplotlib.pyplot as plt
plt.rcParams['pdf.fonttype'] = 42 #leaves most text as actual text in PDFs, not outlines
from collections import defaultdict
def winsorize_norm_chromosome_data(read_5p_ends, chromosome, strand, genome_dict, nucs_to_count, to_winsorize = True, low = 0, high = 0.95):
"""
:param read_5p_ends:
:param chromosome:
:param strand:
:param genome_dict:
:param nucs_to_count:
:param low:
:param high:
:return: an array (now zero-indexed from 1-indexed) of densities for the given chromosome on the given strand, winsorized, and only for the given nucleotides
"""
max_position = max(read_5p_ends[strand][chromosome].keys())
density_array =numpy.array([0] * max_position)
for position in read_5p_ends[strand][chromosome].keys():
if genome_dict[chromosome][position-1] in nucs_to_count:
density_array[position-1] = read_5p_ends[strand][chromosome][position]
if to_winsorize:
winsorize(density_array, limits = (low, 1-high), inplace = True)
normed_array = density_array/float(max(density_array))
return normed_array
def get_tp_tn(tp_tn_file):
TP = set()
TN = set()
f = open(tp_tn_file)
for line in f:
ll= line.strip('\n').split('\t')
if ll[2] == 'TP':
TP.add(int(ll[0]))
if ll[2] =='TN':
TN.add(int(ll[0]))
f.close()
return TP, TN
def call_positives(density_array, chromosome, strand, genome_dict, nucs_to_count, cutoff):
"""
:param density_array:
:return:a set of called positive positions
I've reverted these to 1-indexed to match the TP and TN calls from the structures
"""
positives = set()
for i in range(len(density_array)):
if genome_dict[chromosome][i-1] in nucs_to_count:
if density_array[i] >= cutoff:
positives.add(i)#adding 1 not necessary, since the modified nucleotide is the one 1 upstream of the RT stop!!!
return positives
def plot_ROC_curves(roc_curves, out_prefix):
fig = plt.figure(figsize=(8,8))
plot = fig.add_subplot(111)#first a pie chart of mutated nts
color_index = 0
for name in roc_curves:
x, y = roc_curves[name]
plot.plot(x, y, lw =2, label = name, color = mod_utils.rainbow[color_index])
color_index +=1
plot.plot(x, x, lw =1, ls = 'dashed', color = mod_utils.rainbow[color_index], label = 'y=x')
plot.set_xlabel('False positive rate (%) (100-specificity)')
plot.set_ylabel('True positive rate (%) (sensitivity)')
lg=plt.legend(loc=2,prop={'size':10}, labelspacing=0.2)
lg.draw_frame(False)
plt.savefig(out_prefix + '.pdf', transparent='True', format='pdf')
plt.clf()
def pie_read_5p_ends(read_5p_ends, genome_dict, out_prefix):
nuc_counts = defaultdict(int)
for chromosome in read_5p_ends['+']:
for position in read_5p_ends['+'][chromosome]:
if position-2 > 0 :
nuc = genome_dict[chromosome][position-1]
nuc_counts[nuc] += read_5p_ends['+'][chromosome][position]
fig = plt.figure(figsize=(8,8))
plot = fig.add_subplot(111)#first a pie chart of mutated nts
labels = sorted(nuc_counts.keys())
sizes = [nuc_counts[nt] for nt in labels]
plot.pie(sizes, labels = labels, colors = mod_utils.rainbow)
plot.set_title('nt exactly at read 5p ends across rRNA')
plt.savefig(out_prefix + '_nt_5p_ends.pdf', transparent='True', format='pdf')
plt.clf()
def main():
read_5p_ends_file, genome_fasta, outprefix = sys.argv[1:4]
tp_tn_annotations = sys.argv[4:]#true positive and true negative annotations
genome_dict = mod_utils.convertFastaToDict(genome_fasta)
read_5p_ends = mod_utils.unPickle(read_5p_ends_file)
normed_density_array = winsorize_norm_chromosome_data(read_5p_ends, 'S.c.18S_rRNA', '+', genome_dict, 'ACTG')
real_tp_tn_data = []
for filename in tp_tn_annotations:
real_tp, real_tn = get_tp_tn(filename)
real_tp_tn_data.append((os.path.basename(filename), real_tp, real_tn))
roc_curves = {}
for entry in real_tp_tn_data:
roc_curves[entry[0]] = [[],[]]#x and y value arrays for each
stepsize = 0.0001
for cutoff in numpy.arange(0,1.+5*stepsize, stepsize):
called_p = call_positives(normed_density_array, 'S.c.18S_rRNA', '+', genome_dict, 'AC', cutoff)
for entry in real_tp_tn_data:
#print called_p.intersection(entry[1])
num_tp_called = len(called_p.intersection(entry[1]))#how many true positives called at this cutoff
num_fp_called = len(called_p.intersection(entry[2]))#how many fp positives called at this cutoff
roc_curves[entry[0]][0].append(100.*num_fp_called/float(len(entry[2])))#FP rate on x axis
roc_curves[entry[0]][1].append(100.*num_tp_called/float(len(entry[1])))#TP rate on y axis
plot_ROC_curves(roc_curves, outprefix)
#pie_read_5p_ends(read_5p_ends, genome_dict, outprefix)
if __name__ == '__main__':
main() | mit |
bgroveben/python3_machine_learning_projects | oreilly_GANs_for_beginners/oreilly_GANs_for_beginners/introduction_to_ml_with_python/mglearn/mglearn/plot_scaling.py | 4 | 1505 | import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.preprocessing import (StandardScaler, MinMaxScaler, Normalizer,
RobustScaler)
from .plot_helpers import cm2
def plot_scaling():
X, y = make_blobs(n_samples=50, centers=2, random_state=4, cluster_std=1)
X += 3
plt.figure(figsize=(15, 8))
main_ax = plt.subplot2grid((2, 4), (0, 0), rowspan=2, colspan=2)
main_ax.scatter(X[:, 0], X[:, 1], c=y, cmap=cm2, s=60)
maxx = np.abs(X[:, 0]).max()
maxy = np.abs(X[:, 1]).max()
main_ax.set_xlim(-maxx + 1, maxx + 1)
main_ax.set_ylim(-maxy + 1, maxy + 1)
main_ax.set_title("Original Data")
other_axes = [plt.subplot2grid((2, 4), (i, j))
for j in range(2, 4) for i in range(2)]
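    # Each subplot applies one sklearn preprocessing strategy to the same data:
    # StandardScaler removes the mean and scales to unit variance,
    # RobustScaler centers/scales with the median and interquartile range,
    # MinMaxScaler rescales each feature to the [0, 1] range, and
    # Normalizer(norm='l2') rescales every sample to unit Euclidean length.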
for ax, scaler in zip(other_axes, [StandardScaler(), RobustScaler(),
MinMaxScaler(), Normalizer(norm='l2')]):
X_ = scaler.fit_transform(X)
ax.scatter(X_[:, 0], X_[:, 1], c=y, cmap=cm2, s=60)
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
ax.set_title(type(scaler).__name__)
other_axes.append(main_ax)
for ax in other_axes:
ax.spines['left'].set_position('center')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('center')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
| mit |
rexshihaoren/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org .
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
    3) the data array is stored as `n_features x n_samples`, and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
        The raw name is automatically converted to a mldata.org URL.
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
    to respect the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
anugrah-saxena/pycroscopy | pycroscopy/analysis/utils/atom_finding_general_gaussian.py | 1 | 29551 | # -*- coding: utf-8 -*-
"""
@author: Ondrej Dyck
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import os
import numpy as np
from scipy.optimize import least_squares
import itertools as itt
import multiprocessing as mp
import time as tm
from _warnings import warn
from sklearn.neighbors import KNeighborsClassifier
import h5py
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import sys
from ...io.io_utils import recommendCores, realToCompound
from ...io.microdata import MicroDataset, MicroDataGroup
from ...io.io_hdf5 import ioHDF5
from ...viz import plot_utils
from ..model import Model
def do_fit(single_parm):
parms = single_parm[0]
coef_guess_mat = parms[1]
fit_region = parms[2]
s1 = parms[3]
s2 = parms[4]
lb_mat = parms[5]
ub_mat = parms[6]
kwargs = single_parm[1]
max_function_evals = kwargs['max_function_evals']
plsq = least_squares(gauss_2d_residuals,
coef_guess_mat.ravel(),
args=(fit_region.ravel(), s1.T, s2.T),
kwargs=kwargs,
bounds=(lb_mat.ravel(), ub_mat.ravel()),
jac='2-point', max_nfev=max_function_evals)
coef_fit_mat = np.reshape(plsq.x, (-1, 7))
#if verbose:
# return coef_guess_mat, lb_mat, ub_mat, coef_fit_mat, fit_region, s_mat, plsq
#else:
return coef_fit_mat
def gauss_2d_residuals(parms_vec, orig_data_mat, x_data, y_data, **kwargs):
"""
Calculates the residual
Parameters
----------
parms_vec : 1D numpy.ndarray
Raveled version of the parameters matrix
orig_data_mat : 2D numpy array
Section of the image being fitted
    x_data : 2D numpy.ndarray
        X coordinates of the fit region, as produced by np.meshgrid
    y_data : 2D numpy.ndarray
        Y coordinates of the fit region, as produced by np.meshgrid
Returns
-------
err_vec : 1D numpy.ndarray
        Difference between the original data and the matrix obtained by evaluating parms_vec on x_data and y_data
"""
# Only need to reshape the parms from 1D to 2D
parms_mat = np.reshape(parms_vec, (-1, 7))
# print(parms_mat)
err = orig_data_mat - gauss2d(x_data, y_data, *parms_mat, **kwargs).ravel()
return err
def gauss2d(X, Y, *parms, **kwargs):
"""
Calculates a general 2d elliptic gaussian
Parameters
----------
X, Y : the x and y matrix values from the call "X, Y = np.meshgrid(x,y)" where x and y are
defined by x = np.arange(-width/2,width/2) and y = np.arange(-height/2,height/2).
params: List of 7 parameters defining the gaussian.
The parameters are [A, x0, y0, sigma_x, sigma_y, theta, background]
A : amplitude
x0: x position
y0: y position
sigma_x: standard deviation in x
sigma_y: standard deviation in y
theta: rotation angle
background: a background constant
Returns
-------
Returns a width x height matrix of values representing the call to the gaussian function at each position.
"""
symmetric = kwargs['symmetric']
background = kwargs['background']
Z = np.zeros(np.shape(X))
background_value = parms[0][-1] # we can only have one background value for the fit region
for guess in parms:
# each gaussian has a background associated with it but we only use the center atom background
A, x0, y0, sigma_x, sigma_y, theta, background_unused = guess
# determine which type of gaussian we want
if symmetric:
sigma_y = sigma_x
if not background:
background = 0
else:
if not background:
background = 0
# define some variables
a = np.cos(theta) ** 2 / (2 * sigma_x ** 2) + np.sin(theta) ** 2 / (2 * sigma_y ** 2)
b = -np.sin(2 * theta) / (4 * sigma_x ** 2) + np.sin(2 * theta) / (4 * sigma_y ** 2)
c = np.sin(theta) ** 2 / (2 * sigma_x ** 2) + np.cos(theta) ** 2 / (2 * sigma_y ** 2)
# calculate the final value
Z += A * np.exp(- (a * (X - x0) ** 2 - 2 * b * (X - x0) * (Y - y0) + c * (Y - y0) ** 2)) + background_value
return Z
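# Hedged usage sketch (not part of the original module): evaluate one symmetric
# Gaussian plus a constant background on a small grid. The parameter order
# follows the docstring above: [A, x0, y0, sigma_x, sigma_y, theta, background].
#
#   x = np.arange(-10, 10)
#   y = np.arange(-10, 10)
#   X, Y = np.meshgrid(x, y)
#   guess = np.array([[1.0, 0.0, 0.0, 2.0, 2.0, 0.0, 0.1]])
#   Z = gauss2d(X, Y, *guess, symmetric=True, background=True)  # Z.shape == X.shape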
class Gauss_Fit(object):
"""
Initializes the gaussian fitting routines:
fit_motif()
fit_atom_positions_parallel()
write_to_disk()
Parameters
----------
atom_grp : h5py.Group reference
Parent group containing the atom guess positions, cropped clean image and motif positions
fitting_parms : dictionary
Parameters used for atom position fitting
'fit_region_size': region to consider when fitting. Should be large enough to see the nearest neighbors.
'num_nearest_neighbors': the number of nearest neighbors to fit
'sigma_guess': starting guess for gaussian standard deviation. Should be about the size of an atom width in pixels.
'position_range': range that the fitted position can move from initial guess position in pixels
'max_function_evals': maximum allowed function calls; passed to the least squares fitter
'fitting_tolerance': target difference between the fit and the data
'symmetric': flag to signal if a symmetric gaussian is desired (i.e. sigma_x == sigma_y)
'background': flag to signal if a background constant is desired
        'movement_allowance': percent of movement allowed (on all parameters except x and y positions)
"""
def __init__(self, atom_grp, fitting_parms):
# we should do some initial checks here to ensure the data is at the correct stage for atom fitting
print('Initializing Gauss Fit')
# check that the data is appropriate (does nothing yet)
self.check_data(atom_grp)
# set dtypes
self.atom_coeff_dtype = np.dtype([('type', np.float32),
('amplitude', np.float32),
('x', np.float32),
('y', np.float32),
('sigma_x', np.float32),
('sigma_y', np.float32),
('theta', np.float32),
('background', np.float32)])
self.motif_coeff_dtype = np.dtype([('amplitude', np.float32),
('x', np.float32),
('y', np.float32),
('sigma_x', np.float32),
('sigma_y', np.float32),
('theta', np.float32),
('background', np.float32)])
# initialize some variables
self.atom_grp = atom_grp
self.cropped_clean_image = self.atom_grp['Cropped_Clean_Image'][()]
self.h5_guess = self.atom_grp['Guess_Positions']
self.fitting_parms = fitting_parms
self.win_size = self.atom_grp.attrs['motif_win_size']
half_wind = int(self.win_size * 0.5)
self.motif_centers = self.atom_grp['Motif_Centers'][:] - half_wind # << correction for double cropped image
self.psf_width = self.atom_grp.attrs['psf_width']
# grab the initial guesses
self.all_atom_guesses = np.transpose(np.vstack((self.h5_guess['x'],
self.h5_guess['y'],
self.h5_guess['type'])))
self.num_atoms = self.all_atom_guesses.shape[0] # number of atoms
# build distance matrix
pos_vec = self.all_atom_guesses[:, 0] + 1j * self.all_atom_guesses[:, 1]
pos_mat1 = np.tile(np.transpose(np.atleast_2d(pos_vec)), [1, self.num_atoms])
pos_mat2 = np.transpose(pos_mat1)
d_mat = np.abs(pos_mat2 - pos_mat1) # matrix of distances between all atoms
# sort the distance matrix and keep only the atoms within the nearest neighbor limit
neighbor_dist_order = np.argsort(d_mat)
self.num_nearest_neighbors = self.fitting_parms['num_nearest_neighbors']
# neighbor dist order has the (indices of the) neighbors for each atom sorted by distance
self.closest_neighbors_mat = neighbor_dist_order[:, 1:self.num_nearest_neighbors + 1]
# find which atoms are at the centers of the motifs
self.center_atom_vec = (self.motif_centers[:, 0]) + (1j * self.motif_centers[:, 1])
self.center_atom_dists = [np.abs(pos_vec - item) for item in self.center_atom_vec]
self.center_atom_sorted = np.argsort(self.center_atom_dists)
self.center_atom_indices = self.center_atom_sorted[:, 0]
def fit_atom_positions_parallel(self, plot_results=True, num_cores=None):
"""
Fits the positions of N atoms in parallel
Parameters
----------
plot_results : optional boolean (default is True)
Specifies whether to output a visualization of the fitting results
num_cores : unsigned int (Optional. Default = available logical cores - 2)
Number of cores to compute with
Creates guess_dataset and fit_dataset with the results.
Returns
-------
fit_dataset: NxM numpy array of tuples where N is the number of atoms fit and M is the number of nearest
neighbors considered. Each tuple contains the converged values for each gaussian.
The value names are stored in the dtypes.
"""
t_start = tm.time()
if num_cores is None:
num_cores = recommendCores(self.num_atoms, requested_cores=num_cores, lengthy_computation=False)
print('Setting up guesses')
self.guess_parms = []
for i in range(self.num_atoms):
self.guess_parms.append(self.do_guess(i))
print('Fitting...')
if num_cores > 1:
pool = mp.Pool(processes=num_cores)
parm_list = itt.izip(self.guess_parms, itt.repeat(self.fitting_parms))
chunk = int(self.num_atoms / num_cores)
jobs = pool.imap(do_fit, parm_list, chunksize=chunk)
self.fitting_results = [j for j in jobs]
pool.close()
else:
parm_list = itt.izip(self.guess_parms, itt.repeat(self.fitting_parms))
self.fitting_results = [do_fit(parm) for parm in parm_list]
print ('Finalizing datasets...')
self.guess_dataset = np.zeros(shape=(self.num_atoms, self.num_nearest_neighbors + 1), dtype=self.atom_coeff_dtype)
self.fit_dataset = np.zeros(shape=self.guess_dataset.shape, dtype=self.guess_dataset.dtype)
for atom_ind, single_atom_results in enumerate(self.fitting_results):
types = np.hstack((self.h5_guess['type'][atom_ind], [self.h5_guess['type'][neighbor] for neighbor in self.closest_neighbors_mat[atom_ind]]))
atom_data = np.hstack((np.vstack(types), single_atom_results))
atom_data = [tuple(element) for element in atom_data]
self.fit_dataset[atom_ind] = atom_data
single_atom_guess = self.guess_parms[atom_ind]
atom_guess_data = np.hstack((np.vstack(types), single_atom_guess[1]))
atom_guess_data = [tuple(element) for element in atom_guess_data]
self.guess_dataset[atom_ind] = atom_guess_data
tot_time = np.round(tm.time() - t_start)
print('Took {} sec to find {} atoms with {} cores'.format(tot_time, len(self.fitting_results), num_cores))
# if plotting is desired
if plot_results:
fig, axis = plt.subplots(figsize=(14, 14))
axis.hold(True)
axis.imshow(self.cropped_clean_image, interpolation='none', cmap="gray")
axis.scatter(self.guess_dataset[:, 0]['y'], self.guess_dataset[:, 0]['x'], color='yellow', label='Guess')
axis.scatter(self.fit_dataset[:, 0]['y'], self.fit_dataset[:, 0]['x'], color='red', label='Fit')
axis.legend()
fig.tight_layout()
fig.show()
return self.fit_dataset
def do_guess(self, atom_ind, initial_motifs=False):
"""
Fits the position of a single atom.
Parameters
----------
atom_ind : int
The index of the atom to generate guess parameters for
initial_motifs : optional boolean (default is False)
Specifies whether we are generating guesses for the initial motifs. Subsequent guesses
have the advantage of the fits from the motifs and will be much better starting values.
Returns
-------
atom_ind : int
The index of the atom to generate guess parameters for
coef_guess_mat : 2D numpy array
Initial guess parameters for all the gaussians.
fit_region : 2D numpy array
The fit region cropped from the image
s1 and s2 : 2D numpy arrays
The required input for the X and Y parameters of gauss2d
lb_mat and ub_mat : 2D numpy arrays
The lower and upper bounds for the fitting.
"""
fit_region_size = self.fitting_parms['fit_region_size']
movement_allowance = self.fitting_parms['movement_allowance']
position_range = self.fitting_parms['position_range']
# start writing down initial guesses
x_center_atom = self.h5_guess['x'][atom_ind]
y_center_atom = self.h5_guess['y'][atom_ind]
x_neighbor_atoms = [self.h5_guess['x'][self.closest_neighbors_mat[atom_ind][i]] for i in
range(self.num_nearest_neighbors)]
y_neighbor_atoms = [self.h5_guess['y'][self.closest_neighbors_mat[atom_ind][i]] for i in
range(self.num_nearest_neighbors)]
# select the window we're going to be fitting
x_range = slice(max(int(np.round(x_center_atom - fit_region_size)), 0),
min(int(np.round(x_center_atom + fit_region_size)),
self.cropped_clean_image.shape[0]))
y_range = slice(max(int(np.round(y_center_atom - fit_region_size)), 0),
min(int(np.round(y_center_atom + fit_region_size)),
self.cropped_clean_image.shape[1]))
fit_region = self.cropped_clean_image[x_range, y_range]
# define x and y fitting range
s1, s2 = np.meshgrid(range(x_range.start, x_range.stop),
range(y_range.start, y_range.stop))
# guesses are different if we're fitting the initial windows
if initial_motifs:
# If true, we need to generate more crude guesses
# for the initial motif window fitting.
# Once these have been fit properly they will act
# as the starting point for future guesses.
# put the initial guesses into the proper form
x_guess = np.hstack((x_center_atom, x_neighbor_atoms))
y_guess = np.hstack((y_center_atom, y_neighbor_atoms))
sigma_x_center_atom = self.fitting_parms['sigma_guess']
sigma_y_center_atom = self.fitting_parms['sigma_guess']
sigma_x_neighbor_atoms = [self.fitting_parms['sigma_guess'] for i in
range(self.num_nearest_neighbors)]
sigma_y_neighbor_atoms = [self.fitting_parms['sigma_guess'] for i in
range(self.num_nearest_neighbors)]
theta_center_atom = 0
theta_neighbor_atoms = np.zeros(self.num_nearest_neighbors)
background_center_atom = np.min(fit_region)
# The existence of a background messes up a straight forward gaussian amplitude guess,
# so we add/subtract the background value from the straight forward guess depending
# on if the background is positive or negative.
if np.min(fit_region) < 0:
a_guess = self.cropped_clean_image[
np.rint(x_guess).astype(int), np.rint(y_guess).astype(int)] - background_center_atom
else:
a_guess = self.cropped_clean_image[
np.rint(x_guess).astype(int), np.rint(y_guess).astype(int)] + background_center_atom
sigma_x_guess = np.hstack((sigma_x_center_atom, sigma_x_neighbor_atoms))
sigma_y_guess = np.hstack((sigma_y_center_atom, sigma_y_neighbor_atoms))
theta_guess = np.hstack((theta_center_atom, theta_neighbor_atoms))
background_guess = np.hstack([background_center_atom for num in range(
self.num_nearest_neighbors + 1)]) # we will only need one background
coef_guess_mat = np.transpose(np.vstack((a_guess, x_guess, y_guess, sigma_x_guess, sigma_y_guess,
theta_guess, background_guess)))
else:
# otherwise better guesses are assumed to exist
motif_type = self.h5_guess['type'][atom_ind]
coef_guess_mat = np.copy(self.motif_converged_parms[motif_type])
coef_guess_mat[:, 1] = self.h5_guess['x'][atom_ind] + coef_guess_mat[:, 1]
coef_guess_mat[:, 2] = self.h5_guess['y'][atom_ind] + coef_guess_mat[:, 2]
# Choose upper and lower bounds for the fitting
#
# Address negatives first
lb_a = []
ub_a = []
for item in coef_guess_mat[:, 0]: # amplitudes
if item < 0:
lb_a.append(item + item * movement_allowance)
ub_a.append(item - item * movement_allowance)
else:
lb_a.append(item - item * movement_allowance)
ub_a.append(item + item * movement_allowance)
lb_background = []
ub_background = []
for item in coef_guess_mat[:, 6]: # background
if item < 0:
lb_background.append(item + item * movement_allowance)
ub_background.append(item - item * movement_allowance)
else:
lb_background.append(item - item * movement_allowance)
ub_background.append(item + item * movement_allowance)
# Set up upper and lower bounds:
lb_mat = [lb_a, # amplitude
coef_guess_mat[:, 1] - position_range, # x position
coef_guess_mat[:, 2] - position_range, # y position
[np.max([0, value - value * movement_allowance]) for value in coef_guess_mat[:, 3]], # sigma x
[np.max([0, value - value * movement_allowance]) for value in coef_guess_mat[:, 4]], # sigma y
coef_guess_mat[:, 5] - 2 * 3.14159, # theta
lb_background] # background
ub_mat = [ub_a, # amplitude
coef_guess_mat[:, 1] + position_range, # x position
coef_guess_mat[:, 2] + position_range, # y position
coef_guess_mat[:, 3] + coef_guess_mat[:, 3] * movement_allowance, # sigma x
coef_guess_mat[:, 4] + coef_guess_mat[:, 4] * movement_allowance, # sigma y
coef_guess_mat[:, 5] + 2 * 3.14159, # theta
ub_background] # background
lb_mat = np.transpose(lb_mat)
ub_mat = np.transpose(ub_mat)
check_bounds = False
if check_bounds:
for i, item in enumerate(coef_guess_mat):
for j, value in enumerate(item):
if lb_mat[i][j] > value or ub_mat[i][j] < value:
print('Atom number: {}'.format(atom_ind))
print('Guess: {}'.format(item))
print('Lower bound: {}'.format(lb_mat[i]))
print('Upper bound: {}'.format(ub_mat[i]))
print('dtypes: {}'.format(self.atom_coeff_dtype.names))
raise ValueError('{} guess is out of bounds'.format(self.atom_coeff_dtype.names[j]))
return atom_ind, coef_guess_mat, fit_region, s1, s2, lb_mat, ub_mat
def check_data(self, atom_grp):
# some data checks here
try:
img = atom_grp['Cropped_Clean_Image']
except KeyError:
raise KeyError('The data \'Cropped_Clean_Image\' must exist before fitting')
try:
guesses = atom_grp['Guess_Positions']
except KeyError:
raise KeyError('The data \'Guess_Positions\' must exist before fitting')
try:
motifs = atom_grp['Motif_Centers']
except KeyError:
raise KeyError('The data \'Motif_Centers\' must exist before fitting')
if np.shape(img)[0] < 1 or np.shape(img)[1] < 1:
raise Exception('\'Cropped_Clean_Image\' data must have two dimensions with lengths greater than one')
if len(guesses) < 1:
raise Exception('\'Guess_Positions\' data length must be greater than one')
if len(guesses[0]) < 3:
raise Exception('\'Guess_Positions\' data must have at least three values for each entry: '
'type, x position, and y position')
if motifs.shape[0] < 1:
raise Exception('\'Motif_Centers\' data must contain at least one motif')
if motifs.shape[1] != 2:
raise Exception('\'Motif_Centers\' data is expected to have a shape of (n, 2). '
'The second dimension is not 2.')
def write_to_disk(self):
"""
Writes the gaussian fitting results to disk
Parameters
----------
None
Returns
-------
Returns the atom parent group containing the original data and the newly written data:
Gaussian_Guesses
Gaussian_Fits
Motif_Guesses
Motif_Fits
Nearest_Neighbor_Indices
"""
ds_atom_guesses = MicroDataset('Gaussian_Guesses', data=self.guess_dataset)
ds_atom_fits = MicroDataset('Gaussian_Fits', data=self.fit_dataset)
ds_motif_guesses = MicroDataset('Motif_Guesses', data=self.motif_guess_dataset)
ds_motif_fits = MicroDataset('Motif_Fits', data=self.motif_converged_dataset)
ds_nearest_neighbors = MicroDataset('Nearest_Neighbor_Indices', data=self.closest_neighbors_mat, dtype=np.uint32)
dgrp_atom_finding = MicroDataGroup(self.atom_grp.name.split('/')[-1], parent=self.atom_grp.parent.name)
dgrp_atom_finding.attrs = self.fitting_parms
dgrp_atom_finding.addChildren([ds_atom_guesses, ds_atom_fits, ds_motif_guesses, ds_motif_fits, ds_nearest_neighbors])
hdf = ioHDF5(self.atom_grp.file)
h5_atom_refs = hdf.writeData(dgrp_atom_finding)
hdf.flush()
return self.atom_grp
def fit_motif(self, plot_results=True):
'''
Parameters
----------
plot_results: boolean (default = True)
Flag to specify whether a result summary should be plotted
Returns
-------
motif_converged_dataset: NxM numpy array of tuples where N is the number of motifs and M is the number
of nearest neighbors considered. Each tuple contains the converged parameters for a gaussian fit to
an atom in a motif window.
'''
self.motif_guesses = []
self.motif_parms = []
self.motif_converged_parms = []
self.fit_motifs = []
fit_region = []
# generate final dataset forms
self.motif_guess_dataset = np.zeros(shape=(self.motif_centers.shape[0], self.num_nearest_neighbors + 1),
dtype=self.motif_coeff_dtype)
self.motif_converged_dataset = np.zeros(shape=(self.motif_centers.shape[0], self.num_nearest_neighbors + 1),
dtype=self.motif_coeff_dtype)
for motif in range(len(self.motif_centers)):
# get guesses
self.motif_parms.append(self.do_guess(self.center_atom_indices[motif], initial_motifs=True))
# pull out parameters for generating the gaussians
coef_guess_mat = self.motif_parms[motif][1]
s1 = self.motif_parms[motif][3].T
s2 = self.motif_parms[motif][4].T
fit_region.append(self.motif_parms[motif][2])
# put guesses into final dataset form
self.motif_guess_dataset[motif] = [tuple(element) for element in coef_guess_mat]
# store the guess results for plotting
self.motif_guesses.append(gauss2d(s1, s2, *coef_guess_mat, **self.fitting_parms))
# fit the motif with num_nearest_neighbors + 1 gaussians
parm_list = [self.motif_parms[motif], self.fitting_parms]
fitting_results = do_fit(parm_list)
# store the converged results
self.motif_converged_parms.append(fitting_results)
self.motif_converged_dataset[motif] = [tuple(element) for element in fitting_results]
# store the images of the converged gaussians
self.fit_motifs.append(gauss2d(s1, s2, *fitting_results, **self.fitting_parms))
# calculate the relative atom positions (instead of absolute)
fitting_results[:, 1] = fitting_results[:, 1] - self.motif_centers[motif][0]
fitting_results[:, 2] = fitting_results[:, 2] - self.motif_centers[motif][1]
# plot results if desired
if plot_results:
# initialize the figure
fig, axes = plt.subplots(ncols=3, nrows=len(self.motif_centers), figsize=(14, 6 * len(self.motif_centers)))
for i, ax_row in enumerate(np.atleast_2d(axes)):
# plot the original windows
ax_row[0].imshow(fit_region[i], interpolation='none',
cmap=plot_utils.cmap_jet_white_center())
ax_row[0].set_title('Original Window')
# plot the initial guess windows
ax_row[1].imshow(self.motif_guesses[i], interpolation='none',
cmap=plot_utils.cmap_jet_white_center())
ax_row[1].set_title('Initial Gaussian Guesses')
# plot the converged gaussians
ax_row[2].imshow(self.fit_motifs[i], interpolation='none',
cmap=plot_utils.cmap_jet_white_center())
ax_row[2].set_title('Converged Gaussians')
fig.show()
return self.motif_converged_dataset
# if __name__=='__main__':
# file_name = r"C:\Users\o2d\Documents\pycroscopy\\test_scripts\\testing_gauss_fit\image 04.h5"
# folder_path, file_path = os.path.split(file_name)
#
# file_base_name, file_extension = file_name.rsplit('.')
# h5_file = h5py.File(file_name, mode='r+')
# # look at the data tree in the h5
# '''
# # define a small function called 'print_tree' to look at the folder tree structure
# def print_tree(parent):
# print(parent.name)
# if isinstance(parent, h5py.Group):
# for child in parent:
# print_tree(parent[child])
# '''
#
# #print('Datasets and datagroups within the file:')
# file_handle = h5_file
# #print_tree(file_handle)
#
# cropped_clean_image = h5_file['/Measurement_000/Channel_000/Raw_Data-Windowing_000/Image_Windows-SVD_000/U-Cluster_000/Labels-Atom_Finding_000/Cropped_Clean_Image']
# atom_grp = cropped_clean_image.parent
# guess_params = atom_grp['Guess_Positions']
#
# num_nearest_neighbors = 4
# psf_width = atom_grp.attrs['psf_width']
# win_size = atom_grp.attrs['motif_win_size']
#
# fitting_parms = {'fit_region_size': win_size * 0.5, # region to consider when fitting
# 'num_nearest_neighbors': num_nearest_neighbors,
# 'sigma_guess': 3, # starting guess for gaussian standard deviation
# 'position_range': win_size / 4,# range that the fitted position can go from initial guess position[pixels]
# 'max_function_evals': 100,
# 'fitting_tolerance': 1E-4,
# 'symmetric': True,
# 'background': True,
# 'movement_allowance': 5.0} # percent of movement allowed (on some parameters)
#
# foo = Gauss_Fit(atom_grp, fitting_parms)
#
#
| mit |
sabyasachi087/sp17-i524 | project/S17-IR-P012/code/binarize.py | 21 | 1096 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 4 15:56:31 2017
I524 Project: OCR
Preprocessing
Binarization
@author: saber
"""
import numpy as np
import cv2
import matplotlib.pyplot as plt
image_path = 'sample1.png'
image_arr = cv2.imread(image_path, 0)
plt.figure(1)
plt.subplot(311)
# Plot histogram of data
plt.hist(image_arr.flatten())
# Otsu's method: pick the threshold that maximizes the between-class variance.
# Note: np.histogram returns bin *edges* (len(hist) + 1 values), hence the
# [1:] / [:-1] slices below.
hist, bin_centers = np.histogram(image_arr)
weight1 = np.cumsum(hist)              # cumulative weight of the "dark" class
weight2 = np.cumsum(hist[::-1])[::-1]  # cumulative weight of the "bright" class
mean1 = np.cumsum(hist * bin_centers[1:]) / weight1
mean2 = (np.cumsum((hist * bin_centers[1:])[::-1]) / weight2[::-1])[::-1]
variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:])**2
idx = np.argmax(variance12)
threshold = bin_centers[:-1][idx]
img_bin = np.zeros(image_arr.shape)
for i in range(image_arr.shape[0]):
for j in range(image_arr.shape[1]):
if image_arr[i, j] > threshold:
img_bin[i, j] = 255
else:
img_bin[i, j] = 0
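# The pixel loop above is equivalent to the vectorized form below (hedged
# alternative; same values, possibly with a different dtype):
#   img_bin = np.where(image_arr > threshold, 255, 0)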
#plt.imshow(image_arr)
#plt.imshow(img_bin)
plt.subplot(312)
plt.imshow(image_arr, 'gray')
plt.subplot(313)
plt.imshow(img_bin, 'gray')
| apache-2.0 |
tosolveit/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
ky822/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
rajat1994/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
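# Hedged usage sketch (not part of the original module): remove near-constant
# boolean features. A Bernoulli feature has variance p * (1 - p), so a
# threshold of .8 * (1 - .8) drops features that take the same value in at
# least 80% of the samples.
#
#   X = [[0, 1, 0], [0, 1, 1], [0, 1, 0], [0, 1, 1], [0, 0, 0], [0, 1, 1]]
#   selector = VarianceThreshold(threshold=.8 * (1 - .8))
#   X_reduced = selector.fit_transform(X)  # keeps only the third column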
| bsd-3-clause |
siutanwong/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
    def line(x0):
        # solve coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0 for x1
        return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
jaj42/phystream | python/toinfluxdb/toinflux.py | 2 | 3005 | import asyncio
import zmq
import zmq.asyncio
import msgpack
import pandas as pd
zmqhost = '127.0.0.1'
numport = 4201
wavport = 4202
influxhost = '127.0.0.1'
influxport = 8089
#<measurement>[,<tag_key>=<tag_value>[,<tag_key>=<tag_value>]] <field_key>=<field_value>[,<field_key>=<field_value>] [<timestamp>]
def run():
loop = asyncio.get_event_loop()
queue = asyncio.Queue(loop=loop)
ctx = zmq.asyncio.Context()
asyncio.ensure_future(recvFromZmq(loop, ctx, queue, zmqhost, numport, numEncode))
asyncio.ensure_future(recvFromZmq(loop, ctx, queue, zmqhost, wavport, wavEncode))
asyncio.ensure_future(sendToInfluxdb(loop, queue, influxhost, influxport))
loop.run_forever()
async def recvFromZmq(loop, ctx, queue, host, port, encoder):
sock = ctx.socket(zmq.SUB, io_loop=loop)
#sock.connect(f'tcp://{host}:{port}')
sock.bind(f'tcp://{host}:{port}')
sock.subscribe(b'')
while loop.is_running():
msg = await sock.recv()
print(f'Received {msg}')
try:
decoded = decodeMsg(msg)
request = encoder(decoded)
except ValueError:
print(f'Failed to parse: {msg}')
continue
await queue.put(request)
async def sendToInfluxdb(loop, queue, host, port):
udpproto = lambda: asyncio.DatagramProtocol()
transport, proto = await loop.create_datagram_endpoint(udpproto, remote_addr=(host, port))
while loop.is_running():
request = await queue.get()
print(f'Sending: {request}')
transport.sendto(request)
transport.close()
def decodeMsg(msg, wave=False):
# Parse incoming message
topic, msgpackdata = msg.split(b' ', 1)
frame = msgpack.unpackb(msgpackdata, encoding='utf-8')
topic = topic.decode('ASCII')
frame['topic'] = topic
return frame
def numEncode(frame):
# Encode numerics for InfluxDB
tagdata = frame['tags'].copy()
tagdata['origin'] = frame['topic']
tags = [f"{tag}={value}" for tag, value in tagdata.items()]
str_tags = ','.join(tags)
data = frame['data']
fields = [f"{field}={value}" for field, value in data.items()]
str_fields = ','.join(fields)
time = frame['basetime']
line = f'numerics,{str_tags} {str_fields} {time}'
return line.encode('ASCII')
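# Illustrative example (hedged; the tag, field names and values below are
# hypothetical, only the formatting mirrors the function above):
#   numEncode({'topic': 'mon1',
#              'tags': {'loc': 'icu'},
#              'basetime': 1503501661000000000,
#              'data': {'hr': 72, 'spo2': 98}})
#   -> b'numerics,loc=icu,origin=mon1 hr=72,spo2=98 1503501661000000000'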
def wavEncode(frame):
# Encode waves for InfluxDB
tagdata = frame['tags'].copy()
tagdata['origin'] = frame['topic']
tags = [f"{tag}={value}" for tag, value in tagdata.items()]
str_tags = ','.join(tags)
#basetime = frame['basetime']
wavedata = pd.DataFrame(frame['data']).set_index('time')
lines = []
for time, waves in wavedata.iterrows():
fields = []
for metric, value in waves.iteritems():
fields.append(f'{metric}={value}')
str_fields = ','.join(fields)
line = f'waves,{str_tags} {str_fields} {time}'
lines.append(line)
request = '\n'.join(lines)
return request.encode('ASCII')
if __name__ == "__main__":
run()
| isc |
ahhda/sympy | examples/intermediate/sample.py | 107 | 3494 | """
Utility functions for plotting sympy functions.
See examples\mplot2d.py and examples\mplot3d.py for usable 2d and 3d
graphing functions using matplotlib.
"""
from sympy.core.sympify import sympify, SympifyError
from sympy.external import import_module
np = import_module('numpy')
def sample2d(f, x_args):
"""
Samples a 2d function f over specified intervals and returns two
arrays (X, Y) suitable for plotting with matlab (matplotlib)
syntax. See examples\mplot2d.py.
f is a function of one variable, such as x**2.
x_args is an interval given in the form (var, min, max, n)
"""
try:
f = sympify(f)
except SympifyError:
raise ValueError("f could not be interpretted as a SymPy function")
try:
x, x_min, x_max, x_n = x_args
except AttributeError:
raise ValueError("x_args must be a tuple of the form (var, min, max, n)")
x_l = float(x_max - x_min)
x_d = x_l/float(x_n)
X = np.arange(float(x_min), float(x_max) + x_d, x_d)
Y = np.empty(len(X))
for i in range(len(X)):
try:
Y[i] = float(f.subs(x, X[i]))
except TypeError:
Y[i] = None
return X, Y
def sample3d(f, x_args, y_args):
"""
Samples a 3d function f over specified intervals and returns three
2d arrays (X, Y, Z) suitable for plotting with matlab (matplotlib)
syntax. See examples\mplot3d.py.
f is a function of two variables, such as x**2 + y**2.
x_args and y_args are intervals given in the form (var, min, max, n)
"""
x, x_min, x_max, x_n = None, None, None, None
y, y_min, y_max, y_n = None, None, None, None
try:
f = sympify(f)
except SympifyError:
raise ValueError("f could not be interpreted as a SymPy function")
try:
x, x_min, x_max, x_n = x_args
y, y_min, y_max, y_n = y_args
except AttributeError:
raise ValueError("x_args and y_args must be tuples of the form (var, min, max, intervals)")
x_l = float(x_max - x_min)
x_d = x_l/float(x_n)
x_a = np.arange(float(x_min), float(x_max) + x_d, x_d)
y_l = float(y_max - y_min)
y_d = y_l/float(y_n)
y_a = np.arange(float(y_min), float(y_max) + y_d, y_d)
def meshgrid(x, y):
"""
Taken from matplotlib.mlab.meshgrid.
"""
x = np.array(x)
y = np.array(y)
numRows, numCols = len(y), len(x)
x.shape = 1, numCols
X = np.repeat(x, numRows, 0)
y.shape = numRows, 1
Y = np.repeat(y, numCols, 1)
return X, Y
X, Y = np.meshgrid(x_a, y_a)
Z = np.ndarray((len(X), len(X[0])))
for j in range(len(X)):
for k in range(len(X[0])):
try:
Z[j][k] = float(f.subs(x, X[j][k]).subs(y, Y[j][k]))
except (TypeError, NotImplementedError):
Z[j][k] = 0
return X, Y, Z
def sample(f, *var_args):
"""
Samples a 2d or 3d function over specified intervals and returns
a dataset suitable for plotting with matlab (matplotlib) syntax.
Wrapper for sample2d and sample3d.
f is a function of one or two variables, such as x**2.
var_args are intervals for each variable given in the form (var, min, max, n)
"""
if len(var_args) == 1:
return sample2d(f, var_args[0])
elif len(var_args) == 2:
return sample3d(f, var_args[0], var_args[1])
else:
raise ValueError("Only 2d and 3d sampling are supported at this time.")
| bsd-3-clause |
davidgbe/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
    gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
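# Illustrative note (hedged): make_sparse_random_data(5, 10, 8, random_state=0)
# returns a (5, 10) dense ndarray and the matching CSR matrix holding up to 8
# non-zero entries (duplicate random coordinates, if any, are summed together).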
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
print('Dataset statics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(
                X, transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
paurichardson/trading-with-python | spreadApp/makeDist.py | 77 | 1720 | from distutils.core import setup
import py2exe
manifest_template = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s Program</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
RT_MANIFEST = 24
import matplotlib
opts = {
'py2exe': {
"compressed": 1,
"bundle_files" : 3,
"includes" : ["sip",
"matplotlib.backends",
"matplotlib.backends.backend_qt4agg",
"pylab", "numpy",
"matplotlib.backends.backend_tkagg"],
'excludes': ['_gtkagg', '_tkagg', '_agg2',
'_cairo', '_cocoaagg',
'_fltkagg', '_gtk', '_gtkcairo', ],
'dll_excludes': ['libgdk-win32-2.0-0.dll',
'libgobject-2.0-0.dll']
}
}
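# Usage note (an assumption, not taken from the original script): a py2exe
# setup script such as this is normally invoked as
#
#   python makeDist.py py2exe
#
# which places the frozen Windows executable and its dependencies in dist/.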
setup(name="triton",
version = "0.1",
scripts=["spreadScanner.pyw"],
windows=[{"script": "spreadScanner.pyw"}],
options=opts,
data_files=matplotlib.get_py2exe_datafiles(),
other_resources = [(RT_MANIFEST, 1, manifest_template % dict(prog="spreadDetective"))],
zipfile = None) | bsd-3-clause |
elkingtonmcb/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the mean squared error and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
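# Reference sketch (not part of the original example): with an explicit
# intercept column, the ordinary least-squares fit used below has the closed
# form beta_hat = (X^T X)^{-1} X^T y, e.g.
#
#   >>> import numpy as np
#   >>> A = np.array([[1., 0.], [1., 1.], [1., 2.]])  # intercept column + feature
#   >>> y = np.array([1., 3., 5.])                    # exactly y = 1 + 2*x
#   >>> np.linalg.lstsq(A, y)[0]                      # -> array([ 1.,  2.])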
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
moberweger/deep-prior-pp | src/data/importers.py | 1 | 49986 | """Provides importer classes for importing data from different datasets.
DepthImporter provides an interface for loading the data from a dataset, especially depth images.
ICVLImporter, NYUImporter, MSRAImporter are specific instances of different importers.
Copyright 2015 Markus Oberweger, ICG,
Graz University of Technology <[email protected]>
This file is part of DeepPrior.
DeepPrior is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DeepPrior is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with DeepPrior. If not, see <http://www.gnu.org/licenses/>.
"""
import scipy.io
import numpy as np
from PIL import Image
import os
import progressbar as pb
import struct
from data.basetypes import DepthFrame, NamedImgSequence
from util.handdetector import HandDetector
from data.transformations import transformPoints2D
import cPickle
__author__ = "Paul Wohlhart <[email protected]>, Markus Oberweger <[email protected]>"
__copyright__ = "Copyright 2015, ICG, Graz University of Technology, Austria"
__credits__ = ["Paul Wohlhart", "Markus Oberweger"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Markus Oberweger"
__email__ = "[email protected]"
__status__ = "Development"
class DepthImporter(object):
"""
provide basic functionality to load depth data
"""
def __init__(self, fx, fy, ux, uy, hand=None):
"""
Initialize object
:param fx: focal length in x direction
:param fy: focal length in y direction
:param ux: principal point in x direction
:param uy: principal point in y direction
"""
self.fx = fx
self.fy = fy
self.ux = ux
self.uy = uy
self.depth_map_size = (320, 240)
self.refineNet = None
self.crop_joint_idx = 0
self.hand = hand
def jointsImgTo3D(self, sample):
"""
Normalize sample to metric 3D
:param sample: joints in (x,y,z) with x,y in image coordinates and z in mm
:return: normalized joints in mm
"""
ret = np.zeros((sample.shape[0], 3), np.float32)
for i in range(sample.shape[0]):
ret[i] = self.jointImgTo3D(sample[i])
return ret
def jointImgTo3D(self, sample):
"""
Normalize sample to metric 3D
:param sample: joints in (x,y,z) with x,y in image coordinates and z in mm
:return: normalized joints in mm
"""
ret = np.zeros((3,), np.float32)
# convert to metric using f
ret[0] = (sample[0]-self.ux)*sample[2]/self.fx
ret[1] = (sample[1]-self.uy)*sample[2]/self.fy
ret[2] = sample[2]
return ret
def joints3DToImg(self, sample):
"""
Denormalize sample from metric 3D to image coordinates
:param sample: joints in (x,y,z) with x,y and z in mm
:return: joints in (x,y,z) with x,y in image coordinates and z in mm
"""
ret = np.zeros((sample.shape[0], 3), np.float32)
for i in range(sample.shape[0]):
ret[i] = self.joint3DToImg(sample[i])
return ret
def joint3DToImg(self, sample):
"""
Denormalize sample from metric 3D to image coordinates
:param sample: joints in (x,y,z) with x,y and z in mm
:return: joints in (x,y,z) with x,y in image coordinates and z in mm
"""
ret = np.zeros((3,), np.float32)
# convert to metric using f
if sample[2] == 0.:
ret[0] = self.ux
ret[1] = self.uy
return ret
ret[0] = sample[0]/sample[2]*self.fx+self.ux
ret[1] = sample[1]/sample[2]*self.fy+self.uy
ret[2] = sample[2]
return ret
def getCameraProjection(self):
"""
Get homogenous camera projection matrix
:return: 4x4 camera projection matrix
"""
ret = np.zeros((4, 4), np.float32)
ret[0, 0] = self.fx
ret[1, 1] = self.fy
ret[2, 2] = 1.
ret[0, 2] = self.ux
ret[1, 2] = self.uy
ret[3, 2] = 1.
return ret
def getCameraIntrinsics(self):
"""
Get intrinsic camera matrix
:return: 3x3 intrinsic camera matrix
"""
ret = np.zeros((3, 3), np.float32)
ret[0, 0] = self.fx
ret[1, 1] = self.fy
ret[2, 2] = 1.
ret[0, 2] = self.ux
ret[1, 2] = self.uy
return ret
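# i.e. the standard pinhole model used by jointImgTo3D/joint3DToImg above:
#
#     K = [[fx,  0, ux],
#          [ 0, fy, uy],
#          [ 0,  0,  1]]
#
# so a 3D point (x, y, z) in mm projects to pixel (fx*x/z + ux, fy*y/z + uy).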
def showAnnotatedDepth(self, frame):
"""
Show the depth image
:param frame: image to show
:return:
"""
raise NotImplementedError("Must be overloaded by base!")
@staticmethod
def depthToPCL(dpt, T, background_val=0.):
# get valid points and transform
pts = np.asarray(np.where(~np.isclose(dpt, background_val))).transpose()
pts = np.concatenate([pts[:, [1, 0]] + 0.5, np.ones((pts.shape[0], 1), dtype='float32')], axis=1)
pts = np.dot(np.linalg.inv(np.asarray(T)), pts.T).T
pts = (pts[:, 0:2] / pts[:, 2][:, None]).reshape((pts.shape[0], 2))
# replace the invalid data
depth = dpt[(~np.isclose(dpt, background_val))]
# get x and y data in a vectorized way
row = (pts[:, 0] - 160.) / 241.42 * depth
col = (pts[:, 1] - 120.) / 241.42 * depth
# combine x,y,depth
return np.column_stack((row, col, depth))
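# Usage sketch (illustrative only): with `dpt` and `M` as returned by
# HandDetector.cropArea3D in loadSequence below, this yields a metric point cloud:
#
#   >>> pcl = DepthImporter.depthToPCL(dpt, M)   # shape (N, 3), in mm
#
# The hard-coded 160/120/241.42 are the principal point and focal length of
# the ICVL/MSRA depth camera; importers with other intrinsics override this
# method (see NYUImporter.depthToPCL).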
def loadRefineNetLazy(self, net):
if isinstance(net, basestring):
if os.path.exists(net):
from net.scalenet import ScaleNet, ScaleNetParams
comrefNetParams = ScaleNetParams(type=5, nChan=1, wIn=128, hIn=128, batchSize=1, resizeFactor=2,
numJoints=1, nDims=3)
self.refineNet = ScaleNet(np.random.RandomState(23455), cfgParams=comrefNetParams)
self.refineNet.load(net)
else:
raise EnvironmentError("File not found: {}".format(net))
class ICVLImporter(DepthImporter):
"""
provide functionality to load data from the ICVL dataset
"""
def __init__(self, basepath, useCache=True, cacheDir='./cache/', refineNet=None, hand=None):
"""
Constructor
:param basepath: base path of the ICVL dataset
:return:
"""
super(ICVLImporter, self).__init__(241.42, 241.42, 160., 120., hand) # see Qian et.al.
self.depth_map_size = (320, 240)
self.basepath = basepath
self.useCache = useCache
self.cacheDir = cacheDir
self.numJoints = 16
self.crop_joint_idx = 0
self.refineNet = refineNet
self.default_cubes = {'train': (250, 250, 250),
'test_seq_1': (250, 250, 250),
'test_seq_2': (250, 250, 250)}
self.sides = {'train': 'right', 'test_seq_1': 'right', 'test_seq_2': 'right'}
def loadDepthMap(self, filename):
"""
Read a depth-map
:param filename: file name to load
:return: image data of depth image
"""
img = Image.open(filename) # open image
assert len(img.getbands()) == 1 # ensure depth image
imgdata = np.asarray(img, np.float32)
return imgdata
def getDepthMapNV(self):
"""
Get the value of invalid depth values in the depth map
:return: value
"""
return 32001
def loadSequence(self, seqName, subSeq=None, Nmax=float('inf'), shuffle=False, rng=None, docom=False, cube=None):
"""
Load an image sequence from the dataset
:param seqName: sequence name, e.g. train
:param subSeq: list of subsequence names, e.g. 0, 45, 122-5
:param Nmax: maximum number of samples to load
:return: returns named image sequence
"""
if (subSeq is not None) and (not isinstance(subSeq, list)):
raise TypeError("subSeq must be None or list")
if cube is None:
config = {'cube': self.default_cubes[seqName]}
else:
assert isinstance(cube, tuple)
assert len(cube) == 3
config = {'cube': cube}
if subSeq is None:
pickleCache = '{}/{}_{}_{}_{}_{}_cache.pkl'.format(self.cacheDir, self.__class__.__name__, seqName, self.hand,
HandDetector.detectionModeToString(docom, self.refineNet is not None), config['cube'][0])
else:
pickleCache = '{}/{}_{}_{}_{}_{}_{}_cache.pkl'.format(self.cacheDir, self.__class__.__name__, seqName,
''.join(subSeq), self.hand,
HandDetector.detectionModeToString(docom, self.refineNet is not None), config['cube'][0])
if self.useCache:
if os.path.isfile(pickleCache):
print("Loading cache data from {}".format(pickleCache))
f = open(pickleCache, 'rb')
(seqName, data, config) = cPickle.load(f)
f.close()
# shuffle data
if shuffle and rng is not None:
print("Shuffling")
rng.shuffle(data)
if not(np.isinf(Nmax)):
return NamedImgSequence(seqName, data[0:Nmax], config)
else:
return NamedImgSequence(seqName, data, config)
# check for multiple subsequences
if subSeq is not None:
if len(subSeq) > 1:
missing = False
for i in range(len(subSeq)):
if not os.path.isfile('{}/{}_{}_{}_{}_{}_cache.pkl'.format(self.cacheDir, self.__class__.__name__,
seqName, self.hand, subSeq[i],
HandDetector.detectionModeToString(docom, self.refineNet is not None))):
missing = True
print("missing: {}".format(subSeq[i]))
break
if not missing:
# load first data
pickleCache = '{}/{}_{}_{}_{}_{}_cache.pkl'.format(self.cacheDir, self.__class__.__name__,
seqName, self.hand, subSeq[0],
HandDetector.detectionModeToString(docom, self.refineNet is not None))
print("Loading cache data from {}".format(pickleCache))
f = open(pickleCache, 'rb')
(seqName, fullData, config) = cPickle.load(f)
f.close()
# load rest of data
for i in range(1, len(subSeq)):
pickleCache = '{}/{}_{}_{}_{}_{}_cache.pkl'.format(self.cacheDir, self.__class__.__name__,
seqName, self.hand, subSeq[i],
HandDetector.detectionModeToString(docom, self.refineNet is not None))
print("Loading cache data from {}".format(pickleCache))
f = open(pickleCache, 'rb')
(seqName, data, config) = cPickle.load(f)
fullData.extend(data)
f.close()
# shuffle data
if shuffle and rng is not None:
print("Shuffling")
rng.shuffle(fullData)
if not(np.isinf(Nmax)):
return NamedImgSequence(seqName, fullData[0:Nmax], config)
else:
return NamedImgSequence(seqName, fullData, config)
self.loadRefineNetLazy(self.refineNet)
# Load the dataset
objdir = '{}/Depth/'.format(self.basepath)
trainlabels = '{}/{}.txt'.format(self.basepath, seqName)
inputfile = open(trainlabels)
txt = 'Loading {}'.format(seqName)
pbar = pb.ProgressBar(maxval=len(inputfile.readlines()), widgets=[txt, pb.Percentage(), pb.Bar()])
pbar.start()
inputfile.seek(0)
data = []
i = 0
for line in inputfile:
# early stop
if len(data) >= Nmax:
break
part = line.split(' ')
# check for subsequences and skip them if necessary
subSeqName = ''
if subSeq is not None:
p = part[0].split('/')
# handle original data (unrotated '0') separately
if ('0' in subSeq) and len(p[0]) > 6:
pass
elif not('0' in subSeq) and len(p[0]) > 6:
i += 1
continue
elif (p[0] in subSeq) and len(p[0]) <= 6:
pass
elif not(p[0] in subSeq) and len(p[0]) <= 6:
i += 1
continue
if len(p[0]) <= 6:
subSeqName = p[0]
else:
subSeqName = '0'
dptFileName = '{}/{}'.format(objdir, part[0])
if not os.path.isfile(dptFileName):
print("File {} does not exist!".format(dptFileName))
i += 1
continue
dpt = self.loadDepthMap(dptFileName)
if self.hand is not None:
if self.hand != self.sides[seqName]:
raise NotImplementedError()
dpt = dpt[:, ::-1]
# joints in image coordinates
gtorig = np.zeros((self.numJoints, 3), np.float32)
for joint in range(self.numJoints):
for xyz in range(0, 3):
gtorig[joint, xyz] = part[joint*3+xyz+1]
# normalized joints in 3D coordinates
gt3Dorig = self.jointsImgTo3D(gtorig)
# print gt3D
# self.showAnnotatedDepth(DepthFrame(dpt,gtorig,gtorig,0,gt3Dorig,gt3Dcrop,0,dptFileName,subSeqName,''))
# Detect hand
hd = HandDetector(dpt, self.fx, self.fy, refineNet=self.refineNet, importer=self)
if not hd.checkImage(1):
print("Skipping image {}, no content".format(dptFileName))
i += 1
continue
try:
dpt, M, com = hd.cropArea3D(com=gtorig[self.crop_joint_idx], size=config['cube'], docom=docom)
except UserWarning:
print("Skipping image {}, no hand detected".format(dptFileName))
i += 1
continue
com3D = self.jointImgTo3D(com)
gt3Dcrop = gt3Dorig - com3D # normalize to com
gtcrop = transformPoints2D(gtorig, M)
# print("{}".format(gt3Dorig))
# self.showAnnotatedDepth(DepthFrame(dpt,gtorig,gtcrop,M,gt3Dorig,gt3Dcrop,com3D,dptFileName,subSeqName,''))
data.append(DepthFrame(dpt.astype(np.float32), gtorig, gtcrop, M, gt3Dorig, gt3Dcrop, com3D, dptFileName,
subSeqName, 'left', {}))
pbar.update(i)
i += 1
inputfile.close()
pbar.finish()
print("Loaded {} samples.".format(len(data)))
if self.useCache:
print("Save cache data to {}".format(pickleCache))
f = open(pickleCache, 'wb')
cPickle.dump((seqName, data, config), f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
# shuffle data
if shuffle and rng is not None:
print("Shuffling")
rng.shuffle(data)
return NamedImgSequence(seqName, data, config)
def loadBaseline(self, filename, firstName=False):
"""
Load baseline data
:param filename: file name of data
:return: list with joint coordinates
"""
def nonblank_lines(f):
for l in f:
line = l.rstrip()
if line:
yield line
inputfile = open(filename)
inputfile.seek(0)
if firstName:
off = 1
else:
off = 0
data = []
for line in nonblank_lines(inputfile):
part = line.strip().split(' ')
# joints in image coordinates
ev = np.zeros((self.numJoints, 3), np.float32)
for joint in range(ev.shape[0]):
for xyz in range(0, 3):
ev[joint, xyz] = part[joint*3+xyz+off]
gt3Dworld = self.jointsImgTo3D(ev)
data.append(gt3Dworld)
return data
def loadBaseline2D(self, filename, firstName=False):
"""
Load baseline data
:param filename: file name of data
:return: list with joint coordinates
"""
inputfile = open(filename)
inputfile.seek(0)
if firstName is True:
off = 1
else:
off = 0
data = []
for line in inputfile:
part = line.split(' ')
# joints in image coordinates
ev = np.zeros((self.numJoints,2),np.float32)
for joint in range(ev.shape[0]):
for xyz in range(0, 2):
ev[joint,xyz] = part[joint*3+xyz+off]
data.append(ev)
return data
def showAnnotatedDepth(self, frame):
"""
Show the depth image
:param frame: image to show
:return:
"""
import matplotlib
import matplotlib.pyplot as plt
print("img min {}, max {}".format(frame.dpt.min(), frame.dpt.max()))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(frame.dpt, cmap=matplotlib.cm.jet, interpolation='nearest')
ax.scatter(frame.gtcrop[:, 0], frame.gtcrop[:, 1])
ax.plot(frame.gtcrop[0:4, 0], frame.gtcrop[0:4, 1], c='r')
ax.plot(np.hstack((frame.gtcrop[0, 0], frame.gtcrop[4:7, 0])),
np.hstack((frame.gtcrop[0, 1], frame.gtcrop[4:7, 1])), c='r')
ax.plot(np.hstack((frame.gtcrop[0, 0], frame.gtcrop[7:10, 0])),
np.hstack((frame.gtcrop[0, 1], frame.gtcrop[7:10, 1])), c='r')
ax.plot(np.hstack((frame.gtcrop[0, 0], frame.gtcrop[10:13, 0])),
np.hstack((frame.gtcrop[0, 1], frame.gtcrop[10:13, 1])), c='r')
ax.plot(np.hstack((frame.gtcrop[0, 0], frame.gtcrop[13:16, 0])),
np.hstack((frame.gtcrop[0, 1], frame.gtcrop[13:16, 1])), c='r')
def format_coord(x, y):
numrows, numcols = frame.dpt.shape
col = int(x + 0.5)
row = int(y + 0.5)
if col >= 0 and col < numcols and row >= 0 and row < numrows:
z = frame.dpt[row, col]
return 'x=%1.4f, y=%1.4f, z=%1.4f' % (x, y, z)
else:
return 'x=%1.4f, y=%1.4f' % (x, y)
ax.format_coord = format_coord
for i in range(frame.gtcrop.shape[0]):
ax.annotate(str(i), (int(frame.gtcrop[i, 0]), int(frame.gtcrop[i, 1])))
plt.show()
class MSRA15Importer(DepthImporter):
"""
provide functionality to load data from the MSRA 2015 dataset
faulty images:
- P2/TIP: 172, 173,174
- P2/MP: 173, 174, 175, 345-354, 356, 359, 360
- P3/T: 120, 489
- P8/4: 168
"""
def __init__(self, basepath, useCache=True, cacheDir='./cache/', refineNet=None, detectorNet=None, derotNet=None, hand=None):
"""
Constructor
:param basepath: base path of the MSRA dataset
:return:
"""
super(MSRA15Importer, self).__init__(241.42, 241.42, 160., 120., hand) # see Sun et.al.
self.depth_map_size = (320, 240)
self.basepath = basepath
self.useCache = useCache
self.cacheDir = cacheDir
self.refineNet = refineNet
self.derotNet = derotNet
self.detectorNet = detectorNet
self.numJoints = 21
self.crop_joint_idx = 5
self.default_cubes = {'P0': (200, 200, 200),
'P1': (200, 200, 200),
'P2': (200, 200, 200),
'P3': (180, 180, 180),
'P4': (180, 180, 180),
'P5': (180, 180, 180),
'P6': (170, 170, 170),
'P7': (160, 160, 160),
'P8': (150, 150, 150)}
self.sides = {'P0': 'right', 'P1': 'right', 'P2': 'right', 'P3': 'right', 'P4': 'right', 'P5': 'right',
'P6': 'right', 'P7': 'right', 'P8': 'right'}
def loadDepthMap(self, filename):
"""
Read a depth-map
:param filename: file name to load
:return: image data of depth image
"""
with open(filename, 'rb') as f:
# first 6 uint define the full image
width = struct.unpack('i', f.read(4))[0]
height = struct.unpack('i', f.read(4))[0]
left = struct.unpack('i', f.read(4))[0]
top = struct.unpack('i', f.read(4))[0]
right = struct.unpack('i', f.read(4))[0]
bottom = struct.unpack('i', f.read(4))[0]
patch = np.fromfile(f, dtype='float32', sep="")
imgdata = np.zeros((height, width), dtype='float32')
imgdata[top:bottom, left:right] = patch.reshape([bottom-top, right-left])
return imgdata
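# Format sketch (inferred from the reader above, not from MSRA documentation):
# each *_depth.bin file is six int32 values -- image width, image height, then
# the bounding box left, top, right, bottom -- followed by the float32 depth
# values of that box in row-major order. A synthetic file for testing could be
# written as:
#
#   >>> import struct
#   >>> import numpy as np
#   >>> patch = 500. * np.ones((4, 6), dtype='float32')
#   >>> with open('000000_depth.bin', 'wb') as f:
#   ...     f.write(struct.pack('6i', 320, 240, 10, 20, 16, 24))
#   ...     patch.tofile(f)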
def getDepthMapNV(self):
"""
Get the value of invalid depth values in the depth map
:return: value
"""
return 32001
def loadSequence(self, seqName, subSeq=None, Nmax=float('inf'), shuffle=False, rng=None, docom=False, cube=None):
"""
Load an image sequence from the dataset
:param seqName: sequence name, e.g. subject1
:param Nmax: maximum number of samples to load
:return: returns named image sequence
"""
if (subSeq is not None) and (not isinstance(subSeq, list)):
raise TypeError("subSeq must be None or list")
if cube is None:
config = {'cube': self.default_cubes[seqName]}
else:
assert isinstance(cube, tuple)
assert len(cube) == 3
config = {'cube': cube}
if subSeq is None:
pickleCache = '{}/{}_{}_{}_{}_{}_cache.pkl'.format(self.cacheDir, self.__class__.__name__, seqName, self.hand,
HandDetector.detectionModeToString(docom, self.refineNet is not None), config['cube'][0])
else:
pickleCache = '{}/{}_{}_{}_{}_{}_{}_cache.pkl'.format(self.cacheDir, self.__class__.__name__, seqName, self.hand,
''.join(subSeq), HandDetector.detectionModeToString(docom, self.refineNet is not None), config['cube'][0])
if self.useCache and os.path.isfile(pickleCache):
print("Loading cache data from {}".format(pickleCache))
f = open(pickleCache, 'rb')
(seqName, data, config) = cPickle.load(f)
f.close()
# shuffle data
if shuffle and rng is not None:
print("Shuffling")
rng.shuffle(data)
if not(np.isinf(Nmax)):
return NamedImgSequence(seqName, data[0:Nmax], config)
else:
return NamedImgSequence(seqName, data, config)
self.loadRefineNetLazy(self.refineNet)
# Load the dataset
objdir = '{}/{}/'.format(self.basepath, seqName)
subdirs = sorted([name for name in os.listdir(objdir) if os.path.isdir(os.path.join(objdir, name))])
txt = 'Loading {}'.format(seqName)
nImgs = sum([len(files) for r, d, files in os.walk(objdir)]) // 2
pbar = pb.ProgressBar(maxval=nImgs, widgets=[txt, pb.Percentage(), pb.Bar()])
pbar.start()
data = []
pi = 0
for subdir in subdirs:
# check for subsequences and skip them if necessary
subSeqName = ''
if subSeq is not None:
if subdir not in subSeq:
continue
subSeqName = subdir
# iterate all subdirectories
trainlabels = '{}/{}/joint.txt'.format(objdir, subdir)
inputfile = open(trainlabels)
# read number of samples
nImgs = int(inputfile.readline())
for i in range(nImgs):
# early stop
if len(data) >= Nmax:
break
line = inputfile.readline()
part = line.split(' ')
dptFileName = '{}/{}/{}_depth.bin'.format(objdir, subdir, str(i).zfill(6))
if not os.path.isfile(dptFileName):
print("File {} does not exist!".format(dptFileName))
continue
dpt = self.loadDepthMap(dptFileName)
# joints in image coordinates
gt3Dorig = np.zeros((self.numJoints, 3), np.float32)
for joint in range(gt3Dorig.shape[0]):
for xyz in range(0, 3):
gt3Dorig[joint, xyz] = part[joint*3+xyz]
# invert axis
# gt3Dorig[:, 0] *= (-1.)
# gt3Dorig[:, 1] *= (-1.)
gt3Dorig[:, 2] *= (-1.)
# normalized joints in 3D coordinates
gtorig = self.joints3DToImg(gt3Dorig)
if self.hand is not None:
if self.hand != self.sides[seqName]:
gtorig[:, 0] -= dpt.shape[1] / 2.
gtorig[:, 0] *= (-1)
gtorig[:, 0] += dpt.shape[1] / 2.
gt3Dorig = self.jointsImgTo3D(gtorig)
dpt = dpt[:, ::-1]
# print gt3D
# self.showAnnotatedDepth(DepthFrame(dpt,gtorig,gtorig,0,gt3Dorig,gt3Dcrop,com3D,dptFileName,'',''))
# Detect hand
hd = HandDetector(dpt, self.fx, self.fy, refineNet=self.refineNet, importer=self)
if not hd.checkImage(1.):
print("Skipping image {}, no content".format(dptFileName))
continue
try:
dpt, M, com = hd.cropArea3D(com=gtorig[self.crop_joint_idx], size=config['cube'], docom=docom)
except UserWarning:
print("Skipping image {}, no hand detected".format(dptFileName))
continue
com3D = self.jointImgTo3D(com)
gt3Dcrop = gt3Dorig - com3D # normalize to com
gtcrop = transformPoints2D(gtorig, M)
# print("{}".format(gt3Dorig))
# self.showAnnotatedDepth(DepthFrame(dpt,gtorig,gtcrop,M,gt3Dorig,gt3Dcrop,com3D,dptFileName,'','',{}))
data.append(DepthFrame(dpt.astype(np.float32), gtorig, gtcrop, M, gt3Dorig, gt3Dcrop, com3D,
dptFileName, subSeqName, self.sides[seqName], {}))
pbar.update(pi)
pi += 1
inputfile.close()
pbar.finish()
print("Loaded {} samples.".format(len(data)))
if self.useCache:
print("Save cache data to {}".format(pickleCache))
f = open(pickleCache, 'wb')
cPickle.dump((seqName, data, config), f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
# shuffle data
if shuffle and rng is not None:
print("Shuffling")
rng.shuffle(data)
return NamedImgSequence(seqName, data, config)
def jointsImgTo3D(self, sample):
"""
Normalize sample to metric 3D
:param sample: joints in (x,y,z) with x,y in image coordinates and z in mm
:return: normalized joints in mm
"""
ret = np.zeros((sample.shape[0], 3), np.float32)
for i in xrange(sample.shape[0]):
ret[i] = self.jointImgTo3D(sample[i])
return ret
def jointImgTo3D(self, sample):
"""
Normalize sample to metric 3D
:param sample: joints in (x,y,z) with x,y in image coordinates and z in mm
:return: normalized joints in mm
"""
ret = np.zeros((3,), np.float32)
ret[0] = (sample[0] - self.ux) * sample[2] / self.fx
ret[1] = (self.uy - sample[1]) * sample[2] / self.fy
ret[2] = sample[2]
return ret
def joints3DToImg(self, sample):
"""
Denormalize sample from metric 3D to image coordinates
:param sample: joints in (x,y,z) with x,y and z in mm
:return: joints in (x,y,z) with x,y in image coordinates and z in mm
"""
ret = np.zeros((sample.shape[0], 3), np.float32)
for i in xrange(sample.shape[0]):
ret[i] = self.joint3DToImg(sample[i])
return ret
def joint3DToImg(self, sample):
"""
Denormalize sample from metric 3D to image coordinates
:param sample: joints in (x,y,z) with x,y and z in mm
:return: joints in (x,y,z) with x,y in image coordinates and z in mm
"""
ret = np.zeros((3, ), np.float32)
if sample[2] == 0.:
ret[0] = self.ux
ret[1] = self.uy
return ret
ret[0] = sample[0]/sample[2]*self.fx+self.ux
ret[1] = self.uy-sample[1]/sample[2]*self.fy
ret[2] = sample[2]
return ret
def getCameraIntrinsics(self):
"""
Get intrinsic camera matrix
:return: 3x3 intrinsic camera matrix
"""
ret = np.zeros((3, 3), np.float32)
ret[0, 0] = self.fx
ret[1, 1] = -self.fy
ret[2, 2] = 1
ret[0, 2] = self.ux
ret[1, 2] = self.uy
return ret
def getCameraProjection(self):
"""
Get homogenous camera projection matrix
:return: 4x4 camera projection matrix
"""
ret = np.zeros((4, 4), np.float32)
ret[0, 0] = self.fx
ret[1, 1] = -self.fy
ret[2, 2] = 1.
ret[0, 2] = self.ux
ret[1, 2] = self.uy
ret[3, 2] = 1.
return ret
def showAnnotatedDepth(self, frame):
"""
Show the depth image
:param frame: image to show
:return:
"""
import matplotlib
import matplotlib.pyplot as plt
print("img min {}, max {}".format(frame.dpt.min(),frame.dpt.max()))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(frame.dpt, cmap=matplotlib.cm.jet, interpolation='nearest')
ax.scatter(frame.gtcrop[:, 0], frame.gtcrop[:, 1])
ax.plot(frame.gtcrop[0:5, 0], frame.gtcrop[0:5, 1], c='r')
ax.plot(np.hstack((frame.gtcrop[0, 0], frame.gtcrop[5:9, 0])), np.hstack((frame.gtcrop[0, 1], frame.gtcrop[5:9, 1])), c='r')
ax.plot(np.hstack((frame.gtcrop[0, 0], frame.gtcrop[9:13, 0])), np.hstack((frame.gtcrop[0, 1], frame.gtcrop[9:13, 1])), c='r')
ax.plot(np.hstack((frame.gtcrop[0, 0], frame.gtcrop[13:17, 0])), np.hstack((frame.gtcrop[0, 1], frame.gtcrop[13:17, 1])), c='r')
ax.plot(np.hstack((frame.gtcrop[0, 0], frame.gtcrop[17:21, 0])), np.hstack((frame.gtcrop[0, 1], frame.gtcrop[17:21, 1])), c='r')
def format_coord(x, y):
numrows, numcols = frame.dpt.shape
col = int(x+0.5)
row = int(y+0.5)
if 0 <= col < numcols and 0 <= row < numrows:
z = frame.dpt[row, col]
return 'x=%1.4f, y=%1.4f, z=%1.4f'%(x, y, z)
else:
return 'x=%1.4f, y=%1.4f'%(x, y)
ax.format_coord = format_coord
for i in range(frame.gtcrop.shape[0]):
ax.annotate(str(i), (int(frame.gtcrop[i, 0]), int(frame.gtcrop[i, 1])))
plt.show()
@staticmethod
def depthToPCL(dpt, T, background_val=0.):
# get valid points and transform
pts = np.asarray(np.where(~np.isclose(dpt, background_val))).transpose()
pts = np.concatenate([pts[:, [1, 0]] + 0.5, np.ones((pts.shape[0], 1), dtype='float32')], axis=1)
pts = np.dot(np.linalg.inv(np.asarray(T)), pts.T).T
pts = (pts[:, 0:2] / pts[:, 2][:, None]).reshape((pts.shape[0], 2))
# replace the invalid data
depth = dpt[(~np.isclose(dpt, background_val))]
# get x and y data in a vectorized way
row = (pts[:, 0] - 160.) / 241.42 * depth
col = (120. - pts[:, 1]) / 241.42 * depth
# combine x,y,depth
return np.column_stack((row, col, depth))
class NYUImporter(DepthImporter):
"""
provide functionality to load data from the NYU hand dataset
"""
def __init__(self, basepath, useCache=True, cacheDir='./cache/', refineNet=None,
allJoints=False, hand=None):
"""
Constructor
:param basepath: base path of the ICVL dataset
:return:
"""
super(NYUImporter, self).__init__(588.03, 587.07, 320., 240., hand)
self.depth_map_size = (640, 480)
self.basepath = basepath
self.useCache = useCache
self.cacheDir = cacheDir
self.allJoints = allJoints
self.numJoints = 36
if self.allJoints:
self.crop_joint_idx = 32
else:
self.crop_joint_idx = 13
self.default_cubes = {'train': (300, 300, 300),
'test_1': (300, 300, 300),
'test_2': (250, 250, 250),
'test': (300, 300, 300),
'train_synth': (300, 300, 300),
'test_synth_1': (300, 300, 300),
'test_synth_2': (250, 250, 250),
'test_synth': (300, 300, 300)}
self.sides = {'train': 'right', 'test_1': 'right', 'test_2': 'right', 'test': 'right', 'train_synth': 'right',
'test_synth_1': 'right', 'test_synth_2': 'right', 'test_synth': 'right'}
# joint indices used for evaluation of Tompson et al.
self.restrictedJointsEval = [0, 3, 6, 9, 12, 15, 18, 21, 24, 25, 27, 30, 31, 32]
self.refineNet = refineNet
def loadDepthMap(self, filename):
"""
Read a depth-map
:param filename: file name to load
:return: image data of depth image
"""
img = Image.open(filename)
# top 8 bits of depth are packed into green channel and lower 8 bits into blue
assert len(img.getbands()) == 3
r, g, b = img.split()
r = np.asarray(r, np.int32)
g = np.asarray(g, np.int32)
b = np.asarray(b, np.int32)
dpt = np.bitwise_or(np.left_shift(g, 8), b)
imgdata = np.asarray(dpt, np.float32)
return imgdata
def getDepthMapNV(self):
"""
Get the value of invalid depth values in the depth map
:return: value
"""
return 32001
def loadSequence(self, seqName, Nmax=float('inf'), shuffle=False, rng=None, docom=False, cube=None):
"""
Load an image sequence from the dataset
:param seqName: sequence name, e.g. train
:param Nmax: maximum number of samples to load
:return: returns named image sequence
"""
if cube is None:
config = {'cube': self.default_cubes[seqName]}
else:
assert isinstance(cube, tuple)
assert len(cube) == 3
config = {'cube': cube}
pickleCache = '{}/{}_{}_{}_{}_{}_{}__cache.pkl'.format(self.cacheDir, self.__class__.__name__, seqName,
self.hand, self.allJoints,
HandDetector.detectionModeToString(docom, self.refineNet is not None), config['cube'][0])
if self.useCache:
if os.path.isfile(pickleCache):
print("Loading cache data from {}".format(pickleCache))
f = open(pickleCache, 'rb')
(seqName, data, config) = cPickle.load(f)
f.close()
# shuffle data
if shuffle and rng is not None:
print("Shuffling")
rng.shuffle(data)
if not(np.isinf(Nmax)):
return NamedImgSequence(seqName, data[0:Nmax], config)
else:
return NamedImgSequence(seqName, data, config)
self.loadRefineNetLazy(self.refineNet)
# Load the dataset
objdir = '{}/{}/'.format(self.basepath, seqName)
trainlabels = '{}/{}/joint_data.mat'.format(self.basepath, seqName)
mat = scipy.io.loadmat(trainlabels)
joints3D = mat['joint_xyz'][0]
joints2D = mat['joint_uvd'][0]
if self.allJoints:
eval_idxs = np.arange(36)
else:
eval_idxs = self.restrictedJointsEval
self.numJoints = len(eval_idxs)
txt = 'Loading {}'.format(seqName)
pbar = pb.ProgressBar(maxval=joints3D.shape[0], widgets=[txt, pb.Percentage(), pb.Bar()])
pbar.start()
data = []
i = 0
for line in range(joints3D.shape[0]):
dptFileName = '{0:s}/depth_1_{1:07d}.png'.format(objdir, line+1)
if not os.path.isfile(dptFileName):
print("File {} does not exist!".format(dptFileName))
i += 1
continue
dpt = self.loadDepthMap(dptFileName)
if self.hand is not None:
if self.hand != self.sides[seqName]:
raise NotImplementedError()
dpt = dpt[:, ::-1]
# joints in image coordinates
gtorig = np.zeros((self.numJoints, 3), np.float32)
jt = 0
for ii in range(joints2D.shape[1]):
if ii not in eval_idxs:
continue
gtorig[jt, 0] = joints2D[line, ii, 0]
gtorig[jt, 1] = joints2D[line, ii, 1]
gtorig[jt, 2] = joints2D[line, ii, 2]
jt += 1
# normalized joints in 3D coordinates
gt3Dorig = np.zeros((self.numJoints, 3), np.float32)
jt = 0
for jj in range(joints3D.shape[1]):
if jj not in eval_idxs:
continue
gt3Dorig[jt, 0] = joints3D[line, jj, 0]
gt3Dorig[jt, 1] = joints3D[line, jj, 1]
gt3Dorig[jt, 2] = joints3D[line, jj, 2]
jt += 1
# print gt3D
# self.showAnnotatedDepth(DepthFrame(dpt,gtorig,gtorig,0,gt3Dorig,gt3Dorig,0,dptFileName,'',''))
# Detect hand
hd = HandDetector(dpt, self.fx, self.fy, refineNet=self.refineNet, importer=self)
if not hd.checkImage(1):
print("Skipping image {}, no content".format(dptFileName))
i += 1
continue
try:
dpt, M, com = hd.cropArea3D(com=gtorig[self.crop_joint_idx], size=config['cube'], docom=docom)
except UserWarning:
print("Skipping image {}, no hand detected".format(dptFileName))
i += 1
continue
com3D = self.jointImgTo3D(com)
gt3Dcrop = gt3Dorig - com3D # normalize to com
gtcrop = transformPoints2D(gtorig, M)
# print("{}".format(gt3Dorig))
# self.showAnnotatedDepth(DepthFrame(dpt,gtorig,gtcrop,M,gt3Dorig,gt3Dcrop,com3D,dptFileName,'','',{}))
data.append(DepthFrame(dpt.astype(np.float32), gtorig, gtcrop, M, gt3Dorig, gt3Dcrop, com3D, dptFileName,
'', self.sides[seqName], {}))
pbar.update(i)
i += 1
# early stop
if len(data) >= Nmax:
break
pbar.finish()
print("Loaded {} samples.".format(len(data)))
if self.useCache:
print("Save cache data to {}".format(pickleCache))
f = open(pickleCache, 'wb')
cPickle.dump((seqName, data, config), f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
# shuffle data
if shuffle and rng is not None:
print("Shuffling")
rng.shuffle(data)
return NamedImgSequence(seqName, data, config)
def loadBaseline(self, filename, gt=None):
"""
Load baseline data
:param filename: file name of data
:return: list with joint coordinates
"""
if gt is not None:
mat = scipy.io.loadmat(filename)
names = mat['conv_joint_names'][0]
joints = mat['pred_joint_uvconf'][0]
self.numJoints = names.shape[0]
data = []
for dat in range(min(joints.shape[0], gt.shape[0])):
fname = '{0:s}/depth_1_{1:07d}.png'.format(os.path.split(filename)[0], dat+1)
if not os.path.isfile(fname):
continue
dm = self.loadDepthMap(fname)
# joints in image coordinates
ev = np.zeros((self.numJoints, 3), np.float32)
jt = 0
for i in range(joints.shape[1]):
if np.count_nonzero(joints[dat, i, :]) == 0:
continue
ev[jt, 0] = joints[dat, i, 0]
ev[jt, 1] = joints[dat, i, 1]
ev[jt, 2] = dm[int(ev[jt, 1]), int(ev[jt, 0])]
jt += 1
for jt in range(ev.shape[0]):
#if ev[jt,2] == 2001. or ev[jt,2] == 0.:
if abs(ev[jt, 2] - gt[dat, 13, 2]) > 150.:
ev[jt, 2] = gt[dat, jt, 2]#np.clip(ev[jt,2],gt[dat,13,2]-150.,gt[dat,13,2]+150.) # set to groundtruth if unknown
ev3D = self.jointsImgTo3D(ev)
data.append(ev3D)
return data
else:
def nonblank_lines(f):
for l in f:
line = l.rstrip()
if line:
yield line
inputfile = open(filename)
# first line specifies the number of 3D joints
self.numJoints = len(inputfile.readline().split(' ')) / 3
inputfile.seek(0)
data = []
for line in nonblank_lines(inputfile):
part = line.split(' ')
# joints in image coordinates
ev = np.zeros((self.numJoints, 3), np.float32)
for joint in range(ev.shape[0]):
for xyz in range(0, 3):
ev[joint, xyz] = part[joint*3+xyz]
gt3Dworld = self.jointsImgTo3D(ev)
data.append(gt3Dworld)
return data
def loadBaseline2D(self, filename):
"""
Load baseline data
:param filename: file name of data
:return: list with joint coordinates
"""
mat = scipy.io.loadmat(filename)
names = mat['conv_joint_names'][0]
joints = mat['pred_joint_uvconf'][0]
self.numJoints = names.shape[0]
data = []
for dat in range(joints.shape[0]):
# joints in image coordinates
ev = np.zeros((self.numJoints, 2), np.float32)
jt = 0
for i in range(joints.shape[1]):
if np.count_nonzero(joints[dat, i, :]) == 0:
continue
ev[jt, 0] = joints[dat, i, 0]
ev[jt, 1] = joints[dat, i, 1]
jt += 1
data.append(ev)
return data
def jointsImgTo3D(self, sample):
"""
Normalize sample to metric 3D
:param sample: joints in (x,y,z) with x,y in image coordinates and z in mm
:return: normalized joints in mm
"""
ret = np.zeros((sample.shape[0], 3), np.float32)
for i in xrange(sample.shape[0]):
ret[i] = self.jointImgTo3D(sample[i])
return ret
def jointImgTo3D(self, sample):
"""
Normalize sample to metric 3D
:param sample: joints in (x,y,z) with x,y in image coordinates and z in mm
:return: normalized joints in mm
"""
ret = np.zeros((3,), np.float32)
ret[0] = (sample[0] - self.ux) * sample[2] / self.fx
ret[1] = (self.uy - sample[1]) * sample[2] / self.fy
ret[2] = sample[2]
return ret
def joints3DToImg(self, sample):
"""
Denormalize sample from metric 3D to image coordinates
:param sample: joints in (x,y,z) with x,y and z in mm
:return: joints in (x,y,z) with x,y in image coordinates and z in mm
"""
ret = np.zeros((sample.shape[0], 3), np.float32)
for i in xrange(sample.shape[0]):
ret[i] = self.joint3DToImg(sample[i])
return ret
def joint3DToImg(self, sample):
"""
Denormalize sample from metric 3D to image coordinates
:param sample: joints in (x,y,z) with x,y and z in mm
:return: joints in (x,y,z) with x,y in image coordinates and z in mm
"""
ret = np.zeros((3, ), np.float32)
if sample[2] == 0.:
ret[0] = self.ux
ret[1] = self.uy
return ret
ret[0] = sample[0]/sample[2]*self.fx+self.ux
ret[1] = self.uy-sample[1]/sample[2]*self.fy
ret[2] = sample[2]
return ret
def getCameraIntrinsics(self):
"""
Get intrinsic camera matrix
:return: 3x3 intrinsic camera matrix
"""
ret = np.zeros((3, 3), np.float32)
ret[0, 0] = self.fx
ret[1, 1] = -self.fy
ret[2, 2] = 1
ret[0, 2] = self.ux
ret[1, 2] = self.uy
return ret
def getCameraProjection(self):
"""
Get homogenous camera projection matrix
:return: 4x4 camera projection matrix
"""
ret = np.zeros((4, 4), np.float32)
ret[0, 0] = self.fx
ret[1, 1] = -self.fy
ret[2, 2] = 1.
ret[0, 2] = self.ux
ret[1, 2] = self.uy
ret[3, 2] = 1.
return ret
def showAnnotatedDepth(self, frame):
"""
Show the depth image
:param frame: image to show
:return:
"""
import matplotlib
import matplotlib.pyplot as plt
print("img min {}, max {}".format(frame.dpt.min(), frame.dpt.max()))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(frame.dpt, cmap=matplotlib.cm.jet, interpolation='nearest')
ax.scatter(frame.gtcrop[:, 0], frame.gtcrop[:, 1])
ax.plot(np.hstack((frame.gtcrop[13, 0], frame.gtcrop[1::-1, 0])), np.hstack((frame.gtcrop[13, 1], frame.gtcrop[1::-1, 1])), c='r')
ax.plot(np.hstack((frame.gtcrop[13, 0], frame.gtcrop[3:1:-1, 0])), np.hstack((frame.gtcrop[13, 1], frame.gtcrop[3:1:-1, 1])), c='r')
ax.plot(np.hstack((frame.gtcrop[13, 0], frame.gtcrop[5:3:-1, 0])), np.hstack((frame.gtcrop[13, 1], frame.gtcrop[5:3:-1, 1])), c='r')
ax.plot(np.hstack((frame.gtcrop[13, 0], frame.gtcrop[7:5:-1, 0])), np.hstack((frame.gtcrop[13, 1], frame.gtcrop[7:5:-1, 1])), c='r')
ax.plot(np.hstack((frame.gtcrop[13, 0], frame.gtcrop[10:7:-1, 0])), np.hstack((frame.gtcrop[13, 1], frame.gtcrop[10:7:-1, 1])), c='r')
ax.plot(np.hstack((frame.gtcrop[13, 0], frame.gtcrop[11, 0])), np.hstack((frame.gtcrop[13, 1], frame.gtcrop[11, 1])), c='r')
ax.plot(np.hstack((frame.gtcrop[13, 0], frame.gtcrop[12, 0])), np.hstack((frame.gtcrop[13, 1], frame.gtcrop[12, 1])), c='r')
def format_coord(x, y):
numrows, numcols = frame.dpt.shape
col = int(x+0.5)
row = int(y+0.5)
if col>=0 and col<numcols and row>=0 and row<numrows:
z = frame.dpt[row,col]
return 'x=%1.4f, y=%1.4f, z=%1.4f' % (x, y, z)
else:
return 'x=%1.4f, y=%1.4f' % (x, y)
ax.format_coord = format_coord
for i in range(frame.gtcrop.shape[0]):
ax.annotate(str(i), (int(frame.gtcrop[i, 0]), int(frame.gtcrop[i, 1])))
plt.show()
@staticmethod
def depthToPCL(dpt, T, background_val=0.):
# get valid points and transform
pts = np.asarray(np.where(~np.isclose(dpt, background_val))).transpose()
pts = np.concatenate([pts[:, [1, 0]] + 0.5, np.ones((pts.shape[0], 1), dtype='float32')], axis=1)
pts = np.dot(np.linalg.inv(np.asarray(T)), pts.T).T
pts = (pts[:, 0:2] / pts[:, 2][:, None]).reshape((pts.shape[0], 2))
# replace the invalid data
depth = dpt[(~np.isclose(dpt, background_val))]
# get x and y data in a vectorized way
row = (pts[:, 0] - 320.) / 588.03 * depth
col = (240. - pts[:, 1]) / 587.07 * depth
# combine x,y,depth
return np.column_stack((row, col, depth))
| gpl-3.0 |
aetilley/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 227 | 5170 | """
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates how the accuracy of nearest neighbor queries with
Locality Sensitive Hashing Forest varies as the number of candidates and the
number of estimators (trees) change.
In the first plot, accuracy is measured with the number of candidates. Here,
the term "number of candidates" refers to maximum bound for the number of
distinct points retrieved from each tree to calculate the distances. Nearest
neighbors are selected from this pool of candidates. Number of estimators is
maintained at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. Number of trees
is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required, therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidate` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iteration to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
| bsd-3-clause |
astyl/wxPlotLab | mplotlab/graphics/Navigation.py | 2 | 2676 | # -*- coding: utf-8 -*-
from mplotlab import App
from matplotlib.backend_bases import NavigationToolbar2
import wx
class Cursors:
# this class is only used as a simple namespace
HAND, POINTER, SELECT_REGION, MOVE = list(range(4))
cursors = Cursors()
cursord = {
cursors.MOVE : wx.CURSOR_HAND,
cursors.HAND : wx.CURSOR_HAND,
cursors.POINTER : wx.CURSOR_ARROW,
cursors.SELECT_REGION : wx.CURSOR_CROSS,
}
class Navigation(NavigationToolbar2):
def __init__(self,*a,**k):
NavigationToolbar2.__init__(self, *a,**k)
def _init_toolbar(self,*args,**kwargs):
pass
def set_message(self,s):
""" display in the status bar
the mouseover data (x,y)
"""
try:
App().mainWin.GetStatusBar().SetStatusText(s,0)
except:
pass
def set_cursor(self, cursor):
cursor =wx.StockCursor(cursord[cursor])
self.canvas.SetCursor( cursor )
def dynamic_update(self):
d = self._idle
self._idle = False
if d:
self.canvas.draw()
self._idle = True
def press(self, event):
if self._active == 'ZOOM':
self.wxoverlay = wx.Overlay()
def release(self, event):
if self._active == 'ZOOM':
# When the mouse is released we reset the overlay and it
# restores the former content to the window.
self.wxoverlay.Reset()
del self.wxoverlay
def draw_rubberband(self, event, x0, y0, x1, y1):
# Use an Overlay to draw a rubberband-like bounding box.
dc = wx.ClientDC(self.canvas)
odc = wx.DCOverlay(self.wxoverlay, dc)
odc.Clear()
# Mac's DC is already the same as a GCDC, and it causes
# problems with the overlay if we try to use an actual
# wx.GCDC so don't try it.
if 'wxMac' not in wx.PlatformInfo:
dc = wx.GCDC(dc)
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
if y1 < y0: y0, y1 = y1, y0
if x1 < x0: x0, x1 = x1, x0
w = x1 - x0
h = y1 - y0
rect = wx.Rect(x0, y0, w, h)
rubberBandColor = '#C0C0FF' # or load from config?
# Set a pen for the border
color = wx.NamedColour(rubberBandColor)
dc.SetPen(wx.Pen(color, 1))
# use the same color, plus alpha for the brush
r, g, b = color.Get()
color.Set(r,g,b, 0x60)
dc.SetBrush(wx.Brush(color))
dc.DrawRectangleRect(rect)
| mit |
cdegroc/scikit-learn | sklearn/decomposition/dict_learning.py | 2 | 41775 | """ Dictionary learning
"""
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD
import time
import sys
import itertools
import warnings
from math import sqrt, floor, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..utils import array2d, check_random_state, gen_even_slices, deprecated
from ..utils.extmath import randomized_svd
from ..linear_model import Lasso, orthogonal_mp_gram, lars_path
def _sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_gram=True,
copy_cov=True, init=None, max_iter=1000):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_atoms, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: array, shape=(n_atoms, n_atoms)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_atoms, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * data'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_atoms)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_gram: boolean, optional
Whether to copy the precomputed Gram matrix; if False, it may be
overwritten.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
code: array of shape (n_components, n_features)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
alpha = float(alpha) if alpha is not None else None
dictionary = np.asarray(dictionary)
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
n_atoms = dictionary.shape[0]
# This will always use Gram
if gram is None:
# I think it's never safe to overwrite Gram when n_features > 1
# but I'd like to avoid the complicated logic.
# The parameter could be removed in this case. Discuss.
gram = np.dot(dictionary, dictionary.T)
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
if alpha is None:
alpha = 1.
alpha /= n_features # account for scaling
try:
new_code = np.empty((n_samples, n_atoms))
err_mgt = np.seterr(all='ignore')
for k in range(n_samples):
# A huge amount of time is spent in this loop. It needs to be
# tight.
_, _, coef_path_ = lars_path(dictionary.T, X[k], Xy=cov[:, k],
Gram=gram, alpha_min=alpha,
method='lasso')
new_code[k] = coef_path_[:, -1]
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
if alpha is None:
alpha = 1.
alpha /= n_features # account for scaling
new_code = np.empty((n_samples, n_atoms))
clf = Lasso(alpha=alpha, fit_intercept=False, precompute=gram,
max_iter=1000)
for k in xrange(n_samples):
# A huge amount of time is spent in this loop. It needs to be
# tight
if init is not None:
clf.coef_ = init[k] # Init with previous value of the code
clf.fit(dictionary.T, X[k])
new_code[k] = clf.coef_
elif algorithm == 'lars':
if n_nonzero_coefs is None:
n_nonzero_coefs = max(n_features / 10, 1)
try:
new_code = np.empty((n_samples, n_atoms))
err_mgt = np.seterr(all='ignore')
for k in xrange(n_samples):
# A huge amount of time is spent in this loop. It needs to be
# tight.
_, _, coef_path_ = lars_path(dictionary.T, X[k], Xy=cov[:, k],
Gram=gram, method='lar',
max_iter=n_nonzero_coefs)
new_code[k] = coef_path_[:, -1]
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
if alpha is None:
alpha = 1.
new_code = (np.sign(cov) * np.maximum(np.abs(cov) - alpha, 0)).T
elif algorithm == 'omp':
if n_nonzero_coefs is None and alpha is None:
n_nonzero_coefs = max(n_features / 10, 1)
norms_squared = np.sum((X ** 2), axis=1)
new_code = orthogonal_mp_gram(gram, cov, n_nonzero_coefs, alpha,
norms_squared, copy_Xy=copy_cov
).T
else:
raise NotImplementedError('Sparse coding method %s not implemented' %
algorithm)
return new_code
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_gram=True,
copy_cov=True, init=None, max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_atoms, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_atoms, n_atoms)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_atoms, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_atoms)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_gram: boolean, optional
Whether to copy the precomputed Gram matrix; if False, it may be
overwritten.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_atoms)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
warnings.warn("Please note: the interface of sparse_encode has changed: "
"It now follows the dictionary learning API and it also "
"handles parallelization. Please read the docstring for "
"more information.")
dictionary = np.asarray(dictionary)
X = np.asarray(X)
n_samples, n_features = X.shape
n_atoms = dictionary.shape[0]
if gram is None:
copy_gram = False
gram = np.dot(dictionary, dictionary.T)
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if n_jobs == 1 or algorithm == 'threshold':
return _sparse_encode(X, dictionary, gram, cov, algorithm,
n_nonzero_coefs, alpha, copy_gram, copy_cov, init)
code = np.empty((n_samples, n_atoms))
slices = list(gen_even_slices(n_samples, n_jobs))
code_views = Parallel(n_jobs=n_jobs)(
delayed(sparse_encode)(X[this_slice], dictionary, gram,
cov[:, this_slice], algorithm,
n_nonzero_coefs, alpha,
copy_gram, copy_cov,
init=init[this_slice] if init is not
None else None)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
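# Usage sketch (illustrative only): encode signals against a dictionary with
# unit-norm rows, keeping at most two atoms per sample via OMP.
#
#   >>> import numpy as np
#   >>> rng = np.random.RandomState(0)
#   >>> D = rng.randn(15, 64)
#   >>> D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
#   >>> X = rng.randn(5, 64)
#   >>> code = sparse_encode(X, D, algorithm='omp', n_nonzero_coefs=2)
#   >>> code.shape
#   (5, 15)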
@deprecated('Use sparse_encode instead')
def sparse_encode_parallel():
pass
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_atoms)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_atoms, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_atoms)
Updated dictionary.
"""
n_atoms = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in xrange(n_atoms):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print "Adding new random atom"
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
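# Illustrative sketch (not part of the original module): a single call to the
# in-place dictionary update on toy data. Shapes follow the docstring above:
# dictionary is (n_features, n_atoms), Y is (n_features, n_samples) and code is
# (n_atoms, n_samples). The helper name `_demo_update_dict` and the sizes are
# hypothetical.
def _demo_update_dict():
    import numpy as np
    rng = np.random.RandomState(0)
    n_features, n_atoms, n_samples = 8, 3, 20
    dictionary = rng.randn(n_features, n_atoms)
    Y = rng.randn(n_features, n_samples)
    code = rng.randn(n_atoms, n_samples)
    dictionary, residuals = _update_dict(dictionary, Y, code,
                                         return_r2=True, random_state=0)
    # every updated atom should now have (approximately) unit Euclidean norm
    return np.sqrt((dictionary ** 2).sum(axis=0)), residuals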
def dict_learning(X, n_atoms, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_atoms
where V is the dictionary and U is the sparse code.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_atoms: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_atoms, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_atoms),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
code: array of shape (n_samples, n_atoms)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_atoms, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
    # Init U and V with SVD of X
    if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_atoms <= r: # True even if n_atoms=None
code = code[:, :n_atoms]
dictionary = dictionary[:n_atoms, :]
else:
code = np.c_[code, np.zeros((len(code), n_atoms - r))]
dictionary = np.r_[dictionary,
np.zeros((n_atoms - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print '[dict_learning]',
for ii in xrange(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)" %
(ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print ""
elif verbose:
print "--- Convergence reached after %d iterations" % ii
break
if ii % 5 == 0 and callback is not None:
callback(locals())
return code, dictionary, errors
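# Illustrative sketch (not part of the original module): a tiny batch
# dictionary-learning run. The helper name `_demo_dict_learning` and the toy
# sizes are hypothetical.
def _demo_dict_learning():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(30, 12)                                # (n_samples, n_features)
    code, dictionary, errors = dict_learning(X, n_atoms=5, alpha=1.0,
                                             max_iter=20, random_state=0)
    # code is (30, 5), dictionary is (5, 12); errors should be non-increasing
    return code.shape, dictionary.shape, errors[-1]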
def dict_learning_online(X, n_atoms, alpha, n_iter=100, return_code=True,
dict_init=None, callback=None, chunk_size=3,
verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_atoms
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Parameters
----------
X: array of shape (n_samples, n_features)
data matrix
n_atoms: int,
number of dictionary atoms to extract
alpha: int,
sparsity controlling parameter
n_iter: int,
number of iterations to perform
return_code: boolean,
whether to also return the code U or just the dictionary V
dict_init: array of shape (n_atoms, n_features),
initial value for the dictionary for warm restart scenarios
callback:
callable that gets invoked every five iterations
chunk_size: int,
the number of samples to take in each batch
verbose:
degree of output the procedure will print
shuffle: boolean,
whether to shuffle the data before splitting it in batches
n_jobs: int,
number of parallel jobs to run, or -1 to autodetect.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset: int, default 0
number of previous iterations completed on the dictionary used for
initialization
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
code: array of shape (n_samples, n_atoms),
the sparse code (only returned if `return_code=True`)
dictionary: array of shape (n_atoms, n_features),
the solutions to the dictionary learning problem
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_atoms)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_atoms <= r:
dictionary = dictionary[:n_atoms, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_atoms - r, dictionary.shape[1]))]
dictionary = np.ascontiguousarray(dictionary.T)
if verbose == 1:
print '[dict_learning]',
n_batches = floor(float(len(X)) / chunk_size)
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
batches = np.array_split(X_train, n_batches)
batches = itertools.cycle(batches)
# The covariance of the dictionary
A = np.zeros((n_atoms, n_atoms))
# The data approximation
B = np.zeros((n_features, n_atoms))
for ii, this_X in itertools.izip(xrange(iter_offset, iter_offset + n_iter),
batches):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)" %
(ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha).T
# Update the auxiliary variables
if ii < chunk_size - 1:
theta = float((ii + 1) * chunk_size)
else:
theta = float(chunk_size ** 2 + ii + 1 - chunk_size)
beta = (theta + 1 - chunk_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_code:
if verbose > 1:
print 'Learning code...',
elif verbose == 1:
print '|',
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print 'done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60)
return code, dictionary.T
return dictionary.T
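# Illustrative sketch (not part of the original module): an online run over
# mini-batches of 5 samples. The helper name `_demo_dict_learning_online` and
# the sizes are hypothetical.
def _demo_dict_learning_online():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(40, 12)
    code, dictionary = dict_learning_online(X, n_atoms=5, alpha=1.0, n_iter=15,
                                            chunk_size=5, random_state=0)
    return code.shape, dictionary.shape                  # (40, 5) and (5, 12)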
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_atoms, transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_atoms = n_atoms
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
# XXX : kwargs is not documented
X = array2d(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
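# Illustrative sketch (not part of the original module): what the
# ``split_sign`` branch of ``transform`` above computes, shown directly on a
# small code matrix. The helper name `_demo_split_sign` is hypothetical.
def _demo_split_sign():
    import numpy as np
    code = np.array([[1.5, -2.0, 0.0]])
    # concatenate the positive part and the (negated) negative part
    split_code = np.hstack([np.maximum(code, 0), -np.minimum(code, 0)])
    return split_code                                    # [[1.5, 0., 0., 0., 2., 0.]]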
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Parameters
----------
dictionary : array, [n_atoms, n_features]
        The dictionary atoms used for sparse coding. Rows are assumed to be
        normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
`components_` : array, [n_atoms, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
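# Illustrative sketch (not part of the original module): wrap a fixed,
# row-normalized dictionary in SparseCoder and transform a few samples. The
# helper name `_demo_sparse_coder` and the sizes are hypothetical.
def _demo_sparse_coder():
    import numpy as np
    rng = np.random.RandomState(0)
    D = rng.randn(6, 10)
    D /= np.sqrt((D ** 2).sum(axis=1))[:, np.newaxis]
    coder = SparseCoder(D, transform_algorithm='lasso_lars', transform_alpha=0.5)
    X = rng.randn(4, 10)
    return coder.transform(X).shape                      # expected: (4, 6)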
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_atoms
Parameters
----------
n_atoms : int,
number of dictionary elements to extract
alpha : int,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_atoms),
initial value for the code, for warm restart
dict_init : array of shape (n_atoms, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
`components_` : array, [n_atoms, n_features]
dictionary atoms extracted from the data
`error_` : array
vector of errors at each iteration
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_atoms, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_atoms, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
self.random_state = check_random_state(self.random_state)
X = np.asarray(X)
V, U, E = dict_learning(X, self.n_atoms, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=self.random_state)
self.components_ = U
self.error_ = E
return self
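# Illustrative sketch (not part of the original module): fit a small
# DictionaryLearning model and encode the training data. The helper name
# `_demo_dictionary_learning` and the toy sizes are hypothetical.
def _demo_dictionary_learning():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(30, 12)
    dico = DictionaryLearning(n_atoms=5, alpha=1, max_iter=10,
                              transform_algorithm='lasso_lars',
                              transform_alpha=0.5, random_state=0)
    code = dico.fit(X).transform(X)
    return dico.components_.shape, code.shape            # (5, 12) and (30, 5)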
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_atoms
Parameters
----------
n_atoms : int,
number of dictionary elements to extract
alpha : int,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_atoms, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
chunk_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
`components_` : array, [n_atoms, n_features]
components extracted from the data
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_atoms, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, chunk_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_atoms, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.chunk_size = chunk_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self.random_state = check_random_state(self.random_state)
X = np.asarray(X)
U = dict_learning_online(X, self.n_atoms, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
dict_init=self.dict_init,
chunk_size=self.chunk_size,
shuffle=self.shuffle, verbose=self.verbose,
random_state=self.random_state)
self.components_ = U
return self
def partial_fit(self, X, y=None, iter_offset=0):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self.random_state = check_random_state(self.random_state)
X = array2d(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
U = dict_learning_online(X, self.n_atoms, self.alpha,
n_iter=self.n_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
chunk_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset,
random_state=self.random_state)
self.components_ = U
return self
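# Illustrative sketch (not part of the original module): feed mini-batches to
# partial_fit one at a time instead of calling fit on the full data set. The
# helper name `_demo_minibatch_partial_fit` and the batch size are
# hypothetical.
def _demo_minibatch_partial_fit():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(40, 12)
    dico = MiniBatchDictionaryLearning(n_atoms=5, alpha=1, n_iter=1,
                                       random_state=0)
    for it, batch in enumerate(np.array_split(X, 8)):
        dico.partial_fit(batch, iter_offset=it)
    return dico.components_.shape                        # (5, 12)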
| bsd-3-clause |
mailhexu/pyDFTutils | pyDFTutils/wannier90/pythtb_forj.py | 2 | 153581 | from __future__ import print_function
# PythTB python tight binding module.
# December 22, 2016
__version__='1.7.1'
# Copyright 2010, 2012, 2016 by Sinisa Coh and David Vanderbilt
#
# This file is part of PythTB. PythTB is free software: you can
# redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# PythTB is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# A copy of the GNU General Public License should be available
# alongside this source in a file named gpl-3.0.txt. If not,
# see <http://www.gnu.org/licenses/>.
#
# PythTB is availabe at http://www.physics.rutgers.edu/pythtb/
import numpy as np # numerics for matrices
import sys # for exiting
import copy # for deepcopying
class tb_model(object):
r"""
This is the main class of the PythTB package which contains all
information for the tight-binding model.
:param dim_k: Dimensionality of reciprocal space, i.e., specifies how
many directions are considered to be periodic.
:param dim_r: Dimensionality of real space, i.e., specifies how many
real space lattice vectors there are and how many coordinates are
needed to specify the orbital coordinates.
.. note:: Parameter *dim_r* can be larger than *dim_k*! For example,
a polymer is a three-dimensional molecule (one needs three
coordinates to specify orbital positions), but it is periodic
along only one direction. For a polymer, therefore, we should
have *dim_k* equal to 1 and *dim_r* equal to 3. See similar example
here: :ref:`trestle-example`.
:param lat: Array containing lattice vectors in Cartesian
coordinates (in arbitrary units). In example the below, the first
lattice vector has coordinates [1.0,0.5] while the second
one has coordinates [0.0,2.0]. By default, lattice vectors
are an identity matrix.
:param orb: Array containing reduced coordinates of all
tight-binding orbitals. In the example below, the first
orbital is defined with reduced coordinates [0.2,0.3]. Its
Cartesian coordinates are therefore 0.2 times the first
lattice vector plus 0.3 times the second lattice vector.
If *orb* is an integer code will assume that there are these many
orbitals all at the origin of the unit cell. By default
the code will assume a single orbital at the origin.
:param per: This is an optional parameter giving a list of lattice
vectors which are considered to be periodic. In the example below,
only the vector [0.0,2.0] is considered to be periodic (since
per=[1]). By default, all lattice vectors are assumed to be
periodic. If dim_k is smaller than dim_r, then by default the first
dim_k vectors are considered to be periodic.
:param nspin: Number of explicit spin components assumed for each
orbital in *orb*. Allowed values of *nspin* are *1* and *2*. If
*nspin* is 1 then the model is spinless, if *nspin* is 2 then it
is explicitly a spinfull model and each orbital is assumed to
have two spin components. Default value of this parameter is
*1*. Of course one can make spinfull calculation even with
*nspin* set to 1, but then the user must keep track of which
orbital corresponds to which spin component.
Example usage::
# Creates model that is two-dimensional in real space but only
# one-dimensional in reciprocal space. Second lattice vector is
# chosen to be periodic (since per=[1]). Three orbital
# coordinates are specified.
tb = tb_model(1, 2,
lat=[[1.0, 0.5], [0.0, 2.0]],
orb=[[0.2, 0.3], [0.1, 0.1], [0.2, 0.2]],
per=[1])
"""
def __init__(self,dim_k,dim_r,lat=None,orb=None,per=None,nspin=1):
# initialize _dim_k = dimensionality of k-space (integer)
if type(dim_k).__name__!='int':
raise Exception("\n\nArgument dim_k not an integer")
if dim_k < 0 or dim_k > 4:
raise Exception("\n\nArgument dim_k out of range. Must be between 0 and 4.")
self._dim_k=dim_k
# initialize _dim_r = dimensionality of r-space (integer)
if type(dim_r).__name__!='int':
raise Exception("\n\nArgument dim_r not an integer")
if dim_r < dim_k or dim_r > 4:
raise Exception("\n\nArgument dim_r out of range. Must be dim_r>=dim_k and dim_r<=4.")
self._dim_r=dim_r
# initialize _lat = lattice vectors, array of dim_r*dim_r
# format is _lat(lat_vec_index,cartesian_index)
# special option: 'unit' implies unit matrix, also default value
        if lat is None or (isinstance(lat, str) and lat == 'unit'):
self._lat=np.identity(dim_r,float)
print(" Lattice vectors not specified! I will use identity matrix.")
elif type(lat).__name__ not in ['list','ndarray']:
raise Exception("\n\nArgument lat is not a list.")
else:
self._lat=np.array(lat,dtype=float)
if self._lat.shape!=(dim_r,dim_r):
raise Exception("\n\nWrong lat array dimensions")
# check that volume is not zero and that have right handed system
if dim_r>0:
if np.abs(np.linalg.det(self._lat))<1.0E-6:
raise Exception("\n\nLattice vectors length/area/volume too close to zero, or zero.")
if np.linalg.det(self._lat)<0.0:
raise Exception("\n\nLattice vectors need to form right handed system.")
# initialize _norb = number of basis orbitals per cell
# and _orb = orbital locations, in reduced coordinates
# format is _orb(orb_index,lat_vec_index)
# special option: 'bravais' implies one atom at origin
        if orb is None or (isinstance(orb, str) and orb == 'bravais'):
self._norb=1
self._orb=np.zeros((1,dim_r))
print(" Orbital positions not specified. I will assume a single orbital at the origin.")
elif type(orb).__name__=='int':
self._norb=orb
self._orb=np.zeros((orb,dim_r))
print(" Orbital positions not specified. I will assume ",orb," orbitals at the origin")
elif type(orb).__name__ not in ['list','ndarray']:
raise Exception("\n\nArgument orb is not a list or an integer")
else:
self._orb=np.array(orb,dtype=float)
if len(self._orb.shape)!=2:
raise Exception("\n\nWrong orb array rank")
self._norb=self._orb.shape[0] # number of orbitals
if self._orb.shape[1]!=dim_r:
raise Exception("\n\nWrong orb array dimensions")
# choose which self._dim_k out of self._dim_r dimensions are
# to be considered periodic.
if per==None:
# by default first _dim_k dimensions are periodic
self._per=list(range(self._dim_k))
else:
if len(per)!=self._dim_k:
raise Exception("\n\nWrong choice of periodic/infinite direction!")
# store which directions are the periodic ones
self._per=per
# remember number of spin components
if nspin not in [1,2]:
raise Exception("\n\nWrong value of nspin, must be 1 or 2!")
self._nspin=nspin
# by default, assume model did not come from w90 object and that
# position operator is diagonal
self._assume_position_operator_diagonal=True
# compute number of electronic states at each k-point
self._nsta=self._norb*self._nspin
# Initialize onsite energies to zero
if self._nspin==1:
self._site_energies=np.zeros((self._norb),dtype=float)
elif self._nspin==2:
self._site_energies=np.zeros((self._norb,2,2),dtype=complex)
# remember which onsite energies user has specified
self._site_energies_specified=np.zeros(self._norb,dtype=bool)
self._site_energies_specified[:]=False
# Initialize hoppings to empty list
self._hoppings=[]
# The onsite energies and hoppings are not specified
        # when creating a 'tb_model' object. They are specified
# subsequently by separate function calls defined below.
def set_onsite(self,onsite_en,ind_i=None,mode="set"):
r"""
Defines on-site energies for tight-binding orbitals. One can
either set energy for one tight-binding orbital, or all at
once.
.. warning:: In previous version of PythTB this function was
called *set_sites*. For backwards compatibility one can still
use that name but that feature will be removed in future
releases.
:param onsite_en: Either a list of on-site energies (in
arbitrary units) for each orbital, or a single on-site
energy (in this case *ind_i* parameter must be given). In
the case when *nspin* is *1* (spinless) then each on-site
energy is a single number. If *nspin* is *2* then on-site
energy can be given either as a single number, or as an
array of four numbers, or 2x2 matrix. If a single number is
given, it is interpreted as on-site energy for both up and
down spin component. If an array of four numbers is given,
these are the coefficients of I, sigma_x, sigma_y, and
sigma_z (that is, the 2x2 identity and the three Pauli spin
matrices) respectively. Finally, full 2x2 matrix can be
given as well. If this function is never called, on-site
energy is assumed to be zero.
:param ind_i: Index of tight-binding orbital whose on-site
energy you wish to change. This parameter should be
specified only when *onsite_en* is a single number (not a
list).
        :param mode: Similar to parameter *mode* in function *set_hop*.
          Specifies the way in which parameter *onsite_en* is
used. It can either set value of on-site energy from scratch,
reset it, or add to it.
* "set" -- Default value. On-site energy is set to value of
*onsite_en* parameter. One can use "set" on each
tight-binding orbital only once.
* "reset" -- Specifies on-site energy to given value. This
function can be called multiple times for the same
orbital(s).
* "add" -- Adds to the previous value of on-site
energy. This function can be called multiple times for the
same orbital(s).
Example usage::
# Defines on-site energy of first orbital to be 0.0,
# second 1.0, and third 2.0
tb.set_onsite([0.0, 1.0, 2.0])
# Increases value of on-site energy for second orbital
tb.set_onsite(100.0, 1, mode="add")
# Changes on-site energy of second orbital to zero
tb.set_onsite(0.0, 1, mode="reset")
# Sets all three on-site energies at once
tb.set_onsite([2.0, 3.0, 4.0], mode="reset")
"""
if ind_i==None:
if (len(onsite_en)!=self._norb):
raise Exception("\n\nWrong number of site energies")
# make sure ind_i is not out of scope
if ind_i!=None:
if ind_i<0 or ind_i>=self._norb:
raise Exception("\n\nIndex ind_i out of scope.")
# make sure that onsite terms are real/hermitian
if ind_i!=None:
to_check=[onsite_en]
else:
to_check=onsite_en
for ons in to_check:
if np.array(ons).shape==():
if np.abs(np.array(ons)-np.array(ons).conjugate())>1.0E-8:
raise Exception("\n\nOnsite energy should not have imaginary part!")
elif np.array(ons).shape==(4,):
if np.max(np.abs(np.array(ons)-np.array(ons).conjugate()))>1.0E-8:
raise Exception("\n\nOnsite energy or Zeeman field should not have imaginary part!")
elif np.array(ons).shape==(2,2):
if np.max(np.abs(np.array(ons)-np.array(ons).T.conjugate()))>1.0E-8:
raise Exception("\n\nOnsite matrix should be Hermitian!")
# specifying onsite energies from scratch, can be called only once
if mode.lower()=="set":
# specifying only one site at a time
if ind_i!=None:
# make sure we specify things only once
if self._site_energies_specified[ind_i]==True:
raise Exception("\n\nOnsite energy for this site was already specified! Use mode=\"reset\" or mode=\"add\".")
else:
self._site_energies[ind_i]=self._val_to_block(onsite_en)
self._site_energies_specified[ind_i]=True
# specifying all sites at once
else:
# make sure we specify things only once
                if True in self._site_energies_specified[:]:
raise Exception("\n\nSome or all onsite energies were already specified! Use mode=\"reset\" or mode=\"add\".")
else:
for i in range(self._norb):
self._site_energies[i]=self._val_to_block(onsite_en[i])
self._site_energies_specified[:]=True
# reset values of onsite terms, without adding to previous value
elif mode.lower()=="reset":
# specifying only one site at a time
if ind_i!=None:
self._site_energies[ind_i]=self._val_to_block(onsite_en)
self._site_energies_specified[ind_i]=True
# specifying all sites at once
else:
for i in range(self._norb):
self._site_energies[i]=self._val_to_block(onsite_en[i])
self._site_energies_specified[:]=True
# add to previous value
elif mode.lower()=="add":
# specifying only one site at a time
if ind_i!=None:
self._site_energies[ind_i]+=self._val_to_block(onsite_en)
self._site_energies_specified[ind_i]=True
# specifying all sites at once
else:
for i in range(self._norb):
self._site_energies[i]+=self._val_to_block(onsite_en[i])
self._site_energies_specified[:]=True
else:
raise Exception("\n\nWrong value of mode parameter")
def set_hop(self,hop_amp,ind_i,ind_j,ind_R=None,mode="set",allow_conjugate_pair=False):
r"""
Defines hopping parameters between tight-binding orbitals. In
the notation used in section 3.1 equation 3.6 of
:download:`notes on tight-binding formalism
<misc/pythtb-formalism.pdf>` this function specifies the
following object
.. math::
H_{ij}({\bf R})= \langle \phi_{{\bf 0} i} \vert H \vert \phi_{{\bf R},j} \rangle
Where :math:`\langle \phi_{{\bf 0} i} \vert` is i-th
tight-binding orbital in the home unit cell and
:math:`\vert \phi_{{\bf R},j} \rangle` is j-th tight-binding orbital in
unit cell shifted by lattice vector :math:`{\bf R}`. :math:`H`
is the Hamiltonian.
(Strictly speaking, this term specifies hopping amplitude
for hopping from site *j+R* to site *i*, not vice-versa.)
Hopping in the opposite direction is automatically included by
the code since
.. math::
H_{ji}(-{\bf R})= \left[ H_{ij}({\bf R}) \right]^{*}
.. warning::
There is no need to specify hoppings in both :math:`i
\rightarrow j+R` direction and opposite :math:`j
\rightarrow i-R` direction since that is done
           automatically. If you want to specify hoppings in both
directions, see description of parameter
*allow_conjugate_pair*.
.. warning:: In previous version of PythTB this function was
called *add_hop*. For backwards compatibility one can still
use that name but that feature will be removed in future
releases.
:param hop_amp: Hopping amplitude; can be real or complex
number, equals :math:`H_{ij}({\bf R})`. If *nspin* is *2*
then hopping amplitude can be given either as a single
number, or as an array of four numbers, or as 2x2 matrix. If
a single number is given, it is interpreted as hopping
amplitude for both up and down spin component. If an array
of four numbers is given, these are the coefficients of I,
sigma_x, sigma_y, and sigma_z (that is, the 2x2 identity and
the three Pauli spin matrices) respectively. Finally, full
2x2 matrix can be given as well.
:param ind_i: Index of bra orbital from the bracket :math:`\langle
\phi_{{\bf 0} i} \vert H \vert \phi_{{\bf R},j} \rangle`. This
orbital is assumed to be in the home unit cell.
:param ind_j: Index of ket orbital from the bracket :math:`\langle
\phi_{{\bf 0} i} \vert H \vert \phi_{{\bf R},j} \rangle`. This
orbital does not have to be in the home unit cell; its unit cell
position is determined by parameter *ind_R*.
:param ind_R: Specifies, in reduced coordinates, the shift of
the ket orbital. The number of coordinates must equal the
dimensionality in real space (*dim_r* parameter) for consistency,
but only the periodic directions of ind_R will be considered. If
reciprocal space is zero-dimensional (as in a molecule),
this parameter does not need to be specified.
:param mode: Similar to parameter *mode* in function *set_onsite*.
          Specifies the way in which parameter *hop_amp* is
used. It can either set value of hopping term from scratch,
reset it, or add to it.
* "set" -- Default value. Hopping term is set to value of
*hop_amp* parameter. One can use "set" for each triplet of
*ind_i*, *ind_j*, *ind_R* only once.
* "reset" -- Specifies on-site energy to given value. This
function can be called multiple times for the same triplet
*ind_i*, *ind_j*, *ind_R*.
* "add" -- Adds to the previous value of hopping term This
function can be called multiple times for the same triplet
*ind_i*, *ind_j*, *ind_R*.
If *set_hop* was ever called with *allow_conjugate_pair* set
to True, then it is possible that user has specified both
:math:`i \rightarrow j+R` and conjugate pair :math:`j
\rightarrow i-R`. In this case, "set", "reset", and "add"
parameters will treat triplet *ind_i*, *ind_j*, *ind_R* and
conjugate triplet *ind_j*, *ind_i*, *-ind_R* as distinct.
:param allow_conjugate_pair: Default value is *False*. If set
to *True* code will allow user to specify hopping
:math:`i \rightarrow j+R` even if conjugate-pair hopping
:math:`j \rightarrow i-R` has been
specified. If both terms are specified, code will
still count each term two times.
Example usage::
# Specifies complex hopping amplitude between first orbital in home
          # unit cell and third orbital in neighboring unit cell.
tb.set_hop(0.3+0.4j, 0, 2, [0, 1])
# change value of this hopping
tb.set_hop(0.1+0.2j, 0, 2, [0, 1], mode="reset")
# add to previous value (after this function call below,
# hopping term amplitude is 100.1+0.2j)
tb.set_hop(100.0, 0, 2, [0, 1], mode="add")
"""
#
if self._dim_k!=0 and (ind_R is None):
raise Exception("\n\nNeed to specify ind_R!")
# if necessary convert from integer to array
if self._dim_k==1 and type(ind_R).__name__=='int':
tmpR=np.zeros(self._dim_r,dtype=int)
tmpR[self._per]=ind_R
ind_R=tmpR
# check length of ind_R
if self._dim_k!=0:
if len(ind_R)!=self._dim_r:
raise Exception("\n\nLength of input ind_R vector must equal dim_r! Even if dim_k<dim_r.")
# make sure ind_i and ind_j are not out of scope
if ind_i<0 or ind_i>=self._norb:
raise Exception("\n\nIndex ind_i out of scope.")
if ind_j<0 or ind_j>=self._norb:
raise Exception("\n\nIndex ind_j out of scope.")
# do not allow onsite hoppings to be specified here because then they
# will be double-counted
if self._dim_k==0:
if ind_i==ind_j:
raise Exception("\n\nDo not use set_hop for onsite terms. Use set_onsite instead!")
else:
if ind_i==ind_j:
all_zer=True
for k in self._per:
if int(ind_R[k])!=0:
all_zer=False
if all_zer==True:
raise Exception("\n\nDo not use set_hop for onsite terms. Use set_onsite instead!")
#
# make sure that if <i|H|j+R> is specified that <j|H|i-R> is not!
if allow_conjugate_pair==False:
for h in self._hoppings:
if ind_i==h[2] and ind_j==h[1]:
if self._dim_k==0:
raise Exception(\
"""\n
Following matrix element was already implicitly specified:
i="""+str(ind_i)+" j="+str(ind_j)+"""
Remember, specifying <i|H|j> automatically specifies <j|H|i>. For
consistency, specify all hoppings for a given bond in the same
direction. (Or, alternatively, see the documentation on the
'allow_conjugate_pair' flag.)
""")
elif False not in (np.array(ind_R)[self._per]==(-1)*np.array(h[3])[self._per]):
raise Exception(\
"""\n
Following matrix element was already implicitly specified:
i="""+str(ind_i)+" j="+str(ind_j)+" R="+str(ind_R)+"""
Remember, specifying <i|H|j+R> automatically specifies <j|H|i-R>. For
consistency, specify all hoppings for a given bond in the same
direction. (Or, alternatively, see the documentation on the
'allow_conjugate_pair' flag.)
""")
# convert to 2by2 matrix if needed
hop_use=self._val_to_block(hop_amp)
# hopping term parameters to be stored
if self._dim_k==0:
new_hop=[hop_use,int(ind_i),int(ind_j)]
else:
new_hop=[hop_use,int(ind_i),int(ind_j),np.array(ind_R)]
#
# see if there is a hopping term with same i,j,R
use_index=None
for iih,h in enumerate(self._hoppings):
# check if the same
same_ijR=False
if ind_i==h[1] and ind_j==h[2]:
if self._dim_k==0:
same_ijR=True
else:
if False not in (np.array(ind_R)[self._per]==np.array(h[3])[self._per]):
same_ijR=True
# if they are the same then store index of site at which they are the same
if same_ijR==True:
use_index=iih
#
# specifying hopping terms from scratch, can be called only once
if mode.lower()=="set":
# make sure we specify things only once
if use_index!=None:
raise Exception("\n\nHopping energy for this site was already specified! Use mode=\"reset\" or mode=\"add\".")
else:
self._hoppings.append(new_hop)
# reset value of hopping term, without adding to previous value
elif mode.lower()=="reset":
if use_index!=None:
self._hoppings[use_index]=new_hop
else:
self._hoppings.append(new_hop)
# add to previous value
elif mode.lower()=="add":
if use_index!=None:
self._hoppings[use_index][0]+=new_hop[0]
else:
self._hoppings.append(new_hop)
else:
raise Exception("\n\nWrong value of mode parameter")
def _val_to_block(self,val):
"""If nspin=2 then returns a 2 by 2 matrix from the input
parameters. If only one real number is given in the input then
assume that this is the diagonal term. If array with four
elements is given then first one is the diagonal term, and
other three are Zeeman field direction. If given a 2 by 2
matrix, just return it. If nspin=1 then just returns val."""
# spinless case
if self._nspin==1:
return val
# spinfull case
elif self._nspin==2:
# matrix to return
ret=np.zeros((2,2),dtype=complex)
#
use_val=np.array(val)
# only one number is given
if use_val.shape==():
ret[0,0]+=use_val
ret[1,1]+=use_val
# if four numbers are given
elif use_val.shape==(4,):
# diagonal
ret[0,0]+=use_val[0]
ret[1,1]+=use_val[0]
# sigma_x
ret[0,1]+=use_val[1]
ret[1,0]+=use_val[1]
# sigma_y
ret[0,1]+=use_val[2]*(-1.0j)
ret[1,0]+=use_val[2]*( 1.0j)
# sigma_z
ret[0,0]+=use_val[3]
ret[1,1]+=use_val[3]*(-1.0)
# if 2 by 2 matrix is given
elif use_val.shape==(2,2):
return use_val
else:
raise Exception(\
"""\n
Wrong format of the on-site or hopping term. Must be single number, or
in the case of a spinfull model can be array of four numbers or 2x2
matrix.""")
return ret
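    # Illustrative note (not part of the original PythTB source): for a
    # spinfull model (nspin=2) a four-component value [h, bx, by, bz] is
    # expanded above as h*I + bx*sigma_x + by*sigma_y + bz*sigma_z. For
    # example, [1.0, 0.0, 0.0, 0.5] gives the 2x2 block
    #     [[1.5, 0.0],
    #      [0.0, 0.5]]
    # which is what a call such as
    #     tb_model(0, 1, nspin=2)._val_to_block([1.0, 0.0, 0.0, 0.5])
    # is expected to return (the constructor arguments here are just a
    # hypothetical minimal example).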
def display(self):
r"""
Prints on the screen some information about this tight-binding
model. This function doesn't take any parameters.
"""
print('---------------------------------------')
print('report of tight-binding model')
print('---------------------------------------')
print('k-space dimension =',self._dim_k)
print('r-space dimension =',self._dim_r)
print('number of spin components =',self._nspin)
print('periodic directions =',self._per)
print('number of orbitals =',self._norb)
print('number of electronic states =',self._nsta)
print('lattice vectors:')
for i,o in enumerate(self._lat):
print(" #",_nice_int(i,2)," ===> [", end=' ')
for j,v in enumerate(o):
print(_nice_float(v,7,4), end=' ')
if j!=len(o)-1:
print(",", end=' ')
print("]")
print('positions of orbitals:')
for i,o in enumerate(self._orb):
print(" #",_nice_int(i,2)," ===> [", end=' ')
for j,v in enumerate(o):
print(_nice_float(v,7,4), end=' ')
if j!=len(o)-1:
print(",", end=' ')
print("]")
print('site energies:')
for i,site in enumerate(self._site_energies):
print(" #",_nice_int(i,2)," ===> ", end=' ')
if self._nspin==1:
print(_nice_float(site,7,4))
elif self._nspin==2:
print(str(site).replace("\n"," "))
print('hoppings:')
for i,hopping in enumerate(self._hoppings):
print("<",_nice_int(hopping[1],2),"| H |",_nice_int(hopping[2],2), end=' ')
if len(hopping)==4:
print("+ [", end=' ')
for j,v in enumerate(hopping[3]):
print(_nice_int(v,2), end=' ')
if j!=len(hopping[3])-1:
print(",", end=' ')
else:
print("]", end=' ')
print("> ===> ", end=' ')
if self._nspin==1:
print(_nice_complex(hopping[0],7,4))
elif self._nspin==2:
print(str(hopping[0]).replace("\n"," "))
print()
def visualize(self,dir_first,dir_second=None,eig_dr=None,draw_hoppings=True,ph_color="black"):
r"""
Rudimentary function for visualizing tight-binding model geometry,
hopping between tight-binding orbitals, and electron eigenstates.
If eigenvector is not drawn, then orbitals in home cell are drawn
as red circles, and those in neighboring cells are drawn with
different shade of red. Hopping term directions are drawn with
green lines connecting two orbitals. Origin of unit cell is
indicated with blue dot, while real space unit vectors are drawn
with blue lines.
If eigenvector is drawn, then electron eigenstate on each orbital
is drawn with a circle whose size is proportional to wavefunction
amplitude while its color depends on the phase. There are various
coloring schemes for the phase factor; see more details under
*ph_color* parameter. If eigenvector is drawn and coloring scheme
is "red-blue" or "wheel", all other elements of the picture are
drawn in gray or black.
:param dir_first: First index of Cartesian coordinates used for
plotting.
:param dir_second: Second index of Cartesian coordinates used for
plotting. For example if dir_first=0 and dir_second=2, and
Cartesian coordinates of some orbital is [2.0,4.0,6.0] then it
will be drawn at coordinate [2.0,6.0]. If dimensionality of real
space (*dim_r*) is zero or one then dir_second should not be
specified.
:param eig_dr: Optional parameter specifying eigenstate to
plot. If specified, this should be one-dimensional array of
complex numbers specifying wavefunction at each orbital in
the tight-binding basis. If not specified, eigenstate is not
drawn.
:param draw_hoppings: Optional parameter specifying whether to
draw all allowed hopping terms in the tight-binding
model. Default value is True.
:param ph_color: Optional parameter determining the way
eigenvector phase factors are translated into color. Default
value is "black". Convention of the wavefunction phase is as
in convention 1 in section 3.1 of :download:`notes on
tight-binding formalism <misc/pythtb-formalism.pdf>`. In
other words, these wavefunction phases are in correspondence
with cell-periodic functions :math:`u_{n {\bf k}} ({\bf r})`
not :math:`\Psi_{n {\bf k}} ({\bf r})`.
* "black" -- phase of eigenvectors are ignored and wavefunction
is always colored in black.
* "red-blue" -- zero phase is drawn red, while phases or pi or
-pi are drawn blue. Phases in between are interpolated between
red and blue. Some phase information is lost in this coloring
          because phases of +phi and -phi have the same color.
* "wheel" -- each phase is given unique color. In steps of pi/3
starting from 0, colors are assigned (in increasing hue) as:
red, yellow, green, cyan, blue, magenta, red.
:returns:
* **fig** -- Figure object from matplotlib.pyplot module
that can be used to save the figure in PDF, EPS or similar
format, for example using fig.savefig("name.pdf") command.
* **ax** -- Axes object from matplotlib.pyplot module that can be
used to tweak the plot, for example by adding a plot title
ax.set_title("Title goes here").
Example usage::
# Draws x-y projection of tight-binding model
# tweaks figure and saves it as a PDF.
(fig, ax) = tb.visualize(0, 1)
ax.set_title("Title goes here")
fig.savefig("model.pdf")
See also these examples: :ref:`edge-example`,
:ref:`visualize-example`.
"""
# check the format of eig_dr
if not (eig_dr is None):
if eig_dr.shape!=(self._norb,):
raise Exception("\n\nWrong format of eig_dr! Must be array of size norb.")
# check that ph_color is correct
if ph_color not in ["black","red-blue","wheel"]:
raise Exception("\n\nWrong value of ph_color parameter!")
# check if dir_second had to be specified
if dir_second==None and self._dim_r>1:
raise Exception("\n\nNeed to specify index of second coordinate for projection!")
# start a new figure
import pylab as plt
fig=plt.figure(figsize=[plt.rcParams["figure.figsize"][0],
plt.rcParams["figure.figsize"][0]])
ax=fig.add_subplot(111, aspect='equal')
def proj(v):
"Project vector onto drawing plane"
coord_x=v[dir_first]
if dir_second==None:
coord_y=0.0
else:
coord_y=v[dir_second]
return [coord_x,coord_y]
def to_cart(red):
"Convert reduced to Cartesian coordinates"
return np.dot(red,self._lat)
# define colors to be used in plotting everything
# except eigenvectors
if (eig_dr is None) or ph_color=="black":
c_cell="b"
c_orb="r"
c_nei=[0.85,0.65,0.65]
c_hop="g"
else:
c_cell=[0.4,0.4,0.4]
c_orb=[0.0,0.0,0.0]
c_nei=[0.6,0.6,0.6]
c_hop=[0.0,0.0,0.0]
# determine color scheme for eigenvectors
def color_to_phase(ph):
if ph_color=="black":
return "k"
if ph_color=="red-blue":
ph=np.abs(ph/np.pi)
return [1.0-ph,0.0,ph]
if ph_color=="wheel":
if ph<0.0:
ph=ph+2.0*np.pi
ph=6.0*ph/(2.0*np.pi)
x_ph=1.0-np.abs(ph%2.0-1.0)
if ph>=0.0 and ph<1.0: ret_col=[1.0 ,x_ph,0.0 ]
if ph>=1.0 and ph<2.0: ret_col=[x_ph,1.0 ,0.0 ]
if ph>=2.0 and ph<3.0: ret_col=[0.0 ,1.0 ,x_ph]
if ph>=3.0 and ph<4.0: ret_col=[0.0 ,x_ph,1.0 ]
if ph>=4.0 and ph<5.0: ret_col=[x_ph,0.0 ,1.0 ]
if ph>=5.0 and ph<=6.0: ret_col=[1.0 ,0.0 ,x_ph]
return ret_col
# draw origin
ax.plot([0.0],[0.0],"o",c=c_cell,mec="w",mew=0.0,zorder=7,ms=4.5)
# first draw unit cell vectors which are considered to be periodic
for i in self._per:
# pick a unit cell vector and project it down to the drawing plane
vec=proj(self._lat[i])
ax.plot([0.0,vec[0]],[0.0,vec[1]],"-",c=c_cell,lw=1.5,zorder=7)
# now draw all orbitals
for i in range(self._norb):
# find position of orbital in cartesian coordinates
pos=to_cart(self._orb[i])
pos=proj(pos)
ax.plot([pos[0]],[pos[1]],"o",c=c_orb,mec="w",mew=0.0,zorder=10,ms=4.0)
# draw hopping terms
if draw_hoppings==True:
for h in self._hoppings:
# draw both i->j+R and i-R->j hop
for s in range(2):
# get "from" and "to" coordinates
pos_i=np.copy(self._orb[h[1]])
pos_j=np.copy(self._orb[h[2]])
# add also lattice vector if not 0-dim
if self._dim_k!=0:
if s==0:
pos_j[self._per]=pos_j[self._per]+h[3][self._per]
if s==1:
pos_i[self._per]=pos_i[self._per]-h[3][self._per]
# project down vector to the plane
pos_i=np.array(proj(to_cart(pos_i)))
pos_j=np.array(proj(to_cart(pos_j)))
# add also one point in the middle to bend the curve
                    prcnt=0.05 # bend always by this amount
pos_mid=(pos_i+pos_j)*0.5
dif=pos_j-pos_i # difference vector
orth=np.array([dif[1],-1.0*dif[0]]) # orthogonal to difference vector
orth=orth/np.sqrt(np.dot(orth,orth)) # normalize
pos_mid=pos_mid+orth*prcnt*np.sqrt(np.dot(dif,dif)) # shift mid point in orthogonal direction
# draw hopping
all_pnts=np.array([pos_i,pos_mid,pos_j]).T
ax.plot(all_pnts[0],all_pnts[1],"-",c=c_hop,lw=0.75,zorder=8)
# draw "from" and "to" sites
ax.plot([pos_i[0]],[pos_i[1]],"o",c=c_nei,zorder=9,mew=0.0,ms=4.0,mec="w")
ax.plot([pos_j[0]],[pos_j[1]],"o",c=c_nei,zorder=9,mew=0.0,ms=4.0,mec="w")
# now draw the eigenstate
if not (eig_dr is None):
for i in range(self._norb):
# find position of orbital in cartesian coordinates
pos=to_cart(self._orb[i])
pos=proj(pos)
# find norm of eigenfunction at this point
nrm=(eig_dr[i]*eig_dr[i].conjugate()).real
# rescale and get size of circle
nrm_rad=2.0*nrm*float(self._norb)
# get color based on the phase of the eigenstate
phase=np.angle(eig_dr[i])
c_ph=color_to_phase(phase)
ax.plot([pos[0]],[pos[1]],"o",c=c_ph,mec="w",mew=0.0,ms=nrm_rad,zorder=11,alpha=0.8)
# center the image
# first get the current limit, which is probably tight
xl=ax.set_xlim()
yl=ax.set_ylim()
# now get the center of current limit
centx=(xl[1]+xl[0])*0.5
centy=(yl[1]+yl[0])*0.5
# now get the maximal size (lengthwise or heightwise)
mx=max([xl[1]-xl[0],yl[1]-yl[0]])
# set new limits
extr=0.05 # add some boundary as well
ax.set_xlim(centx-mx*(0.5+extr),centx+mx*(0.5+extr))
ax.set_ylim(centy-mx*(0.5+extr),centy+mx*(0.5+extr))
# return a figure and axes to the user
return (fig,ax)
def get_num_orbitals(self):
"Returns number of orbitals in the model."
return self._norb
def get_orb(self):
"Returns reduced coordinates of orbitals in format [orbital,coordinate.]"
return self._orb.copy()
def get_lat(self):
"Returns lattice vectors in format [vector,coordinate]."
return self._lat.copy()
def _gen_ham(self,k_input=None):
"""Generate Hamiltonian for a certain k-point,
K-point is given in reduced coordinates!"""
kpnt=np.array(k_input)
if not (k_input is None):
# if kpnt is just a number then convert it to an array
if len(kpnt.shape)==0:
kpnt=np.array([kpnt])
            # check that k-vector is of correct size
if kpnt.shape!=(self._dim_k,):
raise Exception("\n\nk-vector of wrong shape!")
else:
if self._dim_k!=0:
raise Exception("\n\nHave to provide a k-vector!")
# zero the Hamiltonian matrix
if self._nspin==1:
ham=np.zeros((self._norb,self._norb),dtype=complex)
elif self._nspin==2:
ham=np.zeros((self._norb,2,self._norb,2),dtype=complex)
# modify diagonal elements
for i in range(self._norb):
if self._nspin==1:
ham[i,i]=self._site_energies[i]
elif self._nspin==2:
ham[i,:,i,:]=self._site_energies[i]
# go over all hoppings
for hopping in self._hoppings:
# get all data for the hopping parameter
if self._nspin==1:
amp=complex(hopping[0])
elif self._nspin==2:
amp=np.array(hopping[0],dtype=complex)
i=hopping[1]
j=hopping[2]
# in 0-dim case there is no phase factor
if self._dim_k>0:
ind_R=np.array(hopping[3],dtype=float)
# vector from one site to another
#rv=-self._orb[i,:]+self._orb[j,:]+ind_R
rv=ind_R
# Take only components of vector which are periodic
rv=rv[self._per]
# Calculate the hopping, see details in info/tb/tb.pdf
phase=np.exp((2.0j)*np.pi*np.dot(kpnt,rv))
amp=amp*phase
# add this hopping into a matrix and also its conjugate
if self._nspin==1:
ham[i,j]+=amp
ham[j,i]+=amp.conjugate()
elif self._nspin==2:
ham[i,:,j,:]+=amp
ham[j,:,i,:]+=amp.T.conjugate()
return ham
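    # Illustrative note (not part of the original PythTB source): with the
    # convention used above, the Bloch phase exp(2j*pi*k.R) depends only on
    # the lattice vector ind_R and not on the orbital positions (the
    # commented-out line shows the alternative convention). For a hypothetical
    # single-orbital chain,
    #     chain = tb_model(1, 1, lat=[[1.0]], orb=[[0.0]])
    #     chain.set_hop(-1.0, 0, 0, [1])
    #     chain._gen_ham([0.25])
    # the Hamiltonian reduces to the scalar -2.0*cos(2*pi*0.25) = 0.0.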
def _sol_ham(self,ham,eig_vectors=False):
"""Solves Hamiltonian and returns eigenvectors, eigenvalues"""
# reshape matrix first
if self._nspin==1:
ham_use=ham
elif self._nspin==2:
ham_use=ham.reshape((2*self._norb,2*self._norb))
# check that matrix is hermitian
if np.max(ham_use-ham_use.T.conj())>1.0E-9:
raise Exception("\n\nHamiltonian matrix is not hermitian?!")
#solve matrix
if eig_vectors==False: # only find eigenvalues
eval=np.linalg.eigvalsh(ham_use)
# sort eigenvalues and convert to real numbers
eval=_nicefy_eig(eval)
return np.array(eval,dtype=float)
else: # find eigenvalues and eigenvectors
(eval,eig)=np.linalg.eigh(ham_use)
# transpose matrix eig since otherwise it is confusing
# now eig[i,:] is eigenvector for eval[i]-th eigenvalue
eig=eig.T
# sort evectors, eigenvalues and convert to real numbers
(eval,eig)=_nicefy_eig(eval,eig)
# reshape eigenvectors if doing a spinfull calculation
if self._nspin==2:
eig=eig.reshape((self._nsta,self._norb,2))
return (eval,eig)
def solve_all(self,k_list=None,eig_vectors=False):
r"""
Solves for eigenvalues and (optionally) eigenvectors of the
tight-binding model on a given one-dimensional list of k-vectors.
.. note::
Eigenvectors (wavefunctions) returned by this
function and used throughout the code are exclusively given
in convention 1 as described in section 3.1 of
:download:`notes on tight-binding formalism
<misc/pythtb-formalism.pdf>`. In other words, they
are in correspondence with cell-periodic functions
:math:`u_{n {\bf k}} ({\bf r})` not
:math:`\Psi_{n {\bf k}} ({\bf r})`.
.. note::
In some cases class :class:`pythtb.wf_array` provides a more
elegant way to deal with eigensolutions on a regular mesh of
k-vectors.
:param k_list: One-dimensional array of k-vectors. Each k-vector
is given in reduced coordinates of the reciprocal space unit
cell. For example, for real space unit cell vectors [1.0,0.0]
and [0.0,2.0] and associated reciprocal space unit vectors
[2.0*pi,0.0] and [0.0,pi], k-vector with reduced coordinates
[0.25,0.25] corresponds to k-vector [0.5*pi,0.25*pi].
Dimensionality of each vector must equal to the number of
periodic directions (i.e. dimensionality of reciprocal space,
*dim_k*).
This parameter shouldn't be specified for systems with a
zero-dimensional k-space (*dim_k* =0).
:param eig_vectors: Optional boolean parameter, specifying whether
eigenvectors should be returned. If *eig_vectors* is True, then
both eigenvalues and eigenvectors are returned, otherwise only
eigenvalues are returned.
:returns:
* **eval** -- Two dimensional array of eigenvalues for
all bands for all kpoints. Format is eval[band,kpoint] where
first index (band) corresponds to the electron band in
question and second index (kpoint) corresponds to the k-point
as listed in the input parameter *k_list*. Eigenvalues are
sorted from smallest to largest at each k-point separately.
In the case when reciprocal space is zero-dimensional (as in a
molecule) kpoint index is dropped and *eval* is of the format
eval[band].
* **evec** -- Three dimensional array of eigenvectors for
all bands and all kpoints. If *nspin* equals 1 the format
of *evec* is evec[band,kpoint,orbital] where "band" is the
electron band in question, "kpoint" is index of k-vector
as given in input parameter *k_list*. Finally, "orbital"
refers to the tight-binding orbital basis function.
Ordering of bands is the same as in *eval*.
Eigenvectors evec[n,k,j] correspond to :math:`C^{n {\bf
k}}_{j}` from section 3.1 equation 3.5 and 3.7 of the
:download:`notes on tight-binding formalism
<misc/pythtb-formalism.pdf>`.
In the case when reciprocal space is zero-dimensional (as in a
molecule) kpoint index is dropped and *evec* is of the format
evec[band,orbital].
In the spinfull calculation (*nspin* equals 2) evec has
additional component evec[...,spin] corresponding to the
spin component of the wavefunction.
Example usage::
# Returns eigenvalues for three k-vectors
eval = tb.solve_all([[0.0, 0.0], [0.0, 0.2], [0.0, 0.5]])
# Returns eigenvalues and eigenvectors for two k-vectors
(eval, evec) = tb.solve_all([[0.0, 0.0], [0.0, 0.2]], eig_vectors=True)
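For a fully self-contained illustration, here is a minimal sketch that
builds a hypothetical two-orbital chain from scratch (the chain below
is an assumption made for this example, not a model shipped with the
module)::
from pythtb import tb_model
# 1D chain, two orbitals per cell at reduced positions 0.0 and 0.5
chain = tb_model(1, 1, [[1.0]], [[0.0], [0.5]])
chain.set_onsite([0.0, 0.5])
chain.set_hop(-1.0, 0, 1, [0]) # intra-cell hopping
chain.set_hop(-1.0, 1, 0, [1]) # hopping into the neighboring cell
# eigenvalues on three k-points; evals has shape (2, 3)
evals = chain.solve_all([[0.0], [0.25], [0.5]])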
"""
# if not 0-dim case
if not (k_list is None):
nkp=len(k_list) # number of k points
# first initialize matrices for all return data
# indices are [band,kpoint]
ret_eval=np.zeros((self._nsta,nkp),dtype=float)
# indices are [band,kpoint,orbital,spin]
if self._nspin==1:
ret_evec=np.zeros((self._nsta,nkp,self._norb),dtype=complex)
elif self._nspin==2:
ret_evec=np.zeros((self._nsta,nkp,self._norb,2),dtype=complex)
# go over all kpoints
for i,k in enumerate(k_list):
# generate Hamiltonian at that point
ham=self._gen_ham(k)
# solve Hamiltonian
if eig_vectors==False:
eval=self._sol_ham(ham,eig_vectors=eig_vectors)
ret_eval[:,i]=eval[:]
else:
(eval,evec)=self._sol_ham(ham,eig_vectors=eig_vectors)
ret_eval[:,i]=eval[:]
if self._nspin==1:
ret_evec[:,i,:]=evec[:,:]
elif self._nspin==2:
ret_evec[:,i,:,:]=evec[:,:,:]
# return stuff
if eig_vectors==False:
# indices of eval are [band,kpoint]
return ret_eval
else:
# indices of eval are [band,kpoint] for evec are [band,kpoint,orbital,(spin)]
return (ret_eval,ret_evec)
else: # 0 dim case
# generate Hamiltonian
ham=self._gen_ham()
# solve
if eig_vectors==False:
eval=self._sol_ham(ham,eig_vectors=eig_vectors)
# indices of eval are [band]
return eval
else:
(eval,evec)=self._sol_ham(ham,eig_vectors=eig_vectors)
# indices of eval are [band] and of evec are [band,orbital,spin]
return (eval,evec)
def solve_one(self,k_point=None,eig_vectors=False):
r"""
Similar to :func:`pythtb.tb_model.solve_all` but solves tight-binding
model for only one k-vector.
"""
# if not 0-dim case
if not (k_point is None):
if eig_vectors==False:
eval=self.solve_all([k_point],eig_vectors=eig_vectors)
# indices of eval are [band]
return eval[:,0]
else:
(eval,evec)=self.solve_all([k_point],eig_vectors=eig_vectors)
# indices of eval are [band] for evec are [band,orbital,spin]
if self._nspin==1:
return (eval[:,0],evec[:,0,:])
elif self._nspin==2:
return (eval[:,0],evec[:,0,:,:])
else:
# do the same as solve_all
return self.solve_all(eig_vectors=eig_vectors)
def cut_piece(self,num,fin_dir,glue_edgs=False):
r"""
Constructs a (d-1)-dimensional tight-binding model out of a
d-dimensional one by repeating the unit cell a given number of
times along one of the periodic lattice vectors. The real-space
lattice vectors of the returned model are the same as those of
the original model; only the dimensionality of reciprocal space
is reduced.
:param num: How many times to repeat the unit cell.
:param fin_dir: Index of the real space lattice vector along
which you no longer wish to maintain periodicity.
:param glue_edgs: Optional boolean parameter specifying whether to
allow hoppings from one edge to the other of a cut model.
:returns:
* **fin_model** -- Object of type
:class:`pythtb.tb_model` representing a cutout
tight-binding model. Orbitals in *fin_model* are
numbered so that the i-th orbital of the n-th unit
cell has index i+norb*n (here norb is the number of
orbitals in the original model).
Example usage::
A = tb_model(3, 3, ...)
# Construct two-dimensional model B out of three-dimensional
# model A by repeating model along second lattice vector ten times
B = A.cut_piece(10, 1)
# Further cut two-dimensional model B into one-dimensional model
# C by repeating the unit cell twenty times along the third lattice
# vector and allowing hoppings from one edge to the other
C = B.cut_piece(20, 2, glue_edgs=True)
See also these examples: :ref:`haldane_fin-example`,
:ref:`edge-example`.
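A more concrete, self-contained sketch (the one-orbital chain below is
only a hypothetical example model)::
from pythtb import tb_model
chain = tb_model(1, 1, [[1.0]], [[0.0]])
chain.set_onsite([0.0])
chain.set_hop(-1.0, 0, 0, [1])
# cut a finite ten-cell chain; dim_k drops from 1 to 0
finite = chain.cut_piece(10, 0, glue_edgs=False)
evals = finite.solve_all() # ten molecular-like eigenvalues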
"""
if self._dim_k ==0:
raise Exception("\n\nModel is already finite")
if type(num).__name__!='int':
raise Exception("\n\nArgument num not an integer")
# check value of num
if num<1:
raise Exception("\n\nArgument num must be positive!")
if num==1 and glue_edgs==True:
raise Exception("\n\nCan't have num==1 and glueing of the edges!")
# generate orbitals of a finite model
fin_orb=[]
onsite=[] # store also onsite energies
for i in range(num): # go over all cells in finite direction
for j in range(self._norb): # go over all orbitals in one cell
# make a copy of j-th orbital
orb_tmp=np.copy(self._orb[j,:])
# change coordinate along finite direction
orb_tmp[fin_dir]+=float(i)
# add to the list
fin_orb.append(orb_tmp)
# do the onsite energies at the same time
onsite.append(self._site_energies[j])
onsite=np.array(onsite)
fin_orb=np.array(fin_orb)
# generate periodic directions of a finite model
fin_per=copy.deepcopy(self._per)
# find if list of periodic directions contains the one you
# want to make finite
if fin_per.count(fin_dir)!=1:
raise Exception("\n\nCan not make model finite along this direction!")
# remove index which is no longer periodic
fin_per.remove(fin_dir)
# generate object of tb_model type that will correspond to a cutout
fin_model=tb_model(self._dim_k-1,
self._dim_r,
copy.deepcopy(self._lat),
fin_orb,
fin_per,
self._nspin)
# remember if came from w90
fin_model._assume_position_operator_diagonal=self._assume_position_operator_diagonal
# now put all onsite terms for the finite model
fin_model.set_onsite(onsite,mode="reset")
# put all hopping terms
for c in range(num): # go over all cells in finite direction
for h in range(len(self._hoppings)): # go over all hoppings in one cell
# amplitude of the hop is the same
amp=self._hoppings[h][0]
# lattice vector of the hopping
ind_R=copy.deepcopy(self._hoppings[h][3])
jump_fin=ind_R[fin_dir] # store by how many cells is the hopping in finite direction
if fin_model._dim_k!=0:
ind_R[fin_dir]=0 # one of the directions now becomes finite
# index of "from" and "to" hopping indices
hi=self._hoppings[h][1] + c*self._norb
# have to compensate for the fact that ind_R in finite direction
# will not be used in the finite model
hj=self._hoppings[h][2] + (c + jump_fin)*self._norb
# decide whether this hopping should be added or not
to_add=True
# if edges are not glued then neglect all jumps that spill out
if glue_edgs==False:
if hj<0 or hj>=self._norb*num:
to_add=False
# if edges are glued then do mod division to wrap up the hopping
else:
hj=int(hj)%int(self._norb*num)
# add hopping to a finite model
if to_add==True:
if fin_model._dim_k==0:
fin_model.set_hop(amp,hi,hj,mode="add",allow_conjugate_pair=True)
else:
fin_model.set_hop(amp,hi,hj,ind_R,mode="add",allow_conjugate_pair=True)
return fin_model
def reduce_dim(self,remove_k,value_k):
r"""
Reduces dimensionality of the model by taking a reciprocal-space
slice of the Bloch Hamiltonian :math:`{\cal H}_{\bf k}`. The Bloch
Hamiltonian (defined in :download:`notes on tight-binding
formalism <misc/pythtb-formalism.pdf>` in section 3.1 equation 3.7) of a
d-dimensional model is a function of d-dimensional k-vector.
This function returns a d-1 dimensional tight-binding model obtained
by constraining one of k-vector components in :math:`{\cal H}_{\bf
k}` to be a constant.
:param remove_k: Which reciprocal space unit vector component
you wish to keep constant.
:param value_k: Value of the k-vector component to which you are
constraining this model. Must be given in reduced coordinates.
:returns:
* **red_tb** -- Object of type :class:`pythtb.tb_model`
representing a reduced tight-binding model.
Example usage::
# Constrains second k-vector component to equal 0.3
red_tb = tb.reduce_dim(1, 0.3)
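A self-contained sketch (the square-lattice model below is hypothetical
and serves only to illustrate the call)::
from pythtb import tb_model
square = tb_model(2, 2, [[1.0, 0.0], [0.0, 1.0]], [[0.0, 0.0]])
square.set_onsite([0.0])
square.set_hop(-1.0, 0, 0, [1, 0])
square.set_hop(-1.0, 0, 0, [0, 1])
# freeze the second k-component at 0.3; the result is a 1D model
line = square.reduce_dim(1, 0.3)
evals = line.solve_all([[0.0], [0.25], [0.5]])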
"""
#
if self._dim_k==0:
raise Exception("\n\nCan not reduce dimensionality even further!")
# make a copy
red_tb=copy.deepcopy(self)
# make one of the directions not periodic
red_tb._per.remove(remove_k)
red_tb._dim_k=len(red_tb._per)
# check that really removed one and only one direction
if red_tb._dim_k!=self._dim_k-1:
raise Exception("\n\nSpecified wrong dimension to reduce!")
# specify hopping terms from scratch
red_tb._hoppings=[]
# set all hopping parameters for this value of value_k
for h in range(len(self._hoppings)):
hop=self._hoppings[h]
if self._nspin==1:
amp=complex(hop[0])
elif self._nspin==2:
amp=np.array(hop[0],dtype=complex)
i=hop[1]; j=hop[2]
ind_R=np.array(hop[3],dtype=int)
# vector from one site to another
rv=-red_tb._orb[i,:]+red_tb._orb[j,:]+np.array(ind_R,dtype=float)
# take only r-vector component along direction you are not making periodic
rv=rv[remove_k]
# Calculate the part of hopping phase, only for this direction
phase=np.exp((2.0j)*np.pi*(value_k*rv))
# store modified version of the hop
# Since we are getting rid of one dimension, it could be that now
# one of the hopping terms became onsite term because one direction
# is no longer periodic
if i==j and (False not in (np.array(ind_R[red_tb._per],dtype=int)==0)):
if ind_R[remove_k]==0:
# in this case this is really an onsite term
red_tb.set_onsite(amp*phase,i,mode="add")
else:
# in this case must treat both R and -R because that term would
# have been counted twice without dimensional reduction
if self._nspin==1:
red_tb.set_onsite(amp*phase+(amp*phase).conj(),i,mode="add")
elif self._nspin==2:
red_tb.set_onsite(amp*phase+(amp.T*phase).conj(),i,mode="add")
else:
# just in case make the R vector zero along the reduction dimension
ind_R[remove_k]=0
# add hopping term
red_tb.set_hop(amp*phase,i,j,ind_R,mode="add",allow_conjugate_pair=True)
return red_tb
def make_supercell(self, sc_red_lat, return_sc_vectors=False, to_home=True):
r"""
Returns tight-binding model :class:`pythtb.tb_model`
representing a super-cell of a current object. This function
can be used together with *cut_piece* in order to create slabs
with arbitrary surfaces.
By default all orbitals will be shifted to the home cell after
the super-cell has been created. That way all orbitals will have
reduced coordinates between 0 and 1. If you wish to avoid this
behavior, set the *to_home* argument to *False*.
:param sc_red_lat: Array of integers with size *dim_r*dim_r*
defining a super-cell lattice vectors in terms of reduced
coordinates of the original tight-binding model. First index
in the array specifies super-cell vector, while second index
specifies coordinate of that super-cell vector. If
*dim_k<dim_r* you still need to specify the full array of
size *dim_r*dim_r* for consistency, but the non-periodic
directions must have 0 on the off-diagonal elements and 1 on the
diagonal.
:param return_sc_vectors: Optional parameter. Default value is
*False*. If *True* returns also lattice vectors inside the
super-cell. Internally, super-cell tight-binding model will
have orbitals repeated in the same order in which these
super-cell vectors are given, but if argument *to_home*
is set *True* (which it is by default) then additionally,
orbitals will be shifted to the home cell.
:param to_home: Optional parameter, if *True* will
shift all orbitals to the home cell. Default value is *True*.
:returns:
* **sc_tb** -- Object of type :class:`pythtb.tb_model`
representing a tight-binding model in a super-cell.
* **sc_vectors** -- Super-cell vectors, returned only if
*return_sc_vectors* is set to *True* (default value is
*False*).
Example usage::
# Creates super-cell out of 2d tight-binding model tb
sc_tb = tb.make_supercell([[2, 1], [-1, 2]])
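A self-contained sketch (the square-lattice model below is hypothetical
and serves only to show the bookkeeping)::
from pythtb import tb_model
square = tb_model(2, 2, [[1.0, 0.0], [0.0, 1.0]], [[0.0, 0.0]])
square.set_onsite([0.0])
square.set_hop(-1.0, 0, 0, [1, 0])
square.set_hop(-1.0, 0, 0, [0, 1])
# the determinant of the integer matrix (here 5) equals the number of
# original cells, and hence orbitals, contained in the super-cell
sc = square.make_supercell([[2, 1], [-1, 2]])
evals = sc.solve_all([[0.0, 0.0]]) # evals.shape == (5, 1)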
"""
# Can't make super cell for model without periodic directions
if self._dim_r==0:
raise Exception("\n\nMust have at least one periodic direction to make a super-cell")
# convert array to numpy array
use_sc_red_lat=np.array(sc_red_lat)
# checks on super-lattice array
if use_sc_red_lat.shape!=(self._dim_r,self._dim_r):
raise Exception("\n\nDimension of sc_red_lat array must be dim_r*dim_r")
if use_sc_red_lat.dtype!=int:
raise Exception("\n\nsc_red_lat array elements must be integers")
for i in range(self._dim_r):
for j in range(self._dim_r):
if (i==j) and (i not in self._per) and use_sc_red_lat[i,j]!=1:
raise Exception("\n\nDiagonal elements of sc_red_lat for non-periodic directions must equal 1.")
if (i!=j) and ((i not in self._per) or (j not in self._per)) and use_sc_red_lat[i,j]!=0:
raise Exception("\n\nOff-diagonal elements of sc_red_lat for non-periodic directions must equal 0.")
if np.abs(np.linalg.det(use_sc_red_lat))<1.0E-6:
raise Exception("\n\nSuper-cell lattice vectors length/area/volume too close to zero, or zero.")
if np.linalg.det(use_sc_red_lat)<0.0:
raise Exception("\n\nSuper-cell lattice vectors need to form right handed system.")
# converts reduced vector in original lattice to reduced vector in super-cell lattice
def to_red_sc(red_vec_orig):
return np.linalg.solve(np.array(use_sc_red_lat.T,dtype=float),
np.array(red_vec_orig,dtype=float))
# conservative estimate on range of search for super-cell vectors
max_R=np.max(np.abs(use_sc_red_lat))*self._dim_r
# candidates for super-cell vectors
# this is hard-coded and can be improved!
sc_cands=[]
if self._dim_r==1:
for i in range(-max_R,max_R+1):
sc_cands.append(np.array([i]))
elif self._dim_r==2:
for i in range(-max_R,max_R+1):
for j in range(-max_R,max_R+1):
sc_cands.append(np.array([i,j]))
elif self._dim_r==3:
for i in range(-max_R,max_R+1):
for j in range(-max_R,max_R+1):
for k in range(-max_R,max_R+1):
sc_cands.append(np.array([i,j,k]))
elif self._dim_r==4:
for i in range(-max_R,max_R+1):
for j in range(-max_R,max_R+1):
for k in range(-max_R,max_R+1):
for l in range(-max_R,max_R+1):
sc_cands.append(np.array([i,j,k,l]))
else:
raise Exception("\n\nWrong dimensionality of dim_r!")
# find all vectors inside super-cell
# store them here
sc_vec=[]
eps_shift=np.sqrt(2.0)*1.0E-8 # shift of the grid, so to avoid double counting
#
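# a candidate vector belongs to the super-cell if each of its reduced
# super-cell coordinates lies in (-eps_shift, 1-eps_shift], i.e.
# effectively in [0,1) with a tiny shift so that points sitting exactly
# on the boundary are counted once and only once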
for vec in sc_cands:
# compute reduced coordinates of this candidate vector in the super-cell frame
tmp_red=to_red_sc(vec).tolist()
# check if in the interior
inside=True
for t in tmp_red:
if t<=-1.0*eps_shift or t>1.0-eps_shift:
inside=False
if inside==True:
sc_vec.append(np.array(vec))
# number of times unit cell is repeated in the super-cell
num_sc=len(sc_vec)
# check that found enough super-cell vectors
if int(round(np.abs(np.linalg.det(use_sc_red_lat))))!=num_sc:
raise Exception("\n\nSuper-cell generation failed! Wrong number of super-cell vectors found.")
# cartesian vectors of the super lattice
sc_cart_lat=np.dot(use_sc_red_lat,self._lat)
# orbitals of the super-cell tight-binding model
sc_orb=[]
for cur_sc_vec in sc_vec: # go over all super-cell vectors
for orb in self._orb: # go over all orbitals
# shift orbital and compute coordinates in
# reduced coordinates of super-cell
sc_orb.append(to_red_sc(orb+cur_sc_vec))
# create super-cell tb_model object to be returned
sc_tb=tb_model(self._dim_k,self._dim_r,sc_cart_lat,sc_orb,per=self._per,nspin=self._nspin)
# remember if came from w90
sc_tb._assume_position_operator_diagonal=self._assume_position_operator_diagonal
# repeat onsite energies
for i in range(num_sc):
for j in range(self._norb):
sc_tb.set_onsite(self._site_energies[j],i*self._norb+j)
# set hopping terms
for c,cur_sc_vec in enumerate(sc_vec): # go over all super-cell vectors
for h in range(len(self._hoppings)): # go over all hopping terms of the original model
# amplitude of the hop is the same
amp=self._hoppings[h][0]
# lattice vector of the hopping
ind_R=copy.deepcopy(self._hoppings[h][3])
# super-cell component of hopping lattice vector
# shift also by current super cell vector
sc_part=np.floor(to_red_sc(ind_R+cur_sc_vec)) # round down!
sc_part=np.array(sc_part,dtype=int)
# find remaining vector in the original reduced coordinates
orig_part=ind_R+cur_sc_vec-np.dot(sc_part,use_sc_red_lat)
# remaining vector must equal one of the super-cell vectors
pair_ind=None
for p,pair_sc_vec in enumerate(sc_vec):
if False not in (pair_sc_vec==orig_part):
if pair_ind!=None:
raise Exception("\n\nFound duplicate super cell vector!")
pair_ind=p
if pair_ind==None:
raise Exception("\n\nDid not find super cell vector!")
# index of "from" and "to" hopping indices
hi=self._hoppings[h][1] + c*self._norb
hj=self._hoppings[h][2] + pair_ind*self._norb
# add hopping term
sc_tb.set_hop(amp,hi,hj,sc_part,mode="add",allow_conjugate_pair=True)
# put orbitals to home cell if asked for
if to_home==True:
sc_tb._shift_to_home()
# return new tb model and vectors if needed
if return_sc_vectors==False:
return sc_tb
else:
return (sc_tb,sc_vec)
def _shift_to_home(self):
"""Shifts all orbital positions to the home unit cell. After
this function is called all reduced coordinates of orbitals
will be between 0 and 1. It may be useful to call this
function after using make_supercell."""
# go over all orbitals
for i in range(self._norb):
cur_orb=self._orb[i]
# compute orbital in the home cell
round_orb=(np.array(cur_orb)+1.0E-6)%1.0
# find displacement vector needed to bring back to home cell
disp_vec=np.array(np.round(cur_orb-round_orb),dtype=int)
# check if have at least one non-zero component
if True in (disp_vec!=0):
# shift orbital
self._orb[i]-=np.array(disp_vec,dtype=float)
# shift also hoppings
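# moving orbital i changes the cell vector of every hopping that
# starts or ends on it; the +/- disp_vec adjustments below keep each
# physical bond unchanged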
if self._dim_k!=0:
for h in range(len(self._hoppings)):
if self._hoppings[h][1]==i:
self._hoppings[h][3]-=disp_vec
if self._hoppings[h][2]==i:
self._hoppings[h][3]+=disp_vec
def k_uniform_mesh(self,mesh_size):
r"""
Returns a uniform grid of k-points that can be passed to
the function :func:`pythtb.tb_model.solve_all`. This
function is useful for plotting density of states histogram
and similar.
Returned uniform grid of k-points always contains the origin.
:param mesh_size: Number of k-points in the mesh in each
periodic direction of the model.
:returns:
* **k_vec** -- Array of k-vectors on the mesh that can be
directly passed to function :func:`pythtb.tb_model.solve_all`.
Example usage::
# returns a 10x20x30 mesh of a tight binding model
# with three periodic directions
k_vec = my_model.k_uniform_mesh([10,20,30])
# solve model on the uniform mesh
my_model.solve_all(k_vec)
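A self-contained sketch of the density-of-states use case mentioned
above (the square-lattice model below is hypothetical)::
import numpy as np
from pythtb import tb_model
square = tb_model(2, 2, [[1.0, 0.0], [0.0, 1.0]], [[0.0, 0.0]])
square.set_onsite([0.0])
square.set_hop(-1.0, 0, 0, [1, 0])
square.set_hop(-1.0, 0, 0, [0, 1])
k_vec = square.k_uniform_mesh([40, 40])
evals = square.solve_all(k_vec) # shape (1, 1600): one band, 40*40 k-points
dos, edges = np.histogram(evals.flatten(), bins=100)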
"""
# get the mesh size and checks for consistency
use_mesh=np.array(list(map(round,mesh_size)),dtype=int)
if use_mesh.shape!=(self._dim_k,):
print(use_mesh.shape)
raise Exception("\n\nIncorrect size of the specified k-mesh!")
if np.min(use_mesh)<=0:
raise Exception("\n\nMesh must have positive non-zero number of elements.")
# construct the mesh
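# strategy: build an integer np.mgrid over the requested mesh, divide
# each component by the corresponding mesh size so that the reduced
# coordinates fall in [0,1), then flatten to a (nkpts, dim_k) array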
if self._dim_k==1:
# get a mesh
k_vec=np.mgrid[0:use_mesh[0]]
# normalize the mesh
norm=np.tile(np.array(use_mesh,dtype=float),use_mesh)
norm=norm.reshape(use_mesh.tolist()+[1])
norm=norm.transpose([1,0])
k_vec=k_vec/norm
# final reshape
k_vec=k_vec.transpose([1,0]).reshape([use_mesh[0],1])
elif self._dim_k==2:
# get a mesh
k_vec=np.mgrid[0:use_mesh[0],0:use_mesh[1]]
# normalize the mesh
norm=np.tile(np.array(use_mesh,dtype=float),use_mesh)
norm=norm.reshape(use_mesh.tolist()+[2])
norm=norm.transpose([2,0,1])
k_vec=k_vec/norm
# final reshape
k_vec=k_vec.transpose([1,2,0]).reshape([use_mesh[0]*use_mesh[1],2])
elif self._dim_k==3:
# get a mesh
k_vec=np.mgrid[0:use_mesh[0],0:use_mesh[1],0:use_mesh[2]]
# normalize the mesh
norm=np.tile(np.array(use_mesh,dtype=float),use_mesh)
norm=norm.reshape(use_mesh.tolist()+[3])
norm=norm.transpose([3,0,1,2])
k_vec=k_vec/norm
# final reshape
k_vec=k_vec.transpose([1,2,3,0]).reshape([use_mesh[0]*use_mesh[1]*use_mesh[2],3])
else:
raise Exception("\n\nUnsupported dim_k!")
return k_vec
def k_path(self,kpts,nk,report=True):
r"""
Interpolates a path in reciprocal space between specified
k-points. In 2D or 3D the k-path can consist of several
straight segments connecting high-symmetry points ("nodes"),
and the results can be used to plot the bands along this path.
The interpolated path that is returned contains k-points that
are spaced as uniformly as possible along the path.
:param kpts: Array of k-vectors in reciprocal space between
which interpolated path should be constructed. These
k-vectors must be given in reduced coordinates. As a
special case, in 1D k-space kpts may be a string:
* *"full"* -- Implies *[ 0.0, 0.5, 1.0]* (full BZ)
* *"fullc"* -- Implies *[-0.5, 0.0, 0.5]* (full BZ, centered)
* *"half"* -- Implies *[ 0.0, 0.5]* (half BZ)
:param nk: Total number of k-points to be used in making the plot.
:param report: Optional parameter specifying whether printout
is desired (default is True).
:returns:
* **k_vec** -- Array of (nearly) equidistant interpolated
k-points. The distance between the points is calculated in
the Cartesian frame, however coordinates themselves are
given in dimensionless reduced coordinates! This is done
so that this array can be directly passed to function
:func:`pythtb.tb_model.solve_all`.
* **k_dist** -- Array giving accumulated k-distance to each
k-point in the path. Unlike array *k_vec* this one has
dimensions! (Units are defined here so that for a
one-dimensional crystal with lattice constant equal to, for
example, *10*, the length of the Brillouin zone would equal
*1/10=0.1*. In other words, factors of :math:`2\pi` are
absorbed into *k*.) This array can be used to plot path in
the k-space so that the distances between the k-points in
the plot are exact.
* **k_node** -- Array giving accumulated k-distance to each
node on the path in Cartesian coordinates. This array is
typically used to plot nodes (typically special points) on
the path in k-space.
Example usage::
# Construct a path connecting four nodal points in k-space
# Path will contain 401 k-points, roughly equally spaced
path = [[0.0, 0.0], [0.0, 0.5], [0.5, 0.5], [0.0, 0.0]]
(k_vec,k_dist,k_node) = my_model.k_path(path,401)
# solve for eigenvalues on that path
evals = my_model.solve_all(k_vec)
# then use evals, k_dist, and k_node to plot bandstructure
# (see examples)
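A hedged sketch of how the three returned arrays are typically combined
into a band-structure plot, assuming matplotlib is available and
continuing from the example above::
import matplotlib.pyplot as plt
plt.plot(k_dist, evals.T) # one curve per band
plt.vlines(k_node, evals.min(), evals.max()) # mark the nodes
plt.xticks(k_node, ['G', 'X', 'M', 'G']) # hypothetical labels
plt.show()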
"""
# processing of special cases for kpts
if kpts=='full':
# full Brillouin zone for 1D case
k_list=np.array([[0.],[0.5],[1.]])
elif kpts=='fullc':
# centered full Brillouin zone for 1D case
k_list=np.array([[-0.5],[0.],[0.5]])
elif kpts=='half':
# half Brillouin zone for 1D case
k_list=np.array([[0.],[0.5]])
else:
k_list=np.array(kpts)
# in 1D case if path is specified as a vector, convert it to an (n,1) array
if len(k_list.shape)==1 and self._dim_k==1:
k_list=np.array([k_list]).T
# make sure that k-points in the path have correct dimension
if k_list.shape[1]!=self._dim_k:
print('input k-space dimension is',k_list.shape[1])
print('k-space dimension taken from model is',self._dim_k)
raise Exception("\n\nk-space dimensions do not match")
# must have more k-points in the path than number of nodes
if nk<k_list.shape[0]:
raise Exception("\n\nMust have more points in the path than number of nodes.")
# number of nodes
n_nodes=k_list.shape[0]
# extract the lattice vectors from the TB model
lat_per=np.copy(self._lat)
# choose only those that correspond to periodic directions
lat_per=lat_per[self._per]
# compute k_space metric tensor
k_metric = np.linalg.inv(np.dot(lat_per,lat_per.T))
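# with this metric, dk . k_metric . dk is the squared Cartesian length
# of a step dk given in reduced coordinates, in the units (factors of
# 2*pi absorbed) described in the docstring above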
# Find distances between nodes and set k_node, which is
# accumulated distance since the start of the path
# initialize array k_node
k_node=np.zeros(n_nodes,dtype=float)
for n in range(1,n_nodes):
dk = k_list[n]-k_list[n-1]
dklen = np.sqrt(np.dot(dk,np.dot(k_metric,dk)))
k_node[n]=k_node[n-1]+dklen
# Find indices of nodes in interpolated list
node_index=[0]
for n in range(1,n_nodes-1):
frac=k_node[n]/k_node[-1]
node_index.append(int(round(frac*(nk-1))))
node_index.append(nk-1)
# initialize two arrays temporarily with zeros
# array giving accumulated k-distance to each k-point
k_dist=np.zeros(nk,dtype=float)
# array listing the interpolated k-points
k_vec=np.zeros((nk,self._dim_k),dtype=float)
# go over all kpoints
k_vec[0]=k_list[0]
for n in range(1,n_nodes):
n_i=node_index[n-1]
n_f=node_index[n]
kd_i=k_node[n-1]
kd_f=k_node[n]
k_i=k_list[n-1]
k_f=k_list[n]
for j in range(n_i,n_f+1):
frac=float(j-n_i)/float(n_f-n_i)
k_dist[j]=kd_i+frac*(kd_f-kd_i)
k_vec[j]=k_i+frac*(k_f-k_i)
if report==True:
if self._dim_k==1:
print(' Path in 1D BZ defined by nodes at '+str(k_list.flatten()))
else:
print('----- k_path report begin ----------')
original=np.get_printoptions()
np.set_printoptions(precision=5)
print('real-space lattice vectors\n', lat_per)
print('k-space metric tensor\n', k_metric)
print('internal coordinates of nodes\n', k_list)
if (lat_per.shape[0]==lat_per.shape[1]):
# lat_per is invertible
lat_per_inv=np.linalg.inv(lat_per).T
print('reciprocal-space lattice vectors\n', lat_per_inv)
# cartesian coordinates of nodes
kpts_cart=np.tensordot(k_list,lat_per_inv,axes=1)
print('cartesian coordinates of nodes\n',kpts_cart)
print('list of segments:')
for n in range(1,n_nodes):
dk=k_node[n]-k_node[n-1]
dk_str=_nice_float(dk,7,5)
print(' length = '+dk_str+' from ',k_list[n-1],' to ',k_list[n])
print('node distance list:', k_node)
print('node index list: ', np.array(node_index))
np.set_printoptions(precision=original["precision"])
print('----- k_path report end ------------')
print()
return (k_vec,k_dist,k_node)
def ignore_position_operator_offdiagonal(self):
"""Call to this function enables one to approximately compute
Berry-like objects from tight-binding models that were
obtained from Wannier90."""
self._assume_position_operator_diagonal=True
def position_matrix(self, evec, dir):
r"""
Returns matrix elements of the position operator along
direction *dir* for eigenvectors *evec* at a single k-point.
Position operator is defined in reduced coordinates.
The returned object :math:`X` is
.. math::
X_{m n {\bf k}}^{\alpha} = \langle u_{m {\bf k}} \vert
r^{\alpha} \vert u_{n {\bf k}} \rangle
Here :math:`r^{\alpha}` is the position operator along direction
:math:`\alpha` that is selected by *dir*.
:param evec: Eigenvectors for which we are computing matrix
elements of the position operator. The shape of this array
is evec[band,orbital] if *nspin* equals 1 and
evec[band,orbital,spin] if *nspin* equals 2.
:param dir: Direction along which we are computing the center.
This integer must not be one of the periodic directions
since position operator matrix element in that case is not
well defined.
:returns:
* **pos_mat** -- Position operator matrix :math:`X_{m n}` as defined
above. This is a square matrix with size determined by number of bands
given in *evec* input array. First index of *pos_mat* corresponds to
bra vector (*m*) and second index to ket (*n*).
Example usage::
# diagonalizes Hamiltonian at some k-points
(evals, evecs) = my_model.solve_all(k_vec,eig_vectors=True)
# computes position operator matrix elements for 3-rd kpoint
# and bottom five bands along first coordinate
pos_mat = my_model.position_matrix(evecs[:5,2], 0)
See also this example: :ref:`haldane_hwf-example`,
"""
# make sure specified direction is not periodic!
if dir in self._per:
raise Exception("Can not compute position matrix elements along periodic direction!")
# make sure direction is not out of range
if dir<0 or dir>=self._dim_r:
raise Exception("Direction out of range!")
# check if model came from w90
if self._assume_position_operator_diagonal==False:
_offdiag_approximation_warning_and_stop()
# get coordinates of orbitals along the specified direction
pos_tmp=self._orb[:,dir]
# reshape arrays in the case of spinfull calculation
if self._nspin==2:
# tile along spin direction if needed
pos_use=np.tile(pos_tmp,(2,1)).transpose().flatten()
# also flatten the state along the spin index
evec_use=evec.reshape((evec.shape[0],evec.shape[1]*evec.shape[2]))
else:
pos_use=pos_tmp
evec_use=evec
# position matrix elements
pos_mat=np.zeros((evec_use.shape[0],evec_use.shape[0]),dtype=complex)
# go over all bands
for i in range(evec_use.shape[0]):
for j in range(evec_use.shape[0]):
pos_mat[i,j]=np.dot(evec_use[i].conj(),pos_use*evec_use[j])
# make sure matrix is hermitian
if np.max(np.abs(pos_mat-pos_mat.T.conj()))>1.0E-9:
raise Exception("\n\n Position matrix is not hermitian?!")
return pos_mat
def position_expectation(self,evec,dir):
r"""
Returns diagonal matrix elements of the position operator.
These elements :math:`X_{n n}` can be interpreted as an
average position of n-th Bloch state *evec[n]* along
direction *dir*. Generally speaking these centers are *not*
hybrid Wannier function centers (which are instead
returned by :func:`pythtb.tb_model.position_hwf`).
See function :func:`pythtb.tb_model.position_matrix` for
definition of matrix :math:`X`.
:param evec: Eigenvectors for which we are computing matrix
elements of the position operator. The shape of this array
is evec[band,orbital] if *nspin* equals 1 and
evec[band,orbital,spin] if *nspin* equals 2.
:param dir: Direction along which we are computing matrix
elements. This integer must not be one of the periodic
directions since position operator matrix element in that
case is not well defined.
:returns:
* **pos_exp** -- Diagonal elements of the position operator matrix :math:`X`.
Length of this vector is determined by number of bands given in *evec* input
array.
Example usage::
# diagonalizes Hamiltonian at some k-points
(evals, evecs) = my_model.solve_all(k_vec,eig_vectors=True)
# computes average position for 3-rd kpoint
# and bottom five bands along first coordinate
pos_exp = my_model.position_expectation(evecs[:5,2], 0)
See also this example: :ref:`haldane_hwf-example`.
"""
# check if model came from w90
if self._assume_position_operator_diagonal==False:
_offdiag_approximation_warning_and_stop()
pos_exp=self.position_matrix(evec,dir).diagonal()
return np.array(np.real(pos_exp),dtype=float)
def position_hwf(self,evec,dir,hwf_evec=False,basis="orbital"):
r"""
Returns eigenvalues and optionally eigenvectors of the
position operator matrix :math:`X` in either Bloch or orbital
basis. These eigenvectors can be interpreted as linear
combinations of Bloch states *evec* that have minimal extent (or
spread :math:`\Omega` in the sense of maximally localized
Wannier functions) along direction *dir*. The eigenvalues are
average positions of these localized states.
Note that these eigenvectors are not maximally localized
Wannier functions in the usual sense because they are
localized only along one direction. They are also not the
average positions of the Bloch states *evec*, which are
instead computed by :func:`pythtb.tb_model.position_expectation`.
See function :func:`pythtb.tb_model.position_matrix` for
the definition of the matrix :math:`X`.
See also Fig. 3 in Phys. Rev. Lett. 102, 107603 (2009) for a
discussion of the hybrid Wannier function centers in the
context of a Chern insulator.
:param evec: Eigenvectors for which we are computing matrix
elements of the position operator. The shape of this array
is evec[band,orbital] if *nspin* equals 1 and
evec[band,orbital,spin] if *nspin* equals 2.
:param dir: Direction along which we are computing matrix
elements. This integer must not be one of the periodic
directions since position operator matrix element in that
case is not well defined.
:param hwf_evec: Optional boolean variable. If set to *True*
this function will return not only eigenvalues but also
eigenvectors of :math:`X`. Default value is *False*.
:param basis: Optional parameter. If basis="bloch" then hybrid
Wannier function *hwf_evec* is written in the Bloch basis. I.e.
hwf[i,j] corresponds to the weight of the j-th Bloch state from *evec*
in the i-th hybrid Wannier function. If basis="orbital" and nspin=1 then
hwf[i,orb] corresponds to the weight of the orb-th orbital in the i-th
hybrid Wannier function. If basis="orbital" and nspin=2 then
hwf[i,orb,spin] corresponds to the weight of the orb-th orbital and spin-th
spin component in the i-th hybrid Wannier function. Default value
is "orbital".
:returns:
* **hwfc** -- Eigenvalues of the position operator matrix :math:`X`
(also called hybrid Wannier function centers).
Length of this vector equals number of bands given in *evec* input
array. Hybrid Wannier function centers are ordered in ascending order.
Note that in general *n*-th hwfc does not correspond to *n*-th electronic
state *evec*.
* **hwf** -- Eigenvectors of the position operator matrix :math:`X`.
(also called hybrid Wannier functions). These are returned only if
parameter *hwf_evec* is set to *True*.
The shape of this array is [h,x] or [h,x,s] depending on value of *basis*
and *nspin*. If *basis* is "bloch" then x refers to indices of
Bloch states *evec*. If *basis* is "orbital" then *x* (or *x* and *s*)
correspond to orbital index (or orbital and spin index if *nspin* is 2).
Example usage::
# diagonalizes Hamiltonian at some k-points
(evals, evecs) = my_model.solve_all(k_vec,eig_vectors=True)
# computes hybrid Wannier centers (and functions) for 3-rd kpoint
# and bottom five bands along first coordinate
(hwfc, hwf) = my_model.position_hwf(evecs[:5,2], 0, hwf_evec=True, basis="orbital")
See also this example: :ref:`haldane_hwf-example`,
"""
# check if model came from w90
if self._assume_position_operator_diagonal==False:
_offdiag_approximation_warning_and_stop()
# get position matrix
pos_mat=self.position_matrix(evec,dir)
# diagonalize
if hwf_evec==False:
hwfc=np.linalg.eigvalsh(pos_mat)
# sort eigenvalues and convert to real numbers
hwfc=_nicefy_eig(hwfc)
return np.array(hwfc,dtype=float)
else: # find eigenvalues and eigenvectors
(hwfc,hwf)=np.linalg.eigh(pos_mat)
# transpose matrix eig since otherwise it is confusing
# now eig[i,:] is eigenvector for eval[i]-th eigenvalue
hwf=hwf.T
# sort evectors, eigenvalues and convert to real numbers
(hwfc,hwf)=_nicefy_eig(hwfc,hwf)
# convert to right basis
if basis.lower().strip()=="bloch":
return (hwfc,hwf)
elif basis.lower().strip()=="orbital":
if self._nspin==1:
ret_hwf=np.zeros((hwf.shape[0],self._norb),dtype=complex)
# sum over bloch states to get hwf in orbital basis
for i in range(ret_hwf.shape[0]):
ret_hwf[i]=np.dot(hwf[i],evec)
hwf=ret_hwf
else:
ret_hwf=np.zeros((hwf.shape[0],self._norb*2),dtype=complex)
# get rid of spin indices
evec_use=evec.reshape([hwf.shape[0],self._norb*2])
# sum over states
for i in range(ret_hwf.shape[0]):
ret_hwf[i]=np.dot(hwf[i],evec_use)
# restore spin indices
hwf=ret_hwf.reshape([hwf.shape[0],self._norb,2])
return (hwfc,hwf)
else:
raise Exception("\n\nBasis must be either bloch or orbital!")
# keeping old name for backwards compatibility
# will be removed in future
tb_model.set_sites=tb_model.set_onsite
tb_model.add_hop=tb_model.set_hop
tbmodel=tb_model
class wf_array(object):
r"""
This class is used to solve a tight-binding model
:class:`pythtb.tb_model` on a regular or non-regular grid
of points in reciprocal space and/or parameter space, and
perform on it various calculations. For example it can be
used to calculate the Berry phase, Berry curvature, 1st Chern
number, etc.
*Regular k-space grid*:
If the grid is a regular k-mesh (no parametric dimensions),
a single call to the function
:func:`pythtb.wf_array.solve_on_grid` will both construct a
k-mesh that uniformly covers the Brillouin zone, and populate
it with wavefunctions (eigenvectors) computed on this grid.
The last point in each k-dimension is set so that it represents
the same Bloch function as the first one (this involves the
insertion of some orbital-position-dependent phase factors).
Example :ref:`haldane_bp-example` shows how to use wf_array on
a regular grid of points in k-space. Examples :ref:`cone-example`
and :ref:`3site_cycle-example` show how to use non-regular grid of
points.
*Parametric or irregular k-space grid*:
An irregular grid of points, or a grid that includes also
one or more parametric dimensions, can be populated manually
with the help of the *[]* operator. For example, to copy
eigenvectors *evec* into coordinate (2,3) in the *wf_array*
object *wf* one can simply do::
wf[2,3]=evec
The eigenvectors (wavefunctions) *evec* in the example above
are expected to be in the format *evec[band,orbital]*
(or *evec[band,orbital,spin]* for the spinfull calculation).
This is the same format as returned by
:func:`pythtb.tb_model.solve_one` or
:func:`pythtb.tb_model.solve_all` (in the latter case one
needs to restrict it to a single k-point as *evec[:,kpt,:]*
if the model has *dim_k>=1*).
If wf_array is used for closed paths, either in a
reciprocal-space or parametric direction, then one needs to
include both the starting and ending eigenfunctions even though
they are physically equivalent. If the array dimension in
question is a k-vector direction and the path traverses the
Brillouin zone in a primitive reciprocal-lattice direction,
:func:`pythtb.wf_array.impose_pbc` can be used to associate
the starting and ending points with each other; if it is a
non-winding loop in k-space or a loop in parameter space,
then :func:`pythtb.wf_array.impose_loop` can be used instead.
(These may not be necessary if only Berry fluxes are needed.)
Example :ref:`3site_cycle-example` shows how one
of the directions of *wf_array* object need not be a k-vector
direction, but can instead be a Hamiltonian parameter :math:`\lambda`
(see also discussion after equation 4.1 in :download:`notes on
tight-binding formalism <misc/pythtb-formalism.pdf>`).
:param model: Object of type :class:`pythtb.tb_model` representing
tight-binding model associated with this array of eigenvectors.
:param mesh_arr: Array giving a dimension of the grid of points in
each reciprocal-space or parametric direction.
Example usage::
# Construct wf_array capable of storing an 11x21 array of
# wavefunctions
wf = wf_array(tb, [11, 21])
# populate this wf_array with regular grid of points in
# Brillouin zone
wf.solve_on_grid([0.0, 0.0])
# Compute set of eigenvectors at one k-point
(eval, evec) = tb.solve_one([kx, ky], eig_vectors = True)
# Store it manually into a specified location in the array
wf[3, 4] = evec
# To access the eigenvectors from the same position
print(wf[3, 4])
"""
def __init__(self,model,mesh_arr):
# number of electronic states for each k-point
self._nsta=model._nsta
# number of spin components
self._nspin=model._nspin
# number of orbitals
self._norb=model._norb
# store orbitals from the model
self._orb=np.copy(model._orb)
# store entire model as well
self._model=copy.deepcopy(model)
# store dimension of array of points on which to keep wavefunctions
self._mesh_arr=np.array(mesh_arr)
self._dim_arr=len(self._mesh_arr)
# all dimensions should be 2 or larger, because pbc can be used
if True in (self._mesh_arr<=1).tolist():
raise Exception("\n\nDimension of wf_array object in each direction must be 2 or larger.")
# generate temporary array used later to generate object ._wfs
wfs_dim=np.copy(self._mesh_arr)
wfs_dim=np.append(wfs_dim,self._nsta)
wfs_dim=np.append(wfs_dim,self._norb)
if self._nspin==2:
wfs_dim=np.append(wfs_dim,self._nspin)
# store wavefunctions here in the form _wfs[kx_index,ky_index, ... ,band,orb,spin]
self._wfs=np.zeros(wfs_dim,dtype=complex)
def solve_on_grid(self,start_k):
r"""
Solve a tight-binding model on a regular mesh of k-points covering
the entire reciprocal-space unit cell. Both points at the opposite
sides of reciprocal-space unit cell are included in the array.
This function also automatically imposes periodic boundary
conditions on the eigenfunctions. See also the discussion in
:func:`pythtb.wf_array.impose_pbc`.
:param start_k: Origin of a regular grid of points in the reciprocal space.
:returns:
* **gaps** -- returns the minimal direct band gap between the n-th and
(n+1)-th band over all k-points in the mesh. Note that in the case of band
crossings one may have to use very dense k-meshes to resolve
the crossing.
Example usage::
# Solve eigenvectors on a regular grid anchored
# at a given point
wf.solve_on_grid([-0.5, -0.5])
"""
# check dimensionality
if self._dim_arr!=self._model._dim_k:
raise Exception("\n\nIf using solve_on_grid method, dimension of wf_array must equal dim_k of the tight-binding model!")
# to return gaps at all k-points
if self._norb<=1:
all_gaps=None # trivial case since there is only one band
else:
gap_dim=np.copy(self._mesh_arr)-1
gap_dim=np.append(gap_dim,self._norb*self._nspin-1)
all_gaps=np.zeros(gap_dim,dtype=float)
#
if self._dim_arr==1:
# don't need to go over the last point because that will be
# computed in the impose_pbc call
for i in range(self._mesh_arr[0]-1):
# generate a kpoint
kpt=[start_k[0]+float(i)/float(self._mesh_arr[0]-1)]
# solve at that point
(eval,evec)=self._model.solve_one(kpt,eig_vectors=True)
# store wavefunctions
self[i]=evec
# store gaps
if all_gaps is not None:
all_gaps[i,:]=eval[1:]-eval[:-1]
# impose boundary conditions
self.impose_pbc(0,self._model._per[0])
elif self._dim_arr==2:
for i in range(self._mesh_arr[0]-1):
for j in range(self._mesh_arr[1]-1):
kpt=[start_k[0]+float(i)/float(self._mesh_arr[0]-1),\
start_k[1]+float(j)/float(self._mesh_arr[1]-1)]
(eval,evec)=self._model.solve_one(kpt,eig_vectors=True)
self[i,j]=evec
if all_gaps is not None:
all_gaps[i,j,:]=eval[1:]-eval[:-1]
for dir in range(2):
self.impose_pbc(dir,self._model._per[dir])
elif self._dim_arr==3:
for i in range(self._mesh_arr[0]-1):
for j in range(self._mesh_arr[1]-1):
for k in range(self._mesh_arr[2]-1):
kpt=[start_k[0]+float(i)/float(self._mesh_arr[0]-1),\
start_k[1]+float(j)/float(self._mesh_arr[1]-1),\
start_k[2]+float(k)/float(self._mesh_arr[2]-1)]
(eval,evec)=self._model.solve_one(kpt,eig_vectors=True)
self[i,j,k]=evec
if all_gaps is not None:
all_gaps[i,j,k,:]=eval[1:]-eval[:-1]
for dir in range(3):
self.impose_pbc(dir,self._model._per[dir])
elif self._dim_arr==4:
for i in range(self._mesh_arr[0]-1):
for j in range(self._mesh_arr[1]-1):
for k in range(self._mesh_arr[2]-1):
for l in range(self._mesh_arr[3]-1):
kpt=[start_k[0]+float(i)/float(self._mesh_arr[0]-1),\
start_k[1]+float(j)/float(self._mesh_arr[1]-1),\
start_k[2]+float(k)/float(self._mesh_arr[2]-1),\
start_k[3]+float(l)/float(self._mesh_arr[3]-1)]
(eval,evec)=self._model.solve_one(kpt,eig_vectors=True)
self[i,j,k,l]=evec
if all_gaps is not None:
all_gaps[i,j,k,l,:]=eval[1:]-eval[:-1]
for dir in range(4):
self.impose_pbc(dir,self._model._per[dir])
else:
raise Exception("\n\nWrong dimensionality!")
if all_gaps is None:
return None
return all_gaps.min(axis=tuple(range(self._dim_arr)))
def __check_key(self,key):
# do some checks for 1D
if self._dim_arr==1:
if type(key).__name__!='int':
raise TypeError("Key should be an integer!")
if key<(-1)*self._mesh_arr[0] or key>=self._mesh_arr[0]:
raise IndexError("Key outside the range!")
# do checks for higher dimension
else:
if len(key)!=self._dim_arr:
raise TypeError("Wrong dimensionality of key!")
for i,k in enumerate(key):
if type(k).__name__!='int':
raise TypeError("Key should be set of integers!")
if k<(-1)*self._mesh_arr[i] or k>=self._mesh_arr[i]:
raise IndexError("Key outside the range!")
def __getitem__(self,key):
# check that key is in the correct range
self.__check_key(key)
# return wavefunction
return self._wfs[key]
def __setitem__(self,key,value):
# check that key is in the correct range
self.__check_key(key)
# store wavefunction
self._wfs[key]=np.array(value,dtype=complex)
def impose_pbc(self,mesh_dir,k_dir):
r"""
If the *wf_array* object was populated using the
:func:`pythtb.wf_array.solve_on_grid` method, this function
should not be used since it will be called automatically by
the code.
The eigenfunctions :math:`\Psi_{n {\bf k}}` are by convention
chosen to obey a periodic gauge, i.e.,
:math:`\Psi_{n,{\bf k+G}}=\Psi_{n {\bf k}}` not only up to a
phase, but they are also equal in phase. It follows that
the cell-periodic Bloch functions are related by
:math:`u_{n,{\bf k+G}}=e^{-i{\bf G}\cdot{\bf r}} u_{n {\bf k}}`.
See :download:`notes on tight-binding formalism
<misc/pythtb-formalism.pdf>` section 4.4 and equation 4.18 for
more detail. This routine sets the cell-periodic Bloch function
at the end of the string in direction :math:`{\bf G}` according
to this formula, overwriting the previous value.
This function will impose these periodic boundary conditions along
one direction of the array. We are assuming that the k-point
mesh increases by exactly one reciprocal lattice vector along
this direction. This is currently **not** checked by the code;
it is the responsibility of the user. Currently *wf_array*
does not store the k-vectors on which the model was solved;
it only stores the eigenvectors (wavefunctions).
:param mesh_dir: Direction of wf_array along which you wish to
impose periodic boundary conditions.
:param k_dir: Index of the periodic k-vector direction in the
Brillouin zone of the underlying *tb_model* to which *mesh_dir*
corresponds. Since version 1.7.0 this parameter is defined so
that it is specified between 0 and *dim_r-1*.
See example :ref:`3site_cycle-example`, where the periodic boundary
condition is applied only along one direction of *wf_array*.
Example usage::
# Imposes periodic boundary conditions along the mesh_dir=0
# direction of the wf_array object, assuming that along that
# direction the k_dir=1 component of the k-vector is increased
# by one reciprocal lattice vector. This could happen, for
# example, if the underlying tb_model is two dimensional but
# wf_array is a one-dimensional path along k_y direction.
wf.impose_pbc(mesh_dir=0,k_dir=1)
"""
if k_dir not in self._model._per:
raise Exception("Periodic boundary condition can be specified only along periodic directions!")
# Compute phase factors
ffac=np.exp(-2.j*np.pi*self._orb[:,k_dir])
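# ffac[j] is exp(-i G . r_j) evaluated at the reduced coordinate of
# orbital j along k_dir; multiplying the first slice by this phase
# implements u_{n,k+G} = exp(-i G.r) u_{n,k} at the end of the string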
if self._nspin==1:
phase=ffac
else:
# for spinors, same phase multiplies both components
phase=np.zeros((self._norb,2),dtype=complex)
phase[:,0]=ffac
phase[:,1]=ffac
# Copy first eigenvector onto last one, multiplying by phase factors
# We can use numpy broadcasting since the orbital index is last
if mesh_dir==0:
self._wfs[-1,...]=self._wfs[0,...]*phase
elif mesh_dir==1:
self._wfs[:,-1,...]=self._wfs[:,0,...]*phase
elif mesh_dir==2:
self._wfs[:,:,-1,...]=self._wfs[:,:,0,...]*phase
elif mesh_dir==3:
self._wfs[:,:,:,-1,...]=self._wfs[:,:,:,0,...]*phase
else:
raise Exception("\n\nWrong value of mesh_dir.")
def impose_loop(self,mesh_dir):
r"""
If the user knows that the first and last points along the
*mesh_dir* direction correspond to the same Hamiltonian (this
is **not** checked), then this routine can be used to set the
eigenvectors equal (with equal phase), by replacing the last
eigenvector with the first one (for each band, and for each
other mesh direction, if any).
This routine should not be used if the first and last points
are related by a reciprocal lattice vector; in that case,
:func:`pythtb.wf_array.impose_pbc` should be used instead.
:param mesh_dir: Direction of wf_array along which you wish to
impose periodic boundary conditions.
Example usage::
# Suppose the wf_array object is three-dimensional
# corresponding to (kx,ky,lambda) where (kx,ky) are
# wavevectors of a 2D insulator and lambda is an
# adiabatic parameter that goes around a closed loop.
# Then to ensure that the states at the ends of the lambda
# path are equal (with equal phase) in preparation for
# computing Berry phases in lambda for given (kx,ky),
# do wf.impose_loop(mesh_dir=2)
"""
# Copy first eigenvector onto last one
if mesh_dir==0:
self._wfs[-1,...]=self._wfs[0,...]
elif mesh_dir==1:
self._wfs[:,-1,...]=self._wfs[:,0,...]
elif mesh_dir==2:
self._wfs[:,:,-1,...]=self._wfs[:,:,0,...]
elif mesh_dir==3:
self._wfs[:,:,:,-1,...]=self._wfs[:,:,:,0,...]
else:
raise Exception("\n\nWrong value of mesh_dir.")
def berry_phase(self,occ,dir=None,contin=True,berry_evals=False):
r"""
Computes the Berry phase along a given array direction and
for a given set of occupied states. This assumes that the
occupied bands are well separated in energy from unoccupied
bands. It is the responsibility of the user to check that
this is satisfied. By default, the Berry phase traced over
occupied bands is returned, but optionally the individual
phases of the eigenvalues of the global unitary rotation
matrix (corresponding to "maximally localized Wannier
centers" or "Wilson loop eigenvalues") can be requested
(see parameter *berry_evals* for more details).
For an array of size *N* in direction *dir*, the Berry phase
is computed from the *N-1* inner products of neighboring
eigenfunctions. This corresponds to an "open-path Berry
phase" if the first and last points have no special
relation. If they correspond to the same physical
Hamiltonian, and have been properly aligned in phase using
:func:`pythtb.wf_array.impose_pbc` or
:func:`pythtb.wf_array.impose_loop`, then a closed-path
Berry phase will be computed.
For a one-dimensional wf_array (i.e., a single string), the
computed Berry phases are always chosen to be between -pi and pi.
For a higher dimensional wf_array, the Berry phase is computed
for each one-dimensional string of points, and an array of
Berry phases is returned. The Berry phase for the first string
(with lowest index) is always constrained to be between -pi and
pi. The range of the remaining phases depends on the value of
the input parameter *contin*.
The discretized formula used to compute Berry phase is described
in Sec. 4.5 of :download:`notes on tight-binding formalism
<misc/pythtb-formalism.pdf>`.
:param occ: Array of indices of energy bands which are considered
to be occupied.
:param dir: Index of wf_array direction along which Berry phase is
computed. This parameter need not be specified for
a one-dimensional wf_array.
:param contin: Optional boolean parameter. If True then the
branch choice of the Berry phase (which is indeterminate
modulo 2*pi) is made so that neighboring strings (in the
direction of increasing index value) have as close as
possible phases. The phase of the first string (with lowest
index) is always constrained to be between -pi and pi. If
False, the Berry phase for every string is constrained to be
between -pi and pi. The default value is True.
:param berry_evals: Optional boolean parameter. If True then
will compute and return the phases of the eigenvalues of the
product of overlap matrices. (These numbers correspond also
to hybrid Wannier function centers.) These phases are either
forced to be between -pi and pi (if *contin* is *False*) or
they are made to be continuous (if *contin* is True).
:returns:
* **pha** -- If *berry_evals* is False (default value) then
returns the Berry phase for each string. For a
one-dimensional wf_array this is just one number. For a
higher-dimensional wf_array *pha* contains one phase for
each one-dimensional string in the following format. For
example, if *wf_array* contains k-points on mesh with
indices [i,j,k] and if direction along which Berry phase
is computed is *dir=1* then *pha* will be two dimensional
array with indices [i,k], since Berry phase is computed
along second direction. If *berry_evals* is True then for
each string returns phases of all eigenvalues of the
product of overlap matrices. In the convention used for
previous example, *pha* in this case would have indices
[i,k,n] where *n* refers to index of individual phase of
the product matrix eigenvalue.
Example usage::
# Computes Berry phases along second direction for three lowest
# occupied states. For example, if wf is three-dimensional, then
# pha[2,3] would correspond to Berry phase of string of states
# along wf[2,:,3]
pha = wf.berry_phase([0, 1, 2], 1)
See also these examples: :ref:`haldane_bp-example`,
:ref:`cone-example`, :ref:`3site_cycle-example`,
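A self-contained sketch of the one-dimensional mechanics (the dimerized
chain below is a hypothetical toy model, not a reference result)::
from pythtb import tb_model, wf_array
chain = tb_model(1, 1, [[1.0]], [[0.0], [0.5]])
chain.set_onsite([0.5, -0.5])
chain.set_hop(-1.0, 0, 1, [0])
chain.set_hop(-0.3, 1, 0, [1])
wf = wf_array(chain, [31])
wf.solve_on_grid([0.0]) # also imposes the periodic gauge
phase = wf.berry_phase([0]) # Berry phase of the lowest band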
"""
# check if model came from w90
if self._model._assume_position_operator_diagonal==False:
_offdiag_approximation_warning_and_stop()
#if dir<0 or dir>self._dim_arr-1:
# raise Exception("\n\nDirection key out of range")
#
# This could be coded more efficiently, but it is hard-coded for now.
#
# 1D case
if self._dim_arr==1:
# pick which wavefunctions to use
wf_use=self._wfs[:,occ,:]
# calculate berry phase
ret=_one_berry_loop(wf_use,berry_evals)
# 2D case
elif self._dim_arr==2:
# choice along which direction you wish to calculate berry phase
if dir==0:
ret=[]
for i in range(self._mesh_arr[1]):
wf_use=self._wfs[:,i,:,:][:,occ,:]
ret.append(_one_berry_loop(wf_use,berry_evals))
elif dir==1:
ret=[]
for i in range(self._mesh_arr[0]):
wf_use=self._wfs[i,:,:,:][:,occ,:]
ret.append(_one_berry_loop(wf_use,berry_evals))
else:
raise Exception("\n\nWrong direction for Berry phase calculation!")
# 3D case
elif self._dim_arr==3:
# choice along which direction you wish to calculate berry phase
if dir==0:
ret=[]
for i in range(self._mesh_arr[1]):
ret_t=[]
for j in range(self._mesh_arr[2]):
wf_use=self._wfs[:,i,j,:,:][:,occ,:]
ret_t.append(_one_berry_loop(wf_use,berry_evals))
ret.append(ret_t)
elif dir==1:
ret=[]
for i in range(self._mesh_arr[0]):
ret_t=[]
for j in range(self._mesh_arr[2]):
wf_use=self._wfs[i,:,j,:,:][:,occ,:]
ret_t.append(_one_berry_loop(wf_use,berry_evals))
ret.append(ret_t)
elif dir==2:
ret=[]
for i in range(self._mesh_arr[0]):
ret_t=[]
for j in range(self._mesh_arr[1]):
wf_use=self._wfs[i,j,:,:,:][:,occ,:]
ret_t.append(_one_berry_loop(wf_use,berry_evals))
ret.append(ret_t)
else:
raise Exception("\n\nWrong direction for Berry phase calculation!")
else:
raise Exception("\n\nWrong dimensionality!")
# convert phases to numpy array
if self._dim_arr>1 or berry_evals==True:
ret=np.array(ret,dtype=float)
# make phases of eigenvalues continuous
if contin==True:
# iron out 2pi jumps, make the gauge choice such that first phase in the
# list is fixed, others are then made continuous.
if berry_evals==False:
# 2D case
if self._dim_arr==2:
ret=_one_phase_cont(ret,ret[0])
# 3D case
elif self._dim_arr==3:
for i in range(ret.shape[1]):
if i==0: clos=ret[0,0]
else: clos=ret[0,i-1]
ret[:,i]=_one_phase_cont(ret[:,i],clos)
elif self._dim_arr!=1:
raise Exception("\n\nWrong dimensionality!")
# make eigenvalues continuous. This does not take care of band-character
# at band crossing for example it will just connect pairs that are closest
# at neighboring points.
else:
# 2D case
if self._dim_arr==2:
ret=_array_phases_cont(ret,ret[0,:])
# 3D case
elif self._dim_arr==3:
for i in range(ret.shape[1]):
if i==0: clos=ret[0,0,:]
else: clos=ret[0,i-1,:]
ret[:,i]=_array_phases_cont(ret[:,i],clos)
elif self._dim_arr!=1:
raise Exception("\n\nWrong dimensionality!")
return ret
def position_matrix(self, key, occ, dir):
"""Similar to :func:`pythtb.tb_model.position_matrix`. Only
difference is that states are now specified with key in the
mesh *key* and indices of bands *occ*."""
# check if model came from w90
if self._model._assume_position_operator_diagonal==False:
_offdiag_approximation_warning_and_stop()
#
evec=self._wfs[tuple(key)][occ]
return self._model.position_matrix(evec,dir)
def position_expectation(self, key, occ, dir):
"""Similar to :func:`pythtb.tb_model.position_expectation`. Only
difference is that states are now specified with key in the
mesh *key* and indices of bands *occ*."""
# check if model came from w90
if self._model._assume_position_operator_diagonal==False:
_offdiag_approximation_warning_and_stop()
#
evec=self._wfs[tuple(key)][occ]
return self._model.position_expectation(evec,dir)
def position_hwf(self, key, occ, dir, hwf_evec=False, basis="bloch"):
"""Similar to :func:`pythtb.tb_model.position_hwf`. Only
difference is that states are now specified with key in the
mesh *key* and indices of bands *occ*."""
# check if model came from w90
if self._model._assume_position_operator_diagonal==False:
_offdiag_approximation_warning_and_stop()
#
evec=self._wfs[tuple(key)][occ]
return self._model.position_hwf(evec,dir,hwf_evec,basis)
def berry_flux(self,occ,dirs=None,individual_phases=False):
r"""
In the case of a 2-dimensional *wf_array* array calculates the
integral of Berry curvature over the entire plane. In higher
dimensional case (3 or 4) it will compute integrated curvature
over all 2-dimensional slices of a higher-dimensional
*wf_array*.
:param occ: Array of indices of energy bands which are considered
to be occupied.
:param dirs: Array of indices of two wf_array directions on which
the Berry flux is computed. This parameter need not be
specified for a two-dimensional wf_array. By default *dirs* takes
the first two directions in the array.
:param individual_phases: If *True* then returns Berry phase
for each plaquette (small square) in the array. Default
value is *False*.
:returns:
* **flux** -- In a 2-dimensional case returns an integral
of Berry curvature (if *individual_phases* is *True* then
returns integral of Berry phase around each plaquette).
In higher dimensional case returns integral of Berry
curvature over all slices defined with directions *dirs*.
Returned value is an array over the remaining indices of
*wf_array*. (If *individual_phases* is *True* then it
returns again phases around each plaquette for each
slice. First indices define the slice, last two indices
index the plaquette.)
Example usage::
# Computes integral of Berry curvature of first three bands
flux = wf.berry_flux([0, 1, 2])
"""
# check if model came from w90
if self._model._assume_position_operator_diagonal==False:
_offdiag_approximation_warning_and_stop()
# default case is to take first two directions for flux calculation
if dirs==None:
dirs=[0,1]
# consistency checks
if dirs[0]==dirs[1]:
raise Exception("Need to specify two different directions for Berry flux calculation.")
if dirs[0]>=self._dim_arr or dirs[1]>=self._dim_arr or dirs[0]<0 or dirs[1]<0:
raise Exception("Direction for Berry flux calculation out of bounds.")
# 2D case
if self._dim_arr==2:
# compute the fluxes through all plaquettes on the entire plane
ord=list(range(len(self._wfs.shape)))
# select two directions from dirs
ord[0]=dirs[0]
ord[1]=dirs[1]
plane_wfs=self._wfs.transpose(ord)
# take bands of choice
plane_wfs=plane_wfs[:,:,occ]
# compute fluxes
all_phases=_one_flux_plane(plane_wfs)
# return either total flux or individual phase for each plaquette
if individual_phases==False:
return all_phases.sum()
else:
return all_phases
# 3D or 4D case
elif self._dim_arr in [3,4]:
# compute the fluxes through all plaquettes on the entire plane
ord=list(range(len(self._wfs.shape)))
# select two directions from dirs
ord[0]=dirs[0]
ord[1]=dirs[1]
# find directions over which we wish to loop
ld=list(range(self._dim_arr))
ld.remove(dirs[0])
ld.remove(dirs[1])
if len(ld)!=self._dim_arr-2:
raise Exception("Hm, this should not happen? Inconsistency with the mesh size.")
# add remaining indices
if self._dim_arr==3:
ord[2]=ld[0]
if self._dim_arr==4:
ord[2]=ld[0]
ord[3]=ld[1]
# reorder wavefunctions
use_wfs=self._wfs.transpose(ord)
# loop over the the remaining direction
if self._dim_arr==3:
slice_phases=np.zeros((self._mesh_arr[ord[2]],self._mesh_arr[dirs[0]]-1,self._mesh_arr[dirs[1]]-1),dtype=float)
for i in range(self._mesh_arr[ord[2]]):
# take a 2d slice
plane_wfs=use_wfs[:,:,i]
# take bands of choice
plane_wfs=plane_wfs[:,:,occ]
# compute fluxes on the slice
slice_phases[i,:,:]=_one_flux_plane(plane_wfs)
elif self._dim_arr==4:
slice_phases=np.zeros((self._mesh_arr[ord[2]],self._mesh_arr[ord[3]],self._mesh_arr[dirs[0]]-1,self._mesh_arr[dirs[1]]-1),dtype=float)
for i in range(self._mesh_arr[ord[2]]):
for j in range(self._mesh_arr[ord[3]]):
# take a 2d slice
plane_wfs=use_wfs[:,:,i,j]
# take bands of choice
plane_wfs=plane_wfs[:,:,occ]
# compute fluxes on the slice
slice_phases[i,j,:,:]=_one_flux_plane(plane_wfs)
# return either total flux or individual phase for each plaquette
if individual_phases==False:
return slice_phases.sum(axis=(-2,-1))
else:
return slice_phases
else:
raise Exception("\n\nWrong dimensionality!")
def berry_curv(self,occ,individual_phases=False):
r"""
.. warning:: This function has been renamed as :func:`pythtb.berry_flux` and is provided
here only for backwards compatibility with versions of pythtb prior to 1.7.0. Please
use related :func:`pythtb.berry_flux` as this function may not exist in future releases.
"""
print("""
Warning:
Usage of function berry_curv is discouraged.
It has been renamed as berry_flux, which should be used instead.
""")
return self.berry_flux(occ,individual_phases)
def k_path(kpts,nk,endpoint=True):
r"""
.. warning:: This function is here only for backwards compatibility
with version of pythtb prior to 1.7.0. Please use related :func:`pythtb.tb_model.k_path`
function as this function might not exist in the future releases of the code.
"""
print("""
Warning:
Usage of function k_path is discouraged.
Instead of the following code:
k_vec=k_path(...)
please use the following code:
(k_vec,k_dist,k_node)=my_model.k_path(...)
Note that this k_path function is a member of the tb_model class.
""")
if kpts=='full':
# this means the full Brillouin zone for 1D case
if endpoint==True:
return np.arange(nk+1,dtype=float)/float(nk)
else:
return np.arange(nk,dtype=float)/float(nk)
elif kpts=='half':
# this means the half Brillouin zone for 1D case
if endpoint==True:
return np.arange(nk+1,dtype=float)/float(2.*nk)
else:
return np.arange(nk,dtype=float)/float(2.*nk)
else:
# general case
kint=[]
k_list=np.array(kpts)
# go over all kpoints
for i in range(len(k_list)-1):
# go over all steps
for j in range(nk):
cur=k_list[i]+(k_list[i+1]-k_list[i])*float(j)/float(nk)
kint.append(cur)
# add last point
if endpoint==True:
kint.append(k_list[-1])
#
kint=np.array(kint)
return kint
def _nicefy_eig(eval,eig=None):
"Sort eigenvaules and eigenvectors, if given, and convert to real numbers"
# first take only real parts of the eigenvalues
eval=np.array(eval.real,dtype=float)
# sort energies
args=eval.argsort()
eval=eval[args]
if not (eig is None):
eig=eig[args]
return (eval,eig)
return eval
# for nice justified printout
def _nice_float(x,just,rnd):
return str(round(x,rnd)).rjust(just)
def _nice_int(x,just):
return str(x).rjust(just)
def _nice_complex(x,just,rnd):
ret=""
ret+=_nice_float(complex(x).real,just,rnd)
if complex(x).imag<0.0:
ret+=" - "
else:
ret+=" + "
ret+=_nice_float(abs(complex(x).imag),just,rnd)
ret+=" i"
return ret
def _wf_dpr(wf1,wf2):
"""calculate dot product between two wavefunctions.
wf1 and wf2 are of the form [orbital,spin]"""
return np.dot(wf1.flatten().conjugate(),wf2.flatten())
def _one_berry_loop(wf,berry_evals=False):
"""Do one Berry phase calculation (also returns a product of M
matrices). Always returns numbers between -pi and pi. wf has
format [kpnt,band,orbital,spin] and kpnt has to be one dimensional.
Assumes that first and last k-point are the same. Therefore if
there are n wavefunctions in total, will calculate phase along n-1
links only! If berry_evals is True then it will compute phases for
individual states; these correspond to 1d hybrid Wannier
function centers. Otherwise it just returns one number, the Berry phase."""
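# Sketch of the quantity computed below (my reading of the code, not an
# authoritative derivation): with overlap matrices M^(k,k+1)_jl =
# <u_{j,k}|u_{l,k+1}> built between neighboring k-points, the returned
# multiband Berry phase is phi = -Im ln det( M^(0,1) M^(1,2) ... M^(n-2,n-1) ),
# while with berry_evals=True the individual phases are the sorted phases of
# the eigenvalues of the product of the SVD-unitarized overlap matrices.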
# number of occupied states
nocc=wf.shape[1]
# temporary matrices
prd=np.identity(nocc,dtype=complex)
ovr=np.zeros([nocc,nocc],dtype=complex)
# go over all pairs of k-points, assuming that last point is overcounted!
for i in range(wf.shape[0]-1):
# generate overlap matrix, go over all bands
for j in range(nocc):
for k in range(nocc):
ovr[j,k]=_wf_dpr(wf[i,j,:],wf[i+1,k,:])
# only find Berry phase
if berry_evals==False:
# multiply overlap matrices
prd=np.dot(prd,ovr)
# also find phases of individual eigenvalues
else:
# cleanup matrices with SVD then take product
matU,sing,matV=np.linalg.svd(ovr)
prd=np.dot(prd,np.dot(matU,matV))
# calculate Berry phase
if berry_evals==False:
det=np.linalg.det(prd)
pha=(-1.0)*np.angle(det)
return pha
# calculate phases of all eigenvalues
else:
evals=np.linalg.eigvals(prd)
eval_pha=(-1.0)*np.angle(evals)
# sort these numbers as well
eval_pha=np.sort(eval_pha)
return eval_pha
def _one_flux_plane(wfs2d):
"Compute fluxes on a two-dimensional plane of states."
# size of the mesh
nk0=wfs2d.shape[0]
nk1=wfs2d.shape[1]
# number of bands (will compute flux of all bands taken together)
nbnd=wfs2d.shape[2]
# here store flux through each plaquette of the mesh
all_phases=np.zeros((nk0-1,nk1-1),dtype=float)
# go over all plaquettes
for i in range(nk0-1):
for j in range(nk1-1):
# generate a small loop made out of four pieces
wf_use=[]
wf_use.append(wfs2d[i,j])
wf_use.append(wfs2d[i+1,j])
wf_use.append(wfs2d[i+1,j+1])
wf_use.append(wfs2d[i,j+1])
wf_use.append(wfs2d[i,j])
wf_use=np.array(wf_use,dtype=complex)
# calculate phase around one plaquette
all_phases[i,j]=_one_berry_loop(wf_use)
return all_phases
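# Usage sketch with hypothetical shapes: for a 31x31 k-mesh with 2 occupied
# bands and 4 orbitals, wfs2d has shape (31, 31, 2, 4) and the returned
# all_phases has shape (30, 30); summing all_phases gives the total Berry
# flux through the plane, which is how berry_flux() uses it above.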
def no_2pi(x,clos):
"Make x as close to clos by adding or removing 2pi"
while abs(clos-x)>np.pi:
if clos-x>np.pi:
x+=2.0*np.pi
elif clos-x<-1.0*np.pi:
x-=2.0*np.pi
return x
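# Example with approximate numbers: no_2pi(6.5, 0.0) subtracts one factor
# of 2*pi and returns roughly 0.217, since 6.5 is more than pi away from 0.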
def _one_phase_cont(pha,clos):
"""Reads in 1d array of numbers *pha* and makes sure that they are
continuous, i.e., that there are no jumps of 2pi. First number is
made as close to *clos* as possible."""
ret=np.copy(pha)
# go through entire list and "iron out" 2pi jumps
for i in range(len(ret)):
# which number to compare to
if i==0: cmpr=clos
else: cmpr=ret[i-1]
# make sure there are no 2pi jumps
ret[i]=no_2pi(ret[i],cmpr)
return ret
def _array_phases_cont(arr_pha,clos):
"""Reads in 2d array of phases *arr_pha* and makes sure that they
are continuous along first index, i.e., that there are no jumps of
2pi. The first array of phases is made as close to *clos* as
possible."""
ret=np.zeros_like(arr_pha)
# go over all points
for i in range(arr_pha.shape[0]):
# which phases to compare to
if i==0: cmpr=clos
else: cmpr=ret[i-1,:]
# remember which indices are still available to be matched
avail=list(range(arr_pha.shape[1]))
# go over all phases in cmpr[:]
for j in range(cmpr.shape[0]):
# minimal distance between pairs
min_dist=1.0E10
# closest index
best_k=None
# go over each phase in arr_pha[i,:]
for k in avail:
cur_dist=np.abs(np.exp(1.0j*cmpr[j])-np.exp(1.0j*arr_pha[i,k]))
if cur_dist<=min_dist:
min_dist=cur_dist
best_k=k
# remove this index from being possible pair later
avail.pop(avail.index(best_k))
# store phase in correct place
ret[i,j]=arr_pha[i,best_k]
# make sure there are no 2pi jumps
ret[i,j]=no_2pi(ret[i,j],cmpr[j])
return ret
class w90(object):
r"""
This class of the PythTB package imports tight-binding model
parameters from an output of a `Wannier90
<http://www.wannier.org>`_ code.
The `Wannier90 <http://www.wannier.org>`_ code is a
post-processing tool that takes as an input electron wavefunctions
and energies computed from first-principles using any of the
following codes: Quantum-Espresso (PWscf), AbInit, SIESTA, FLEUR,
Wien2k, VASP. As an output Wannier90 will create files that
contain parameters for a tight-binding model that exactly
reproduces the first-principles calculated electron band
structure.
The interface from Wannier90 to PythTB will use only the following
files created by Wannier90:
- *prefix*.win
- *prefix*\_hr.dat
- *prefix*\_centres.xyz
- *prefix*\_band.kpt (optional)
- *prefix*\_band.dat (optional)
The first file (*prefix*.win) is an input file to Wannier90 itself. This
file is needed so that PythTB can read in the unit cell vectors.
To correctly create the second and the third file (*prefix*\_hr.dat and
*prefix*\_centres.xyz) one needs to include the following flags in the win
file::
hr_plot = True
write_xyz = True
translate_home_cell = False
These lines ensure that *prefix*\_hr.dat and *prefix*\_centres.xyz
are written and that the centers of the Wannier functions written
in the *prefix*\_centres.xyz file are not translated to the home
cell. The *prefix*\_hr.dat file contains the onsite and hopping
terms.
The final two files (*prefix*\_band.kpt and *prefix*\_band.dat)
are optional. Please see documentation of function
:func:`pythtb.w90.w90_bands_consistency` for more detail.
So far we tested only Wannier90 version 2.0.1.
.. warning:: For the time being PythTB is not optimized to be used
with very large tight-binding models. Therefore it is not
advisable to use the interface to Wannier90 with large
first-principles calculations that contain many k-points and/or
electron bands. One way to reduce the computational cost is to
wannierize with Wannier90 only the bands of interest (for
example, bands near the Fermi level).
Units used throughout this interface with Wannier90 are
electron-volts (eV) and Angstroms.
.. warning:: User needs to make sure that the Wannier functions
computed using Wannier90 code are well localized. Otherwise the
tight-binding model might not interpolate well the band
structure. To ensure that the Wannier functions are well
localized it is often enough to check that the total spread at
the beginning of the minimization procedure (first total spread
printed in .wout file) is not more than 20% larger than the
total spread at the end of the minimization procedure. If those
spreads differ by much more than 20%, the user needs to specify
better initial projection functions.
In addition, please note that the interpolation is valid only
within the frozen energy window of the disentanglement
procedure.
.. warning:: So far PythTB assumes that the position operator is
diagonal in the tight-binding basis. This is discussed in the
:download:`notes on tight-binding formalism
<misc/pythtb-formalism.pdf>` in Eq. 2.7.,
:math:`\langle\phi_{{\bf R} i} \vert {\bf r} \vert \phi_{{\bf
R}' j} \rangle = ({\bf R} + {\bf t}_j) \delta_{{\bf R} {\bf R}'}
\delta_{ij}`. However, this relation does not hold for Wannier
functions! Therefore, if you use tight-binding model derived
from this class in computing Berry-like objects that involve
position operator such as Berry phase or Berry flux, you would
not get the same result as if you computed those objects
directly from the first-principles code! Nevertheless, this
approximation does not affect other properties such as band
structure dispersion.
For testing purposes the user can download the following
:download:`wannier90 output example
<misc/wannier90_example.tar.gz>` and use the following
:ref:`script <w90_quick>` to test the functionality of the interface to
PythTB. Run the following command in unix terminal to decompress
the tarball::
tar -zxf wannier90_example.tar.gz
and then run the following :ref:`script <w90_quick>` in the same
folder.
:param path: Relative path to the folder that contains Wannier90
files. These are *prefix*.win, *prefix*\_hr.dat,
*prefix*\_centres.xyz and optionally *prefix*\_band.kpt and
*prefix*\_band.dat.
:param prefix: This is the prefix used by Wannier90 code.
Typically the input to the Wannier90 code is named *prefix*.win.
Initially this function will read in the entire Wannier90 output.
To create a :class:`pythtb.tb_model` object the user needs to call
:func:`pythtb.w90.model`.
Example usage::
# reads Wannier90 from folder called *example_a*
# it assumes that that folder contains files "silicon.win" and so on
silicon=w90("example_a", "silicon")
"""
def __init__(self,path,prefix):
# store path and prefix
self.path=path
self.prefix=prefix
# read in lattice_vectors
f=open(self.path+"/"+self.prefix+".win","r")
ln=f.readlines()
f.close()
# get lattice vector
self.lat=np.zeros((3,3),dtype=float)
found=False
for i in range(len(ln)):
sp=ln[i].split()
if len(sp)>=2:
if sp[0].lower()=="begin" and sp[1].lower()=="unit_cell_cart":
# get units right
if ln[i+1].strip().lower()=="bohr":
pref=0.5291772108
skip=1
elif ln[i+1].strip().lower() in ["ang","angstrom"]:
pref=1.0
skip=1
else:
pref=1.0
skip=0
# now get vectors
for j in range(3):
sp=ln[i+skip+1+j].split()
for k in range(3):
self.lat[j,k]=float(sp[k])*pref
found=True
break
if found==False:
raise Exception("Unable to find unit_cell_cart block in the .win file.")
# read in hamiltonian matrix, in eV
f=open(self.path+"/"+self.prefix+"_hr.dat","r")
ln=f.readlines()
f.close()
#
# get number of wannier functions
self.num_wan=int(ln[1])
# get number of Wigner-Seitz points
num_ws=int(ln[2])
# get degeneracies of Wigner-Seitz points
deg_ws=[]
for j in range(3,len(ln)):
sp=ln[j].split()
for s in sp:
deg_ws.append(int(s))
if len(deg_ws)==num_ws:
last_j=j
break
if len(deg_ws)>num_ws:
raise Exception("Too many degeneracies for WS points!")
deg_ws=np.array(deg_ws,dtype=int)
# now read in matrix elements
# Convention used in w90 is to write out:
# R1, R2, R3, i, j, ham_r(i,j,R)
# where ham_r(i,j,R) corresponds to matrix element < i | H | j+R >
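# Illustration with made-up numbers: a _hr.dat line such as
#   0  0  0    1  1   -3.712092   0.000000
# would be stored below as ham_r[(0,0,0)]["h"][0,0] = -3.712092+0.0j.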
self.ham_r={} # format is ham_r[(R1,R2,R3)]["h"][i,j] for < i | H | j+R >
ind_R=0 # which R vector in line is this?
for j in range(last_j+1,len(ln)):
sp=ln[j].split()
# get reduced lattice vector components
ham_R1=int(sp[0])
ham_R2=int(sp[1])
ham_R3=int(sp[2])
# get Wannier indices
ham_i=int(sp[3])-1
ham_j=int(sp[4])-1
# get matrix element
ham_val=float(sp[5])+1.0j*float(sp[6])
# store stuff, for each R store hamiltonian and degeneracy
ham_key=(ham_R1,ham_R2,ham_R3)
if (ham_key in self.ham_r)==False:
self.ham_r[ham_key]={
"h":np.zeros((self.num_wan,self.num_wan),dtype=complex),
"deg":deg_ws[ind_R]
}
ind_R+=1
self.ham_r[ham_key]["h"][ham_i,ham_j]=ham_val
# check if for every non-zero R there is also -R
for R in self.ham_r:
if not (R[0]==0 and R[1]==0 and R[2]==0):
found_pair=False
for P in self.ham_r:
if not (P[0]==0 and P[1]==0 and P[2]==0):
# check if they are opposite
if R[0]==-P[0] and R[1]==-P[1] and R[2]==-P[2]:
if found_pair==True:
raise Exception("Found duplicate negative R!")
found_pair=True
if found_pair==False:
raise Exception("Did not find negative R for R = "+R+"!")
# read in wannier centers
f=open(self.path+"/"+self.prefix+"_centres.xyz","r")
ln=f.readlines()
f.close()
# Wannier centers in Cartesian, Angstroms
xyz_cen=[]
for i in range(2,2+self.num_wan):
sp=ln[i].split()
if sp[0]=="X":
tmp=[]
for j in range(3):
tmp.append(float(sp[j+1]))
xyz_cen.append(tmp)
else:
raise Exception("Inconsistency in the centres file.")
self.xyz_cen=np.array(xyz_cen,dtype=float)
# get orbital positions in reduced coordinates
self.red_cen=_cart_to_red((self.lat[0],self.lat[1],self.lat[2]),self.xyz_cen)
def model(self,zero_energy=0.0,min_hopping_norm=None,max_distance=None,ignorable_imaginary_part=None):
"""
This function returns :class:`pythtb.tb_model` object that can
be used to interpolate the band structure at arbitrary
k-point, analyze the wavefunction character, etc.
The tight-binding basis orbitals in the returned object are
maximally localized Wannier functions as computed by
Wannier90. The orbital character of these functions can be
inferred either from the *projections* block in the
*prefix*.win or from the *prefix*.nnkp file. Please note that
the character of the maximally localized Wannier functions is
not exactly the same as that specified by the initial
projections. One way to ensure that the Wannier functions are
as close to the initial projections as possible is to first
choose a good set of initial projections (for these initial
and final spread should not differ more than 20%) and then
perform another Wannier90 run setting *num_iter=0* in the
*prefix*.win file.
Number of spin components is always set to 1, even if the
underlying DFT calculation includes spin. Please refer to the
*projections* block or the *prefix*.nnkp file to see which
orbitals correspond to which spin.
Locations of the orbitals in the returned
:class:`pythtb.tb_model` object are equal to the centers of
the Wannier functions computed by Wannier90.
:param zero_energy: Sets the zero of the energy in the band
structure. This value is typically set to the Fermi level
computed by the density-functional code (or to the top of the
valence band). Units are electron-volts.
:param min_hopping_norm: Hopping terms read from Wannier90 with
complex norm less than *min_hopping_norm* will not be included
in the returned tight-binding model. This parameter is
specified in electron-volts. By default all terms regardless
of their norm are included.
:param max_distance: Hopping terms from site *i* to site *j+R* will
be ignored if the distance from orbital *i* to *j+R* is larger
than *max_distance*. This parameter is given in Angstroms.
By default all terms regardless of the distance are included.
:param ignorable_imaginary_part: The hopping term will be assumed to
be exactly real if the absolute value of the imaginary part as
computed by Wannier90 is less than *ignorable_imaginary_part*.
By default imaginary terms are not ignored. Units are again
eV.
:returns:
* **tb** -- The object of type :class:`pythtb.tb_model` that can be used to
interpolate Wannier90 band structure to an arbitrary k-point as well
as to analyze the character of the wavefunctions.
Example usage::
# returns tb_model with all hopping parameters
my_model=silicon.model()
# simplified model that contains only hopping terms above 0.01 eV
my_model_simple=silicon.model(min_hopping_norm=0.01)
my_model_simple.display()
"""
# make the model object
tb=tb_model(3,3,self.lat,self.red_cen)
# remember that this model was computed from w90
tb._assume_position_operator_diagonal=False
# add onsite energies
onsite=np.zeros(self.num_wan,dtype=float)
for i in range(self.num_wan):
tmp_ham=self.ham_r[(0,0,0)]["h"][i,i]/float(self.ham_r[(0,0,0)]["deg"])
onsite[i]=tmp_ham.real
if np.abs(tmp_ham.imag)>1.0E-9:
raise Exception("Onsite terms should be real!")
tb.set_onsite(onsite-zero_energy)
# add hopping terms
for R in self.ham_r:
# avoid double counting
use_this_R=True
# avoid onsite terms
if R[0]==0 and R[1]==0 and R[2]==0:
avoid_diagonal=True
else:
avoid_diagonal=False
# avoid taking both R and -R
if R[0]!=0:
if R[0]<0:
use_this_R=False
else:
if R[1]!=0:
if R[1]<0:
use_this_R=False
else:
if R[2]<0:
use_this_R=False
# get R vector
vecR=_red_to_cart((self.lat[0],self.lat[1],self.lat[2]),[R])[0]
# scan through unique R
if use_this_R==True:
for i in range(self.num_wan):
vec_i=self.xyz_cen[i]
for j in range(self.num_wan):
vec_j=self.xyz_cen[j]
# get distance between orbitals
dist_ijR=np.sqrt(np.dot(-vec_i+vec_j+vecR,
-vec_i+vec_j+vecR))
# to prevent double counting
if not (avoid_diagonal==True and j<=i):
# only if distance between orbitals is small enough
if max_distance is not None:
if dist_ijR>max_distance:
continue
# divide the matrix element from w90 with the degeneracy
tmp_ham=self.ham_r[R]["h"][i,j]/float(self.ham_r[R]["deg"])
# only if big enough matrix element
if min_hopping_norm is not None:
if np.abs(tmp_ham)<min_hopping_norm:
continue
# remove imaginary part if needed
if ignorable_imaginary_part is not None:
if np.abs(tmp_ham.imag)<ignorable_imaginary_part:
tmp_ham=tmp_ham.real+0.0j
# set the hopping term
tb.set_hop(tmp_ham,i,j,list(R))
return tb
def dist_hop(self):
"""
This is one of the diagnostic tools that can be used to help
in determining *min_hopping_norm* and *max_distance* parameter in
:func:`pythtb.w90.model` function call.
This function returns all hopping terms (from orbital *i* to
*j+R*) as well as the distances between the *i* and *j+R*
orbitals. For well localized Wannier functions the hopping terms
should decay exponentially with distance.
:returns:
* **dist** -- Distances between Wannier function centers (*i* and *j+R*) in Angstroms.
* **ham** -- Corresponding hopping terms in eV.
Example usage::
# get distances and hopping terms
(dist,ham)=silicon.dist_hop()
# plot logarithm of the hopping term as a function of distance
import pylab as plt
fig, ax = plt.subplots()
ax.scatter(dist,np.log(np.abs(ham)))
fig.savefig("localization.pdf")
"""
ret_ham=[]
ret_dist=[]
for R in self.ham_r:
# treat diagonal terms differently
if R[0]==0 and R[1]==0 and R[2]==0:
avoid_diagonal=True
else:
avoid_diagonal=False
# get R vector
vecR=_red_to_cart((self.lat[0],self.lat[1],self.lat[2]),[R])[0]
for i in range(self.num_wan):
vec_i=self.xyz_cen[i]
for j in range(self.num_wan):
vec_j=self.xyz_cen[j]
# diagonal terms
if not (avoid_diagonal==True and i==j):
# divide the matrix element from w90 with the degeneracy
ret_ham.append(self.ham_r[R]["h"][i,j]/float(self.ham_r[R]["deg"]))
# get distance between orbitals
ret_dist.append(np.sqrt(np.dot(-vec_i+vec_j+vecR,-vec_i+vec_j+vecR)))
return (np.array(ret_dist),np.array(ret_ham))
def shells(self,num_digits=2):
"""
This is one of the diagnostic tools that can be used to help
in determining *max_distance* parameter in
:func:`pythtb.w90.model` function call.
:param num_digits: Distances will be rounded to this many
digits. Default value is 2.
:returns:
* **shells** -- All distances between all Wannier function centers (*i* and *j+R*) in Angstroms.
Example usage::
# prints on screen all shells
print(silicon.shells())
"""
shells=[]
for R in self.ham_r:
# get R vector
vecR=_red_to_cart((self.lat[0],self.lat[1],self.lat[2]),[R])[0]
for i in range(self.num_wan):
vec_i=self.xyz_cen[i]
for j in range(self.num_wan):
vec_j=self.xyz_cen[j]
# get distance between orbitals
dist_ijR=np.sqrt(np.dot(-vec_i+vec_j+vecR,
-vec_i+vec_j+vecR))
# round it up
shells.append(round(dist_ijR,num_digits))
# remove duplicates and sort
shells=np.sort(list(set(shells)))
return shells
def w90_bands_consistency(self):
"""
This function reads in band structure as interpolated by
Wannier90. Please note that this is not the same as the band
structure calculated by the underlying DFT code. The two will
agree only on the coarse set of k-points that were used in
Wannier90 generation.
The purpose of this function is to compare the interpolation
in Wannier90 with that in PythTB. If no terms were ignored in
the call to :func:`pythtb.w90.model` then the two should
be exactly the same (up to numerical precision). Otherwise
one should expect deviations. However, if one carefully
chooses the cutoff parameters in :func:`pythtb.w90.model`
it is likely that one could reproduce the full band-structure
with only few dominant hopping terms. Please note that this
tests only the eigenenergies, not the eigenvectors (wavefunctions).
The code assumes that the following files were generated by
Wannier90,
- *prefix*\_band.kpt
- *prefix*\_band.dat
These files will be generated only if the *prefix*.win file
contains the *kpoint_path* block.
:returns:
* **kpts** -- k-points in reduced coordinates used in the
interpolation in Wannier90 code. The format of *kpts* is
the same as the one used by the input to
:func:`pythtb.tb_model.solve_all`.
* **ene** -- energies interpolated by Wannier90 in
eV. Format is ene[band,kpoint].
Example usage::
# get band structure from wannier90
(w90_kpt,w90_evals)=silicon.w90_bands_consistency()
# get simplified model
my_model_simple=silicon.model(min_hopping_norm=0.01)
# solve simplified model on the same k-path as in wannier90
evals=my_model_simple.solve_all(w90_kpt)
# plot comparison of the two
import pylab as plt
fig, ax = plt.subplots()
for i in range(evals.shape[0]):
ax.plot(range(evals.shape[1]),evals[i],"r-",zorder=-50)
for i in range(w90_evals.shape[0]):
ax.plot(range(w90_evals.shape[1]),w90_evals[i],"k-",zorder=-100)
fig.savefig("comparison.pdf")
"""
# read in kpoints in reduced coordinates
kpts=np.loadtxt(self.path+"/"+self.prefix+"_band.kpt",skiprows=1)
# ignore weights
kpts=kpts[:,:3]
# read in energies
ene=np.loadtxt(self.path+"/"+self.prefix+"_band.dat")
# ignore kpath distance
ene=ene[:,1]
# correct shape
ene=ene.reshape((self.num_wan,kpts.shape[0]))
return (kpts,ene)
def _cart_to_red(tmp,cart):
"Convert cartesian vectors cart to reduced coordinates of a1,a2,a3 vectors"
(a1,a2,a3)=tmp
# matrix with lattice vectors
cnv=np.array([a1,a2,a3])
# transpose a matrix
cnv=cnv.T
# invert a matrix
cnv=np.linalg.inv(cnv)
# reduced coordinates
red=np.zeros_like(cart,dtype=float)
for i in range(0,len(cart)):
red[i]=np.dot(cnv,cart[i])
return red
def _red_to_cart(tmp,red):
"Convert reduced to cartesian vectors."
(a1,a2,a3)=tmp
# cartesian coordinates
cart=np.zeros_like(red,dtype=float)
for i in range(0,len(cart)):
cart[i,:]=a1*red[i][0]+a2*red[i][1]+a3*red[i][2]
return cart
def _offdiag_approximation_warning_and_stop():
raise Exception("""
----------------------------------------------------------------------
It looks like you are trying to calculate Berry-like object that
involves position operator. However, you are using a tight-binding
model that was generated from Wannier90. This procedure introduces
approximation as it ignores off-diagonal elements of the position
operator in the Wannier basis. This is discussed here in more
detail:
http://physics.rutgers.edu/pythtb/usage.html#pythtb.w90
If you know what you are doing and wish to continue with the
calculation despite this approximation, please call the following
function on your tb_model object
my_model.ignore_position_operator_offdiagonal()
----------------------------------------------------------------------
""")
| lgpl-3.0 |
saikatgomes/recsys | code/lamdaMART.py | 1 | 7445 | import math
import numpy as np
import csv
# import pandas
from optparse import OptionParser
from sklearn.tree import DecisionTreeRegressor
from collections import defaultdict
from copy import deepcopy
from multiprocessing import Pool
from itertools import chain
import time
class Ensemble:
def __init__(self, rate):
self.trees = []
self.rate = rate
def __len__(self):
return len(self.trees)
def add(self, tree):
self.trees.append(tree)
def eval_one(self, object):
return self.eval([object])[0]
def eval(self, objects):
results = np.zeros(len(objects))
for tree in self.trees:
results += tree.predict(objects) * self.rate
return results
def remove(self, number):
self.trees = self.trees[:-number]
def groupby(score, query):
result = []
this_query = None
this_list = -1
for s, q in zip(score, query):
if q != this_query:
result.append([])
this_query = q
this_list += 1
result[this_list].append(s)
result = map(np.array, result)
return result
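# Small example of the grouping above: groupby([1, 2, 3, 4], [7, 7, 9, 9])
# returns [array([1, 2]), array([3, 4])], i.e. the scores split per query id.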
def compute_point_dcg(arg):
rel, i = arg
return (2 ** rel - 1) / math.log(i + 2, 2)
def compute_point_dcg2(arg):
rel, i = arg
if i == 0:
return rel
else:
return rel / (math.log(1 + i, 2))
def compute_dcg(array):
dcg = map(compute_point_dcg, zip(array, range(len(array))))
return sum(dcg)
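# Worked example (rounded): compute_dcg([3, 2]) =
#   (2**3 - 1)/log2(2) + (2**2 - 1)/log2(3) = 7.0 + 1.893 ~= 8.89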
def compute_ndcg(page, k=10):
idcg = compute_dcg(np.sort(page)[::-1][:k])
dcg = compute_dcg(page[:k])
if idcg == 0:
return 1
return dcg / idcg
def ndcg(prediction, true_score, query, k=10):
true_pages = groupby(true_score, query)
pred_pages = groupby(prediction, query)
total_ndcg = []
for q in range(len(true_pages)):
total_ndcg.append(compute_ndcg(true_pages[q][np.argsort(pred_pages[q])[::-1]], k))
return sum(total_ndcg) / len(total_ndcg)
def query_lambdas(page):
true_page, pred_page = page
worst_order = np.argsort(true_page)
true_page = true_page[worst_order]
pred_page = pred_page[worst_order]
page = true_page[np.argsort(pred_page)]
idcg = compute_dcg(np.sort(page)[::-1])
position_score = np.zeros((len(true_page), len(true_page)))
for i in xrange(len(true_page)):
for j in xrange(len(true_page)):
position_score[i, j] = compute_point_dcg((page[i], j))
lambdas = np.zeros(len(true_page))
for i in xrange(len(true_page)):
for j in xrange(len(true_page)):
if page[i] > page[j]:
delta_dcg = position_score[i][j] - position_score[i][i]
delta_dcg += position_score[j][i] - position_score[j][j]
delta_ndcg = abs(delta_dcg / idcg)
rho = 1 / (1 + math.exp(page[i] - page[j]))
lam = rho * delta_ndcg
lambdas[i] -= lam
lambdas[j] += lam
return lambdas
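# In brief, the loops above do the following: for every pair of positions
# where page[i] is more relevant than page[j], a RankNet-like factor
# rho = 1/(1 + exp(page[i] - page[j])) is scaled by |delta NDCG| of swapping
# the two positions and accumulated into per-document lambdas
# (lambdas[i] decreases, lambdas[j] increases), which a tree can be fit to.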
def compute_lambdas(prediction, true_score, query, k=10):
true_pages = groupby(true_score, query)
pred_pages = groupby(prediction, query)
print len(true_pages), "pages"
pool = Pool()
lambdas = pool.map(query_lambdas, zip(true_pages, pred_pages))
return list(chain(*lambdas))
def mart_responces(prediction, true_score):
return true_score - prediction
def learn(train_file, n_trees=10, learning_rate=0.1, k=10):
print "Loading train file"
train = np.loadtxt(train_file, delimiter=",", skiprows=1)
# validation = np.loadtxt(validation_file, delimiter=",", skiprows=1)
scores = train[:, 0]
# val_scores = train[:, 0]
queries = train[:, 1]
# val_queries = validation[:, 1]
features = train[:, 3:]
# val_features = validation[:, 3:]
ensemble = Ensemble(learning_rate)
print "Training starts..."
model_output = np.array([float(0)] * len(features))
# val_output = np.array([float(0)] * len(validation))
# print model_output
# best_validation_score = 0
time.clock()
for i in range(n_trees):
print " Iteration: " + str(i + 1)
# Compute pseudo responses (lambdas)
# which act as training labels for each document
start = time.clock()
print " --generating labels"
# lambdas = compute_lambdas(model_output, scores, queries, k)
lambdas = mart_responces(model_output, scores)
print " --done", str(time.clock() - start) + "sec"
# create tree and append it to the model
print " --fitting tree"
start = time.clock()
tree = DecisionTreeRegressor(max_depth=2)
# print "Distinct lambdas", set(lambdas)
tree.fit(features, lambdas)
print " ---done", str(time.clock() - start) + "sec"
print " --adding tree to ensemble"
ensemble.add(tree)
# update model score
print " --generating step prediction"
prediction = tree.predict(features)
# print "Distinct answers", set(prediction)
print " --updating full model output"
model_output += learning_rate * prediction
# print set(model_output)
# train_score
start = time.clock()
print " --scoring on train"
train_score = ndcg(model_output, scores, queries, 10)
print " --iteration train score " + str(train_score) + ", took " + str(time.clock() - start) + "sec to calculate"
# # validation score
# print " --scoring on validation"
# val_output += learning_rate * tree.predict(val_features)
# val_score = ndcg(val_output, val_scores, val_queries, 10)
# print " --iteration validation score " + str(val_score)
# if(validation_score > best_validation_score):
# best_validation_score = validation_score
# best_model_len = len(ensemble)
# # have we accidentally broken the ceiling?
# if (best_validation_score > 0.9):
# break
# rollback to best
# if len(ensemble) > best_model_len:
# ensemble.remove(len(ensemble) - best_model_len)
# finishing up
# print "final quality evaluation"
train_score = ndcg(ensemble.eval(features), scores, queries, 10)
# test_score = compute_ndcg(ensemble.eval(validation), validation_score)
# print "train %s, test %s" % (train_score, test_score)
print "Finished sucessfully."
print "------------------------------------------------"
return ensemble
def evaluate(model, fn):
predict = np.loadtxt(fn, delimiter=",", skiprows=1)
queries = predict[:, 1]
doc_id = predict[:, 2]
features = predict[:, 3:]
results = model.eval(features)
writer = csv.writer(open("result.csv", "w"))
for line in zip(queries, results, doc_id):
writer.writerow(line)
return "OK"
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-t", "--train", action="store", type="string", dest="train_file")
# parser.add_option("-v", "--validation", action="store", type="string", dest="val_file")
parser.add_option("-p", "--predict", action="store", type="string", dest="predict_file")
options, args = parser.parse_args()
iterations = 30
learning_rate = 0.001
model = learn(options.train_file,
# options.val_file,
n_trees=200)
evaluate(model, options.predict_file)
| apache-2.0 |
briancoutinho0905/2dsampling | util/stats/output.py | 90 | 7981 | # Copyright (c) 2005-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from chart import ChartOptions
class StatOutput(ChartOptions):
def __init__(self, jobfile, info, stat=None):
super(StatOutput, self).__init__()
self.jobfile = jobfile
self.stat = stat
self.invert = False
self.info = info
def display(self, name, printmode = 'G'):
import info
if printmode == 'G':
valformat = '%g'
elif printmode != 'F' and value > 1e6:
valformat = '%0.5e'
else:
valformat = '%f'
for job in self.jobfile.jobs():
value = self.info.get(job, self.stat)
if value is None:
return
if not isinstance(value, list):
value = [ value ]
if self.invert:
for i,val in enumerate(value):
if val != 0.0:
value[i] = 1 / val
valstring = ', '.join([ valformat % val for val in value ])
print '%-50s %s' % (job.name + ':', valstring)
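# Hedged usage sketch (the names below are illustrative, not taken from
# this file): given a parsed jobfile and an info proxy from the surrounding
# stats tooling, something like
#   output = StatOutput(jobfile, info, stat='ipc')
#   output.display('ipc', printmode='F')
# would print one formatted "<job name>: <value>" line per job.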
def graph(self, name, graphdir, proxy=None):
from os.path import expanduser, isdir, join as joinpath
from barchart import BarChart
from matplotlib.numerix import Float, array, zeros
import os, re, urllib
from jobfile import crossproduct
confgroups = self.jobfile.groups()
ngroups = len(confgroups)
skiplist = [ False ] * ngroups
groupopts = []
baropts = []
groups = []
for i,group in enumerate(confgroups):
if group.flags.graph_group:
groupopts.append(group.subopts())
skiplist[i] = True
elif group.flags.graph_bars:
baropts.append(group.subopts())
skiplist[i] = True
else:
groups.append(group)
has_group = bool(groupopts)
if has_group:
groupopts = [ group for group in crossproduct(groupopts) ]
else:
groupopts = [ None ]
if baropts:
baropts = [ bar for bar in crossproduct(baropts) ]
else:
raise AttributeError, 'No group selected for graph bars'
directory = expanduser(graphdir)
if not isdir(directory):
os.mkdir(directory)
html = file(joinpath(directory, '%s.html' % name), 'w')
print >>html, '<html>'
print >>html, '<title>Graphs for %s</title>' % name
print >>html, '<body>'
html.flush()
for options in self.jobfile.options(groups):
chart = BarChart(self)
data = [ [ None ] * len(baropts) for i in xrange(len(groupopts)) ]
enabled = False
stacked = 0
for g,gopt in enumerate(groupopts):
for b,bopt in enumerate(baropts):
if gopt is None:
gopt = []
job = self.jobfile.job(options + gopt + bopt)
if not job:
continue
if proxy:
import db
proxy.dict['system'] = self.info[job.system]
val = self.info.get(job, self.stat)
if val is None:
print 'stat "%s" for job "%s" not found' % \
(self.stat, job)
if isinstance(val, (list, tuple)):
if len(val) == 1:
val = val[0]
else:
stacked = len(val)
data[g][b] = val
if stacked == 0:
for i in xrange(len(groupopts)):
for j in xrange(len(baropts)):
if data[i][j] is None:
data[i][j] = 0.0
else:
for i in xrange(len(groupopts)):
for j in xrange(len(baropts)):
val = data[i][j]
if val is None:
data[i][j] = [ 0.0 ] * stacked
elif len(val) != stacked:
raise ValueError, "some stats stacked, some not"
data = array(data)
if data.sum() == 0:
continue
dim = len(data.shape)
x = data.shape[0]
xkeep = [ i for i in xrange(x) if data[i].sum() != 0 ]
y = data.shape[1]
ykeep = [ i for i in xrange(y) if data[:,i].sum() != 0 ]
data = data.take(xkeep, axis=0)
data = data.take(ykeep, axis=1)
if not has_group:
data = data.take([ 0 ], axis=0)
chart.data = data
bopts = [ baropts[i] for i in ykeep ]
bdescs = [ ' '.join([o.desc for o in opt]) for opt in bopts]
if has_group:
gopts = [ groupopts[i] for i in xkeep ]
gdescs = [ ' '.join([o.desc for o in opt]) for opt in gopts]
if chart.legend is None:
if stacked:
try:
chart.legend = self.info.rcategories
except:
chart.legend = [ str(i) for i in xrange(stacked) ]
else:
chart.legend = bdescs
if chart.xticks is None:
if has_group:
chart.xticks = gdescs
else:
chart.xticks = []
chart.graph()
names = [ opt.name for opt in options ]
descs = [ opt.desc for opt in options ]
if names[0] == 'run':
names = names[1:]
descs = descs[1:]
basename = '%s-%s' % (name, ':'.join(names))
desc = ' '.join(descs)
pngname = '%s.png' % basename
psname = '%s.eps' % re.sub(':', '-', basename)
epsname = '%s.ps' % re.sub(':', '-', basename)
chart.savefig(joinpath(directory, pngname))
chart.savefig(joinpath(directory, epsname))
chart.savefig(joinpath(directory, psname))
html_name = urllib.quote(pngname)
print >>html, '''%s<br><img src="%s"><br>''' % (desc, html_name)
html.flush()
print >>html, '</body>'
print >>html, '</html>'
html.close()
| bsd-3-clause |
ruiminshen/yolo-tf | model/yolo2/__init__.py | 1 | 6597 | """
Copyright (C) 2017, 申瑞珉 (Ruimin Shen)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import configparser
import os
import numpy as np
import pandas as pd
import tensorflow as tf
import utils
from . import inference
from .. import yolo
class Model(object):
def __init__(self, net, classes, anchors, training=False):
_, self.cell_height, self.cell_width, _ = net.get_shape().as_list()
cells = self.cell_height * self.cell_width
inputs = tf.reshape(net, [-1, cells, len(anchors), 5 + classes], name='inputs')
with tf.name_scope('regress'):
with tf.name_scope('inputs'):
with tf.name_scope('inputs_sigmoid') as name:
inputs_sigmoid = tf.nn.sigmoid(inputs[:, :, :, :3], name=name)
self.iou = tf.identity(inputs_sigmoid[:, :, :, 0], name='iou')
self.offset_xy = tf.identity(inputs_sigmoid[:, :, :, 1:3], name='offset_xy')
with tf.name_scope('wh') as name:
self.wh = tf.identity(tf.exp(inputs[:, :, :, 3:5]) * np.reshape(anchors, [1, 1, len(anchors), -1]), name=name)
with tf.name_scope('prob') as name:
self.prob = tf.identity(tf.nn.softmax(inputs[:, :, :, 5:]), name=name)
self.areas = tf.reduce_prod(self.wh, -1, name='areas')
_wh = self.wh / 2
self.offset_xy_min = tf.identity(self.offset_xy - _wh, name='offset_xy_min')
self.offset_xy_max = tf.identity(self.offset_xy + _wh, name='offset_xy_max')
self.wh01 = tf.identity(self.wh / np.reshape([self.cell_width, self.cell_height], [1, 1, 1, 2]), name='wh01')
self.wh01_sqrt = tf.sqrt(self.wh01, name='wh01_sqrt')
self.coords = tf.concat([self.offset_xy, self.wh01_sqrt], -1, name='coords')
if not training:
with tf.name_scope('detection'):
cell_xy = yolo.calc_cell_xy(self.cell_height, self.cell_width).reshape([1, cells, 1, 2])
self.xy = tf.identity(cell_xy + self.offset_xy, name='xy')
self.xy_min = tf.identity(cell_xy + self.offset_xy_min, name='xy_min')
self.xy_max = tf.identity(cell_xy + self.offset_xy_max, name='xy_max')
self.conf = tf.identity(tf.expand_dims(self.iou, -1) * self.prob, name='conf')
self.inputs = net
self.classes = classes
self.anchors = anchors
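# Shape sketch with hypothetical numbers: for a 13x13 grid, 5 anchors and
# 20 classes, `net` is expected to carry 5*(5+20)=125 channels per cell, and
# the reshape above yields inputs of shape [batch, 169, 5, 25], from which
# iou, offset_xy, wh and the class probabilities are sliced.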
class Objectives(dict):
def __init__(self, model, mask, prob, coords, offset_xy_min, offset_xy_max, areas):
self.model = model
with tf.name_scope('true'):
self.mask = tf.identity(mask, name='mask')
self.prob = tf.identity(prob, name='prob')
self.coords = tf.identity(coords, name='coords')
self.offset_xy_min = tf.identity(offset_xy_min, name='offset_xy_min')
self.offset_xy_max = tf.identity(offset_xy_max, name='offset_xy_max')
self.areas = tf.identity(areas, name='areas')
with tf.name_scope('iou') as name:
_offset_xy_min = tf.maximum(model.offset_xy_min, self.offset_xy_min, name='_offset_xy_min')
_offset_xy_max = tf.minimum(model.offset_xy_max, self.offset_xy_max, name='_offset_xy_max')
_wh = tf.maximum(_offset_xy_max - _offset_xy_min, 0.0, name='_wh')
_areas = tf.reduce_prod(_wh, -1, name='_areas')
areas = tf.maximum(self.areas + model.areas - _areas, 1e-10, name='areas')
iou = tf.truediv(_areas, areas, name=name)
with tf.name_scope('mask'):
best_box_iou = tf.reduce_max(iou, 2, True, name='best_box_iou')
best_box = tf.to_float(tf.equal(iou, best_box_iou), name='best_box')
mask_best = tf.identity(self.mask * best_box, name='mask_best')
mask_normal = tf.identity(1 - mask_best, name='mask_normal')
with tf.name_scope('dist'):
iou_dist = tf.square(model.iou - mask_best, name='iou_dist')
coords_dist = tf.square(model.coords - self.coords, name='coords_dist')
prob_dist = tf.square(model.prob - self.prob, name='prob_dist')
with tf.name_scope('objectives'):
cnt = np.multiply.reduce(iou_dist.get_shape().as_list())
self['iou_best'] = tf.identity(tf.reduce_sum(mask_best * iou_dist) / cnt, name='iou_best')
self['iou_normal'] = tf.identity(tf.reduce_sum(mask_normal * iou_dist) / cnt, name='iou_normal')
_mask_best = tf.expand_dims(mask_best, -1)
self['coords'] = tf.identity(tf.reduce_sum(_mask_best * coords_dist) / cnt, name='coords')
self['prob'] = tf.identity(tf.reduce_sum(_mask_best * prob_dist) / cnt, name='prob')
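# In short, the four objectives filled in above are: 'iou_best' and
# 'iou_normal' (squared confidence error inside/outside the responsible
# anchor mask), 'coords' (squared error of the xy offsets and sqrt'ed wh),
# and 'prob' (squared class-probability error); the Builder below weights
# each one and adds it to tf.GraphKeys.LOSSES.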
class Builder(yolo.Builder):
def __init__(self, args, config):
section = __name__.split('.')[-1]
self.args = args
self.config = config
with open(os.path.join(utils.get_cachedir(config), 'names'), 'r') as f:
self.names = [line.strip() for line in f]
self.width = config.getint(section, 'width')
self.height = config.getint(section, 'height')
self.anchors = pd.read_csv(os.path.expanduser(os.path.expandvars(config.get(section, 'anchors'))), sep='\t').values
self.func = getattr(inference, config.get(section, 'inference'))
def __call__(self, data, training=False):
_, self.output = self.func(data, len(self.names), len(self.anchors), training=training)
with tf.name_scope(__name__.split('.')[-1]):
self.model = Model(self.output, len(self.names), self.anchors, training=training)
def create_objectives(self, labels):
section = __name__.split('.')[-1]
self.objectives = Objectives(self.model, *labels)
with tf.name_scope('weighted_objectives'):
for key in self.objectives:
tf.add_to_collection(tf.GraphKeys.LOSSES, tf.multiply(self.objectives[key], self.config.getfloat(section + '_hparam', key), name='weighted_' + key))
| lgpl-3.0 |
simonzhangsm/voltdb | tools/vis2.py | 2 | 29097 | #!/usr/bin/env python
# This is a visualizer which pulls TPC-C benchmark results from the MySQL
# databases and visualizes them. Four graphs will be generated: latency graphs on
# single and multiple nodes, and throughput graphs on single and
# multiple nodes.
#
# Run it without any arguments to see what arguments are needed.
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))) +
os.sep + 'tests/scripts/')
import matplotlib
matplotlib.use('Agg')
from matplotlib.colors import rgb2hex
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from voltdbclient import *
from operator import itemgetter, attrgetter
import numpy as np
import csv
import time
import datetime
STATS_SERVER = 'volt2'
NaN = float("nan")
# These are the "Tableau 20" colors as RGB.
COLORS = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(COLORS)):
r, g, b = COLORS[i]
COLORS[i] = (r / 255., g / 255., b / 255.)
MARKERS = ['+', '*', '<', '>', '^', '_',
'D', 'H', 'd', 'h', 'o', 'p']
WIDTH = 1700
HEIGHT = 850
APX = 80
APY = 10
last = -1
DATA_HEADER = "branch|chart|master-last|master-ma-last|master-stdev|branch-last|no-stdev-vs-master(neg=worse)|pct-vs-master(neg=worse)".split(
"|")
branch_colors = {}
def get_stats(hostname, port, days):
"""Get most recent run statistics of all apps within the last 'days'
"""
conn = FastSerializer(hostname, port)
proc = VoltProcedure(conn, 'AverageOfPeriod',
[FastSerializer.VOLTTYPE_SMALLINT])
resp = proc.call([days])
conn.close()
# keyed on app name, value is a list of runs sorted chronologically
maxdate = datetime.datetime(1970, 1, 1, 0, 0, 0)
mindate = datetime.datetime(2038, 1, 19, 0, 0, 0)
stats = dict()
run_stat_keys = ['app', 'nodes', 'branch', 'date', 'tps', 'lat95', 'lat99']
# print resp
for row in resp.tables[0].tuples:
group = (row[0], row[1])
app_stats = []
maxdate = max(maxdate, row[3])
mindate = min(mindate, row[3])
if group not in stats:
stats[group] = app_stats
else:
app_stats = stats[group]
run_stats = dict(zip(run_stat_keys, row))
app_stats.append(run_stats)
return (stats, mindate, maxdate)
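# Rough usage sketch (the port number is an assumption, not taken from this
# file):
#   stats, mindate, maxdate = get_stats(STATS_SERVER, 21212, 30)
# Keys of `stats` are (app, nodes) tuples; each value is a list of per-run
# dicts with 'app', 'nodes', 'branch', 'date', 'tps', 'lat95', 'lat99'.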
class Plot:
DPI = 100.0
def __init__(self, title, xlabel, ylabel, filename, w, h, xmin, xmax, series):
self.filename = filename
self.legends = {}
self.xmax = xmax
self.xmin = xmin
self.series = series
self.title = title
self.fig = plt.figure(figsize=(w / self.DPI, h / self.DPI),
dpi=self.DPI)
self.ax = self.fig.add_subplot(111)
self.ax.set_title(title)
plt.tick_params(axis='x', which='major', labelsize=16)
plt.tick_params(axis='y', labelright=True, labelleft=False, labelsize=16)
plt.Locator.MAXTICKS = 2000
plt.grid(True, color='black', alpha=0.5)
self.fig.autofmt_xdate()
plt.autoscale(enable=True, axis='x', tight=None)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
def plot(self, x, y, color, marker_shape, legend, linestyle):
self.ax.plot(x, y, linestyle, label=legend, color=color,
marker=marker_shape, markerfacecolor=color, markersize=8)
def close(self):
plt.axvline(x=datetime.datetime(2016, 1, 11, 12, 00, 0), color='black')
x_formatter = matplotlib.dates.DateFormatter("%b %d %y")
self.ax.xaxis.set_major_formatter(x_formatter)
xmin, xmax = plt.xlim()
if (self.xmax - self.xmin).days >= 365:
l = 13
loc = matplotlib.dates.WeekdayLocator(byweekday=matplotlib.dates.MO, interval=13)
minloc = None
else:
l = 7
loc = matplotlib.dates.WeekdayLocator(byweekday=matplotlib.dates.MO, interval=1)
minloc = matplotlib.ticker.AutoMinorLocator(n=l)
if loc:
self.ax.xaxis.set_major_locator(loc)
if minloc:
self.ax.xaxis.set_minor_locator(minloc)
y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
self.ax.yaxis.set_major_formatter(y_formatter)
ymin, ymax = plt.ylim()
plt.xlim((self.xmin.toordinal(),
(self.xmax + datetime.timedelta(1)).replace(minute=0, hour=0, second=0, microsecond=0).toordinal()))
if self.series.startswith('lat'):
lloc = 2
else:
lloc = 3
plt.legend(prop={'size': 10}, loc=lloc)
plt.savefig(self.filename, format="png", transparent=False, bbox_inches="tight", pad_inches=0.2)
plt.close('all')
class Bdata(dict):
def __init__(self, *args, **kwargs):
dict.update(self, *args, **kwargs)
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return None
def __setitem__(self, key, value):
return dict.__setitem__(self, key, value)
def __delitem__(self, key):
return dict.__delitem__(self, key)
def __contains__(self, key):
return dict.__contains__(self, key)
def update(self, *args, **kwargs):
dict.update(self, *args, **kwargs)
def __getattribute__(self, *args, **kwargs):
if dict.__contains__(self, args[0]):
return self.__getitem__(args[0])
return dict.__getattribute__(self, args[0])
def __setattr__(self, key, value):
return dict.__setitem__(self, key, value)
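# Bdata is just a dict with attribute-style access, e.g. (illustrative):
#   b = Bdata(branch='master', avg=1.0)
#   b.branch == b['branch'] == 'master'
#   b['missing'] is None   # missing keys read via [] fall back to None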
def plot(title, xlabel, ylabel, filename, width, height, app, data, series, mindate, maxdate, polarity, analyze):
global branch_colors
plot_data = dict()
for run in data:
if run['branch'] not in plot_data:
plot_data[run['branch']] = {series: []}
if series == 'tppn':
value = run['tps'] / run['nodes']
else:
value = run[series]
if value != 0.0:
datenum = matplotlib.dates.date2num(run['date'])
plot_data[run['branch']][series].append((datenum, value))
if len(plot_data) == 0:
return
runs = 0
for run in plot_data.itervalues():
runs += len(run.values()[0])
if runs == 0:
pl = Plot(title, xlabel, ylabel, filename, width, height, mindate, maxdate, series)
pl.ax.annotate("Intentionally blank", xy=(.5, .5), xycoords='axes fraction',
horizontalalignment='center', verticalalignment='center')
pl.close()
return
# increase the figure size to allow for annotations
branches_sort = sorted(plot_data.keys())
height = (height or HEIGHT) + APY * len(branches_sort)
pl = Plot(title, xlabel, ylabel, filename, width, height, mindate, maxdate, series)
toc = dict()
branches_sort = sorted(plot_data.keys())
if 'master' in branches_sort:
branches_sort.remove('master')
branches_master_first = ['master'] + branches_sort
else:
branches_master_first = branches_sort
print "WARN: has no master: %s" % title
# loop thru branches, 'master' is first
bn = 0
for b in branches_master_first:
bd = plot_data[b]
bn += 1
# k is the chart type like lat95, lat99, thpt, etc
# v is the chart's data list[tuples(float,float),]
for k, v in bd.items():
if len(v) == 0:
print "branch %s, chart %s has no data points, skipping..." % (b, k)
continue
if k not in toc.keys():
toc[k] = []
v = sorted(v, key=lambda x: x[0])
# u is the chart data
u = zip(*v)
# get the colors and markers
if b not in branch_colors:
branch_colors[b] = (
COLORS[len(branch_colors.keys()) % len(COLORS)], MARKERS[len(branch_colors.keys()) % len(MARKERS)])
# compute and save master's average, median, std on raw (all) data
bdata = Bdata(branch=b, title=title, chart=k, color=None, seriescolor=branch_colors[b][0],
seriesmarker=branch_colors[b][1], xdata=u[0], ydata=u[1],
last=u[1][-1], avg=np.average(u[1]), median=np.median(u[1]), stdev=np.std(u[1]), ma=[NaN],
ama=NaN, mstd=[NaN],
pctmadiff=NaN, mnstddiff=NaN, failed=None, bgcolor=None)
analyze.append(bdata)
# plot the series
pl.plot(u[0], u[1], bdata.seriescolor, bdata.seriesmarker, bdata.branch, '-')
# master is processed first, but set this up in case there is no master data for a chart
master = analyze[-1] # remember master
MOVING_AVERAGE_DAYS = 10
(ma, ama, mstd, mastd) = moving_average(bdata.ydata, MOVING_AVERAGE_DAYS)
if len(ma) > MOVING_AVERAGE_DAYS:
pl.plot(bdata.xdata, ma, bdata.seriescolor, None, None, ":")
if b == 'master':
# if we have enough data compute moving average and moving std dev
# std is std of data corresponding to each ma window
# nb. std is used only in the charts for the 2-sigma line
# ama is mean of moving average population
# mstd is std of all moving-average values
if len(ma) >= MOVING_AVERAGE_DAYS:
bdata.update(ma=ma, ama=ama, mstd=mstd, mastd=mastd)
# plot the moving average
# see if the series should be flagged out of spec
failed = 0
if polarity == 1:
# increasing is bad
bestpoint = np.nanmin(ma)
localminormax = (bdata.xdata[np.nanargmin(ma)], bestpoint)
if b == 'master' and bdata.ma[last] > bdata.median * 1.05:
failed = 1
else:
# decreasing is bad
bestpoint = np.nanmax(ma)
localminormax = (bdata.xdata[np.nanargmax(ma)], bestpoint)
if b == 'master' and bdata.ma[last] < bdata.median * 0.95:
failed = 1
# plot the 2-sigma line
twosigma = np.sum([np.convolve(bdata.mstd, polarity * 2), bdata.ma], axis=0)
pl.plot(bdata.xdata, twosigma, bdata.seriescolor, None, None, '-.')
pl.ax.annotate(r"$2\sigma$", xy=(bdata.xdata[last], twosigma[last]), xycoords='data',
xytext=(20, 0),
textcoords='offset points', ha='right', color=bdata.seriescolor, alpha=0.5)
# plot the 20% line
twntypercent = np.sum([np.convolve(bdata.ma, polarity * 0.2), bdata.ma], axis=0)
pl.plot(bdata.xdata, twntypercent, bdata.seriescolor, None, None, '-.')
pl.ax.annotate(r"20%", xy=(bdata.xdata[last], twntypercent[last]), xycoords='data', xytext=(20, 0),
textcoords='offset points', ha='right', color=bdata.seriescolor, alpha=0.5)
#pctmaxdev = (bestpoint - master.ma[last]) / bestpoint * 100. * polarity # pct diff min/max
pctmaxdev = (ama - master.ma[last]) / ama * 100. * polarity # pct diff ma deviation
# pctmedian = (master.ma[last] - bdata.median) / master.median * 100. #pct diff median
pctmadiff = (master.ma[last] - bdata.ydata[last]) / master.ma[last] * 100. * polarity # pct diff last vs ma mean
mnstddiff = (master.ma[last] - bdata.ydata[last]) / master.stdev * polarity # no std diff last vs ma
mnstdmadiff = (master.ma[last] - bdata.ydata[last]) / master.mstd[last] * polarity # no std diff std of most recent window
bdata.update(pctmadiff=pctmadiff, mnstddiff=mnstddiff)
# when do we flag a chart? standard deviation is easy to use for an estimator but since it relies
# on squares it will tend to give poor results if the deviations are large. Also, we'll just assume
# that our distribution is Normal, so 95% of data points should lie within 2 stddev of the mean
# set background color of chart if there's a data point outside 2sigma or if the moving
# average has negatively deviated from its mean by more than 5 or 10%
# yellow if > 5%
# red if >= 10%
# negative values are worse
failed = False
color = None
# moving average has degraded by 5+ (yellow) or 10+ (red) pct
            # last has degraded from ma (mean) by 5+ or 10+ pct AND last >= 1.5 stdev off the last mean
if pctmaxdev <= -10.0 or \
pctmadiff <= -10.0 and mnstddiff <= -1.5:
color = 'red'
elif pctmaxdev <= -5.0 or \
pctmadiff > -10.0 and pctmadiff <= -5.0 and mnstddiff <= -1.5:
color = 'yellow'
if color:
failed = True
print title, b, k, pctmaxdev, pctmadiff, mnstddiff, str(color)
toc[k].append((b, color))
if failed:
for pos in ['top', 'bottom', 'right', 'left']:
pl.ax.spines[pos].set_edgecolor(color)
pl.ax.set_axis_bgcolor(color)
pl.ax.patch.set_alpha(0.1)
bdata.update(failed=failed, bgcolor=color)
# annotate value of the best point aka localminormax
pl.ax.annotate("%.2f" % bestpoint, xy=localminormax, xycoords='data', xytext=(0, -10 * polarity),
textcoords='offset points', ha='center', color=bdata.seriescolor, alpha=0.5)
# annotate value and percent vs reference point of most recent moving average on master
pl.ax.annotate("%.2f" % bdata.ma[last], xy=(bdata.xdata[last], bdata.ma[last]), xycoords='data',
xytext=(5, +5), textcoords='offset points', ha='left', alpha=0.5)
pl.ax.annotate("(%+.2f%%)" % pctmaxdev, xy=(bdata.xdata[last], bdata.ma[last]), xycoords='data',
xytext=(5, -5),
textcoords='offset points', ha='left', alpha=0.5)
# annotation with moving average values
# bdata.update(pctmedian=pctmedian, bestpoint=bestpoint, pctmaxdev=pctmaxdev)
# raw data to the chart
pl.ax.annotate('%s %s: %s n: %d last: %.2f avg: %.2f sdev: %.2f (%.2f%% avg) (%.2f%% ma) ma: %.2f'
' (%+.2f%% of bestma) (%+.2f%% of lastma) (%+.2f #stdev) (%.2f #mstd) avg(ma):'
' %.2f std(ma): %.2f' % (
bdata.seriesmarker, bdata.branch, bdata.bgcolor, len(bdata.ydata),
bdata.ydata[last],
bdata.avg, bdata.stdev, bdata.stdev / bdata.avg * 100., bdata.stdev / bdata.ma[last] * 100.,
bdata.ma[last], pctmaxdev, bdata.pctmadiff, bdata.mnstddiff, mnstdmadiff, bdata.ama, bdata.mastd),
xy=(APX, APY * bn),
xycoords='figure points', horizontalalignment='left', verticalalignment='top',
color=bdata.seriescolor, fontsize=10, alpha=1.0)
else:
# branches comparing to master (no moving average component)
if master.ama is not NaN:
pctmadiff = (master.ama - bdata.ydata[
last]) / master.ama * 100. * polarity # pct diff last vs ma mean
mnstddiff = (master.ama - bdata.ydata[last]) / master.stdev * polarity # no std diff last vs ma
color = 'black'
if pctmadiff > -10.0 and pctmadiff <= -5.0:
color = 'yellow'
elif pctmadiff <= -10.0:
color = 'red'
if mnstddiff >= 2.0:
color = 'red'
bdata.update(bgcolor=color, pctmadiff=pctmadiff, mnstddiff=mnstddiff)
pl.ax.annotate(
bdata.seriesmarker + ' %s: %s n: %d last %.2f avg: %.2f sdev: %.2f (%.2f%% of ma) no-std-master-avg(ma): %.2f pct-master-avg(ma): %.2f' %
(bdata.branch, bdata.bgcolor, len(bdata.ydata), bdata.ydata[last], bdata.avg, bdata.stdev,
bdata.stdev / master.ama * 100., bdata.mnstddiff, bdata.pctmadiff),
xy=(APX, APY * bn), xycoords='figure points', horizontalalignment='left', verticalalignment='top',
color=bdata.seriescolor, fontsize=10, alpha=1.0)
if len(analyze) == 1:
pl.ax.annotate(datetime.datetime.today().strftime("%Y/%m/%d %H:%M:%S"), xy=(.20, .95),
xycoords='figure fraction', horizontalalignment='left', verticalalignment='top',
fontsize=8)
pl.close()
return toc
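# Illustrative sketch (added for exposition; not part of the original script):
# the flag-color rule described in the comments inside plot(), extracted as a
# standalone helper so it can be exercised with toy inputs. The thresholds
# (5%/10% degradation, 1.5 standard deviations) mirror the code above.
def _demo_flag_color(pctmaxdev, pctmadiff, mnstddiff):
    # red: moving average degraded by 10%+, or the last point is 10%+ below
    # the moving average and at least 1.5 std devs away
    if pctmaxdev <= -10.0 or (pctmadiff <= -10.0 and mnstddiff <= -1.5):
        return 'red'
    # yellow: the same conditions at the 5% level
    if pctmaxdev <= -5.0 or (-10.0 < pctmadiff <= -5.0 and mnstddiff <= -1.5):
        return 'yellow'
    return None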
def generate_index_file(filenames, branches):
row = """
<tr>
<td><a href="%s"><img src="%s" width="100%%" height="35%%"/></a></td>
<td><a href="%s"><img src="%s" width="100%%" height="35%%"/></a></td>
<td><a href="%s"><img src="%s" width="100%%" height="35%%"/></a></td>
</tr>
"""
sep = """
</table>
<table frame="box" width="100%%">
<tr>
<th colspan="3"><a name="%s">%s</a></th>
</tr>
"""
full_content = """
<html>
<head>
<title>Performance Graphs</title>
</head>
<body>
Generated on %s<br>
%s
<table frame="box" width="100%%">
%s
</table>
</body>
</html>
"""
branch_studies = """
<a href=%s>%s</a>
"""
hrow = """
<tr>
<td %s><a href=#%s>%s</a></td>
<td %s><a href=#%s>%s</a></td>
<td %s><a href=#%s>%s</a></td>
<td %s><a href=#%s>%s</a></td>
</tr>
"""
toc = []
for x in filenames:
tdattr = "<span></span>"
if len(x) == 6:
color = None
# rollup worse case color flag condition
for type in x[5].values():
for branch in type:
if branch[1] == 'red':
color = 'red'
break
elif color is None and branch[1] == 'yellow':
color = 'yellow'
if color == 'red':
break
if color:
tdattr = '<span style="color:%s">►</span>' % color
toc.append(("", x[0].replace(' ', '%20'), tdattr + x[0]))
n = 4
z = n - len(toc) % n
while z > 0 and z < n:
toc.append(('', '', ''))
z -= 1
rows = []
t = ()
for i in range(1, len(toc) + 1):
t += tuple(toc[i - 1])
if i % n == 0:
rows.append(hrow % t)
t = ()
last_app = None
for i in filenames:
if i[0] != last_app:
rows.append(sep % (i[0], i[0]))
last_app = i[0]
rows.append(row % (i[1], i[1], i[2], i[2], i[3], i[3]))
return full_content % (time.strftime("%Y/%m/%d %H:%M:%S"), branches, ''.join(rows))
def generate_data_file(data, branches, prefix):
row = """
<tr>
<td align="center"><span style="color:%s">►</span></td>
<td align="left" width="40%%"><a href="%s">%s</a></td>
<td align="right">%s</td>
<td align="right">%s</td>
<td align="right">%s</td>
<td align="right">%s</td>
<td align="right">%s</td>
<td align="right">%s</td>
</tr>
"""
sep = """
</table>
<table frame="box" width="100%%">
<tr>
<th colspan="8"><a name="%s">%s</a></th>
</tr>
%s
"""
full_content = """
<html>
<head>
<title>Performance Raw Data</title>
</head>
<body>
Generated on %s<br>
%s
<table frame="box" width="100%%">
%s
</table>
</body>
</html>
"""
hrow = """
<tr>
<th>Flag</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
<th>%s</th>
</tr>
"""
rows = []
header2 = DATA_HEADER[1:]
#for i in range(0, len(header2) * 2, 2):
# header2.insert(i + 1, header2[i])
last_app = None
bgcolors = {'black': 'white', 'None': 'white'}
    # data is a numpy ndarray, so each row is converted to a tuple below
for d in range(len(data)):
i = tuple(data[d])
if i[0] != last_app:
rows.append(sep % ((i[0], i[0], hrow % tuple(header2))))
last_app = i[0]
bgcolor = bgcolors.get(i[1], i[1] or 'white')
rows.append(row % ((bgcolor, png_filename(i[2], prefix), i[2]) + tuple([round(x, 3) for x in i[3:]])))
return full_content % (time.strftime("%Y/%m/%d %H:%M:%S"), branches, ''.join(rows))
def png_filename(filename, prefix):
return prefix + "_" + filename.replace(" ", "_") + ".png"
def moving_average(x, n, type='simple'):
"""
compute an n period moving average.
type is 'simple' | 'exponential'
"""
if len(x) < n:
return ([], NaN, [], NaN)
x = np.asarray(x)
if type == 'simple':
weights = np.ones(n)
else:
weights = np.exp(np.linspace(-1., 0., n))
weights /= weights.sum()
a = [NaN]*(n-1) + list(np.convolve(x, weights, mode='valid'))
    # compute the mean of the moving average population
# over the most recent ma group
ama = np.average(a[len(a)-n:])
# compute the standard deviation of the set of data
# corresponding to each moving average window
s = [NaN]*(n-1)
for d in range(0, len(x)-n+1):
s.append(np.std(x[d : d+n]))
    # also compute the standard deviation of the moving avg over
    # all available data points. Scalar result.
z = np.std(a[n - 1:])
return (a, ama, s, z)
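# Illustrative sketch (added for exposition; not part of the original script):
# how moving_average() behaves on a tiny input. For a 3-period simple average
# of [1, 2, 3, 4] the padded series is [NaN, NaN, 2.0, 3.0].
def _demo_moving_average():
    import numpy as np
    ma, ama, mstd, mastd = moving_average([1.0, 2.0, 3.0, 4.0], 3)
    assert np.allclose(ma[2:], [2.0, 3.0])
    return ma, ama, mstd, mastd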
def usage():
print "Usage:"
print "\t", sys.argv[0], "output_dir filename_base [ndays]" \
" [width] [height]"
print
print "\t", "width in pixels"
print "\t", "height in pixels"
def main():
if len(sys.argv) < 3:
usage()
exit(-1)
if not os.path.exists(sys.argv[1]):
print sys.argv[1], "does not exist"
exit(-1)
prefix = sys.argv[2]
path = os.path.join(sys.argv[1], sys.argv[2])
ndays = 2000
if len(sys.argv) >= 4:
ndays = int(sys.argv[3])
width = WIDTH
height = HEIGHT
if len(sys.argv) >= 5:
width = int(sys.argv[4])
if len(sys.argv) >= 6:
height = int(sys.argv[5])
# show all the history
(stats, mindate, maxdate) = get_stats(STATS_SERVER, 21212, ndays)
mindate = (mindate).replace(hour=0, minute=0, second=0, microsecond=0)
maxdate = (maxdate + datetime.timedelta(days=1)).replace(minute=0, hour=0, second=0, microsecond=0)
root_path = path
filenames = [] # (appname, latency, throughput)
iorder = 0
analyze = []
for group, data in stats.iteritems():
(study, nodes) = group
study = study.replace('/', '')
#if study != 'CSV-narrow-ix':
# continue
conn = FastSerializer(STATS_SERVER, 21212)
proc = VoltProcedure(conn, "@AdHoc", [FastSerializer.VOLTTYPE_STRING])
resp = proc.call(["select chart_order, series, chart_heading, x_label, y_label, polarity from charts where appname = '%s' order by chart_order" % study])
conn.close()
app = study + " %d %s" % (nodes, ["node", "nodes"][nodes > 1])
# chart polarity: -1 for tps (decreasing is bad), 1 for latencies (increasing is bad)
legend = {1: dict(series="lat95", heading="95tile latency", xlabel="Time", ylabel="Latency (ms)", polarity=1),
2: dict(series="lat99", heading="99tile latency", xlabel="Time", ylabel="Latency (ms)", polarity=1),
3: dict(series="tppn", heading="avg throughput per node", xlabel="Time", ylabel="ops/sec per node",
polarity=-1)
}
for r in resp.tables[0].tuples:
legend[r[0]] = dict(series=r[1], heading=r[2], xlabel=r[3], ylabel=r[4], polarity=r[5])
fns = [app]
tocs = dict()
for r in legend.itervalues():
aanalyze = []
title = app + " " + r['heading']
fn = "_" + title.replace(" ", "_") + ".png"
fns.append(prefix + fn)
toc = plot(title, r['xlabel'], r['ylabel'], path + fn, width, height, app, data, r['series'], mindate,
maxdate, r['polarity'], aanalyze)
master = Bdata()
if len(aanalyze):
master = aanalyze[0]
for branch in aanalyze:
analyze.append(tuple(
[branch['branch'], branch['bgcolor'], branch['title'], master['last'], master['ma'][last], master['stdev'], branch['last'],
branch['mnstddiff'], branch['pctmadiff']]))
if toc:
tocs.update(toc)
#if len(analyze)/3 >= 6:
# break
fns.append(iorder)
fns.append(tocs)
filenames.append(tuple(fns))
filenames.append(("KVBenchmark-five9s-nofail-latency", "", "",
"http://ci/job/performance-nextrelease-5nines-nofail/lastSuccessfulBuild/artifact/pro/tests/apptests/savedlogs/5nines-histograms.png",
iorder))
filenames.append(("KVBenchmark-five9s-nofail-nocl-latency", "", "",
"http://ci/job/performance-nextrelease-5nines-nofail-nocl/lastSuccessfulBuild/artifact/pro/tests/apptests/savedlogs/5nines-histograms.png",
iorder))
filenames.append(("KVBenchmark-five9s-nofail-nocl-kvm-latency", "", "",
"http://ci/job/performance-nextrelease-5nines-nofail-nocl-kvm/lastSuccessfulBuild/artifact/pro/tests/apptests/savedlogs/5nines-histograms.png",
iorder))
filenames.append(("Openet-Shocker-three9s-latency", "", "",
"http://ci/job/performance-nextrelease-shocker/lastSuccessfulBuild/artifact/pro/tests/apptests/savedlogs/3nines-histograms.png",
iorder))
filenames.append(("Openet-Shocker-three9s-4x2-latency", "", "",
"http://ci/job/performance-nextrelease-shocker-4x2/lastSuccessfulBuild/artifact/pro/tests/apptests/savedlogs/3nines-histograms.png",
iorder))
# sort and save the raw data analyze file
with open(root_path + "-analyze.csv", "wb") as f:
writer = csv.writer(f, delimiter='|')
writer.writerows(DATA_HEADER)
aa = np.array(analyze,
dtype=[('branch-name', 'S99'), ('bgcolor', 'S99'), ('file', 'S99'), ('master', float),
('ma', float), ('std', float), ('branch-last', float), ('nstd', float), ('pct', float)])
branches = []
sanalyze = np.sort(aa, order=['branch-name', 'nstd'])
for r in sanalyze:
if r[0] not in branches:
branches.append(r[0])
writer.writerows(sanalyze)
# make convenient links to the branch studies tables
branches = '\n'.join(
["<a href=" + prefix + "-analyze.html#" + b + ">" + b + "</a><br>" for b in sorted(np.unique(branches))])
# generate branch study html page
with open(root_path + '-analyze.html', 'w') as data_file:
data_file.write(generate_data_file(sanalyze, branches, prefix))
# generate index file
index_file = open(root_path + '-index.html', 'w')
sorted_filenames = sorted(filenames, key=lambda f: f[0].lower() + str(f[1]))
index_file.write(generate_index_file(sorted_filenames, branches))
index_file.close()
if __name__ == "__main__":
main()
| agpl-3.0 |
Nyker510/scikit-learn | examples/linear_model/plot_robust_fit.py | 238 | 2414 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demonstrated in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation to non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in direction X and y, but has
  a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
plt.legend(loc='best', frameon=False,
title='Error: mean absolute deviation\n to non corrupt data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
Averroes/statsmodels | statsmodels/regression/mixed_linear_model.py | 19 | 91253 | """
Linear mixed effects models are regression models for dependent data.
They can be used to estimate regression relationships involving both
means and variances.
These models are also known as multilevel linear models, and
hierachical linear models.
The MixedLM class fits linear mixed effects models to data, and
provides support for some common post-estimation tasks. This is a
group-based implementation that is most efficient for models in which
the data can be partitioned into independent groups. Some models with
crossed effects can be handled by specifying a model with a single
group.
The data are partitioned into disjoint groups. The probability model
for group i is:
Y = X*beta + Z*gamma + epsilon
where
* n_i is the number of observations in group i
* Y is a n_i dimensional response vector (called endog in MixedLM)
* X is a n_i x k_fe dimensional design matrix for the fixed effects
(called exog in MixedLM)
* beta is a k_fe-dimensional vector of fixed effects parameters
(called fe_params in MixedLM)
* Z is a design matrix for the random effects with n_i rows (called
exog_re in MixedLM). The number of columns in Z can vary by group
as discussed below.
* gamma is a random vector with mean 0. The covariance matrix for the
first `k_re` elements of `gamma` (called cov_re in MixedLM) is
common to all groups. The remaining elements of `gamma` are
variance components as discussed in more detail below. Each group
receives its own independent realization of gamma.
* epsilon is a n_i dimensional vector of iid normal
errors with mean 0 and variance sigma^2; the epsilon
values are independent both within and between groups
Y, X and Z must be entirely observed. beta, Psi, and sigma^2 are
estimated using ML or REML estimation, and gamma and epsilon are
random, and so define the probability model.
The marginal mean structure is E[Y | X, Z] = X*beta. If only the mean
structure is of interest, GEE is an alternative to using linear mixed
models.
Two types of random effects are supported. Standard random effects
are correlated with each other in arbitrary ways. Every group has the
same number (`k_re`) of standard random effects, with the same joint
distribution (but with independent realizations across the groups).
Variance components are uncorrelated with each other, and with the
standard random effects. Each variance component has mean zero, and
all realizations of a given variance component have the same variance
parameter. The number of realized variance components per variance
parameter can differ across the groups.
The primary reference for the implementation details is:
MJ Lindstrom, DM Bates (1988). "Newton Raphson and EM algorithms for
linear mixed effects models for repeated measures data". Journal of
the American Statistical Association. Volume 83, Issue 404, pages
1014-1022.
See also this more recent document:
http://econ.ucsb.edu/~doug/245a/Papers/Mixed%20Effects%20Implement.pdf
All the likelihood, gradient, and Hessian calculations closely follow
Lindstrom and Bates 1988, adapted to support variance components.
The following two documents are written more from the perspective of
users:
http://lme4.r-forge.r-project.org/lMMwR/lrgprt.pdf
http://lme4.r-forge.r-project.org/slides/2009-07-07-Rennes/3Longitudinal-4.pdf
Notation:
* `cov_re` is the random effects covariance matrix (referred to above
as Psi) and `scale` is the (scalar) error variance. For a single
group, the marginal covariance matrix of endog given exog is scale*I
+ Z * cov_re * Z', where Z is the design matrix for the random
effects in one group.
* `vcomp` is a vector of variance parameters. The length of `vcomp`
is determined by the number of keys in either the `exog_vc` argument
to ``MixedLM``, or the `vc_formula` argument when using formulas to
fit a model.
Notes:
1. Three different parameterizations are used in different places.
The regression slopes (usually called `fe_params`) are identical in
all three parameterizations, but the variance parameters differ. The
parameterizations are:
* The "user parameterization" in which cov(endog) = scale*I + Z *
cov_re * Z', as described above. This is the main parameterization
visible to the user.
* The "profile parameterization" in which cov(endog) = I +
Z * cov_re1 * Z'. This is the parameterization of the profile
likelihood that is maximized to produce parameter estimates.
(see Lindstrom and Bates for details). The "user" cov_re is
equal to the "profile" cov_re1 times the scale.
* The "square root parameterization" in which we work with the Cholesky
factor of cov_re1 instead of cov_re directly. This is hidden from the
user.
All three parameterizations can be packed into a vector by
(optionally) concatenating `fe_params` together with the lower
triangle or Cholesky square root of the dependence structure, followed
by the variance parameters for the variance components. These are
stored as square roots if (and only if) the random effects covariance
matrix is stored as its Cholesky factor. Note that when unpacking, it
is important to either square or reflect the dependence structure
depending on which parameterization is being used.
Two score methods are implemented. One takes the score with respect
to the elements of the random effects covariance matrix (used for
inference once the MLE is reached), and the other takes the score with
respect to the parameters of the Cholesky square root of the random
effects covariance matrix (used for optimization).
The numerical optimization uses GLS to avoid explicitly optimizing
over the fixed effects parameters. The likelihood that is optimized
is profiled over both the scale parameter (a scalar) and the fixed
effects parameters (if any). As a result of this profiling, it is
difficult and unnecessary to calculate the Hessian of the profiled log
likelihood function, so that calculation is not implemented here.
Therefore, optimization methods requiring the Hessian matrix such as
the Newton-Raphson algorithm cannot be used for model fitting.
"""
import numpy as np
import statsmodels.base.model as base
from scipy.optimize import fmin_ncg, fmin_cg, fmin_bfgs, fmin
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools import data as data_tools
from scipy.stats.distributions import norm
from scipy import sparse
import pandas as pd
import patsy
from statsmodels.compat.collections import OrderedDict
from statsmodels.compat import range
import warnings
from statsmodels.tools.sm_exceptions import ConvergenceWarning
from statsmodels.base._penalties import Penalty
from statsmodels.compat.numpy import np_matrix_rank
from pandas import DataFrame
def _dot(x, y):
"""
Returns the dot product of the arrays, works for sparse and dense.
"""
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
return np.dot(x, y)
elif sparse.issparse(x):
return x.dot(y)
elif sparse.issparse(y):
return y.T.dot(x.T).T
# From numpy, adapted to work with sparse and dense arrays.
def _multi_dot_three(A, B, C):
"""
Find best ordering for three arrays and do the multiplication.
    Doing it manually instead of using dynamic programming is
approximately 15 times faster.
"""
# cost1 = cost((AB)C)
cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
    # cost2 = cost(A(BC))
cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
if cost1 < cost2:
return _dot(_dot(A, B), C)
else:
return _dot(A, _dot(B, C))
def _dotsum(x, y):
"""
Returns sum(x * y), where '*' is the pointwise product, computed
efficiently for dense and sparse matrices.
"""
if sparse.issparse(x):
return x.multiply(y).sum()
else:
# This way usually avoids allocating a temporary.
return np.dot(x.ravel(), y.ravel())
def _get_exog_re_names(self, exog_re):
"""
Passes through if given a list of names. Otherwise, gets pandas names
or creates some generic variable names as needed.
"""
if self.k_re == 0:
return []
if isinstance(exog_re, pd.DataFrame):
return exog_re.columns.tolist()
elif isinstance(exog_re, pd.Series) and exog_re.name is not None:
return [exog_re.name]
elif isinstance(exog_re, list):
return exog_re
return ["Z{0}".format(k + 1) for k in range(exog_re.shape[1])]
class MixedLMParams(object):
"""
This class represents a parameter state for a mixed linear model.
Parameters
----------
k_fe : integer
The number of covariates with fixed effects.
k_re : integer
The number of covariates with random coefficients (excluding
variance components).
k_vc : integer
The number of variance components parameters.
Notes
-----
This object represents the parameter state for the model in which
the scale parameter has been profiled out.
"""
def __init__(self, k_fe, k_re, k_vc):
self.k_fe = k_fe
self.k_re = k_re
self.k_re2 = k_re * (k_re + 1) // 2
self.k_vc = k_vc
self.k_tot = self.k_fe + self.k_re2 + self.k_vc
self._ix = np.tril_indices(self.k_re)
def from_packed(params, k_fe, k_re, use_sqrt, has_fe):
"""
Create a MixedLMParams object from packed parameter vector.
Parameters
----------
params : array-like
            The model parameters packed into a single vector.
k_fe : integer
The number of covariates with fixed effects
k_re : integer
The number of covariates with random effects (excluding
variance components).
use_sqrt : boolean
If True, the random effects covariance matrix is provided
as its Cholesky factor, otherwise the lower triangle of
the covariance matrix is stored.
has_fe : boolean
If True, `params` contains fixed effects parameters.
Otherwise, the fixed effects parameters are set to zero.
Returns
-------
A MixedLMParams object.
"""
k_re2 = int(k_re * (k_re + 1) / 2)
# The number of covariance parameters.
if has_fe:
k_vc = len(params) - k_fe - k_re2
else:
k_vc = len(params) - k_re2
pa = MixedLMParams(k_fe, k_re, k_vc)
cov_re = np.zeros((k_re, k_re))
ix = pa._ix
if has_fe:
pa.fe_params = params[0:k_fe]
cov_re[ix] = params[k_fe:k_fe+k_re2]
else:
pa.fe_params = np.zeros(k_fe)
cov_re[ix] = params[0:k_re2]
if use_sqrt:
cov_re = np.dot(cov_re, cov_re.T)
else:
cov_re = (cov_re + cov_re.T) - np.diag(np.diag(cov_re))
pa.cov_re = cov_re
if k_vc > 0:
if use_sqrt:
pa.vcomp = params[-k_vc:]**2
else:
pa.vcomp = params[-k_vc:]
else:
pa.vcomp = np.array([])
return pa
from_packed = staticmethod(from_packed)
def from_components(fe_params=None, cov_re=None, cov_re_sqrt=None, vcomp=None):
"""
Create a MixedLMParams object from each parameter component.
Parameters
----------
fe_params : array-like
The fixed effects parameter (a 1-dimensional array). If
None, there are no fixed effects.
cov_re : array-like
The random effects covariance matrix (a square, symmetric
2-dimensional array).
cov_re_sqrt : array-like
The Cholesky (lower triangular) square root of the random
effects covariance matrix.
vcomp : array-like
The variance component parameters. If None, there are no
variance components.
Returns
-------
A MixedLMParams object.
"""
if vcomp is None:
vcomp = np.empty(0)
if fe_params is None:
fe_params = np.empty(0)
if cov_re is None and cov_re_sqrt is None:
cov_re = np.empty((0, 0))
k_fe = len(fe_params)
k_vc = len(vcomp)
k_re = cov_re.shape[0] if cov_re is not None else cov_re_sqrt.shape[0]
pa = MixedLMParams(k_fe, k_re, k_vc)
pa.fe_params = fe_params
if cov_re_sqrt is not None:
pa.cov_re = np.dot(cov_re_sqrt, cov_re_sqrt.T)
elif cov_re is not None:
pa.cov_re = cov_re
pa.vcomp = vcomp
return pa
from_components = staticmethod(from_components)
def copy(self):
"""
Returns a copy of the object.
"""
obj = MixedLMParams(self.k_fe, self.k_re, self.k_vc)
obj.fe_params = self.fe_params.copy()
obj.cov_re = self.cov_re.copy()
obj.vcomp = self.vcomp.copy()
return obj
def get_packed(self, use_sqrt, has_fe=False):
"""
Return the model parameters packed into a single vector.
Parameters
----------
use_sqrt : bool
If True, the Cholesky square root of `cov_re` is
included in the packed result. Otherwise the
lower triangle of `cov_re` is included.
has_fe : bool
If True, the fixed effects parameters are included
in the packed result, otherwise they are omitted.
"""
if self.k_re > 0:
if use_sqrt:
L = np.linalg.cholesky(self.cov_re)
cpa = L[self._ix]
else:
cpa = self.cov_re[self._ix]
else:
cpa = np.zeros(0)
if use_sqrt:
vcomp = np.sqrt(self.vcomp)
else:
vcomp = self.vcomp
if has_fe:
pa = np.concatenate((self.fe_params, cpa, vcomp))
else:
pa = np.concatenate((cpa, vcomp))
return pa
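# Illustrative sketch (added for exposition; not part of statsmodels): packing
# and unpacking a MixedLMParams object round-trips the fixed effects and the
# random effects covariance. The values used here are example assumptions.
def _demo_param_packing():
    import numpy as np
    fe = np.array([1.0, -0.5])
    cov_re = np.array([[2.0, 0.3], [0.3, 1.0]])
    pa = MixedLMParams.from_components(fe_params=fe, cov_re=cov_re)
    packed = pa.get_packed(use_sqrt=False, has_fe=True)
    pa2 = MixedLMParams.from_packed(packed, k_fe=2, k_re=2,
                                    use_sqrt=False, has_fe=True)
    assert np.allclose(pa2.fe_params, fe)
    assert np.allclose(pa2.cov_re, cov_re)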
def _smw_solver(s, A, AtA, BI, di):
"""
Solves the system (s*I + A*B*A') * x = rhs for an arbitrary rhs.
The inverse matrix of B is block diagonal. The upper left block
is BI and the lower right block is a diagonal matrix containing
di.
Parameters
----------
s : scalar
See above for usage
A : ndarray
See above for usage
AtA : square ndarray
A.T * A
BI : square symmetric ndarray
The inverse of `B`.
    di : array-like
        The diagonal elements of the lower right block of B^-1.
Returns
-------
A function that takes `rhs` as an input argument and returns a
solution to the linear system defined above.
"""
# Use SMW identity
qmat = AtA / s
m = BI.shape[0]
qmat[0:m, 0:m] += BI
ix = np.arange(m, A.shape[1])
qmat[ix, ix] += di
if sparse.issparse(A):
qi = sparse.linalg.inv(qmat)
qmati = A.dot(qi.T).T
else:
qmati = np.linalg.solve(qmat, A.T)
def solver(rhs):
if sparse.issparse(A):
ql = qmati.dot(rhs)
ql = A.dot(ql)
else:
ql = np.dot(qmati, rhs)
ql = np.dot(A, ql)
rslt = rhs / s - ql / s**2
if sparse.issparse(rslt):
rslt = np.asarray(rslt.todense())
return rslt
return solver
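# Illustrative sketch (added for exposition; not part of statsmodels): a small
# numerical check that the SMW-based solver above agrees with a direct solve
# of (s*I + A*B*A') x = rhs. Dimensions and values are example assumptions.
def _demo_smw_solver():
    import numpy as np
    rng = np.random.RandomState(0)
    n, m, q = 8, 2, 3                    # n obs, m random effects, q vc terms
    A = rng.normal(size=(n, m + q))
    BI = 2.0 * np.eye(m)                 # inverse of the upper left block of B
    di = 4.0 * np.ones(q)                # inverse variances, lower right block
    B = np.zeros((m + q, m + q))
    B[:m, :m] = np.linalg.inv(BI)
    B[m:, m:] = np.diag(1.0 / di)
    s = 1.5
    V = s * np.eye(n) + A.dot(B).dot(A.T)
    solver = _smw_solver(s, A, A.T.dot(A), BI, di)
    rhs = rng.normal(size=n)
    assert np.allclose(solver(rhs), np.linalg.solve(V, rhs))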
def _smw_logdet(s, A, AtA, BI, di, B_logdet):
"""
Returns the log determinant of s*I + A*B*A'.
Uses the matrix determinant lemma to accelerate the calculation.
Parameters
----------
s : scalar
See above for usage
    A : ndarray
See above for usage
AtA : square matrix
A.T * A
BI : square symmetric ndarray
The upper left block of B^-1.
di : array-like
The diagonal elements of the lower right block of B^-1.
B_logdet : real
The log determinant of B
Returns
-------
The log determinant of s*I + A*B*A'.
"""
p = A.shape[0]
ld = p * np.log(s)
qmat = AtA / s
m = BI.shape[0]
qmat[0:m, 0:m] += BI
ix = np.arange(m, A.shape[1])
qmat[ix, ix] += di
if sparse.issparse(qmat):
qmat = qmat.todense()
_, ld1 = np.linalg.slogdet(qmat)
return B_logdet + ld + ld1
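# Illustrative sketch (added for exposition; not part of statsmodels): a small
# numerical check that the determinant-lemma calculation above matches the log
# determinant of s*I + A*B*A' computed directly. Values are example assumptions.
def _demo_smw_logdet():
    import numpy as np
    rng = np.random.RandomState(1)
    n, m, q = 8, 2, 3
    A = rng.normal(size=(n, m + q))
    BI = 2.0 * np.eye(m)
    di = 4.0 * np.ones(q)
    B = np.zeros((m + q, m + q))
    B[:m, :m] = np.linalg.inv(BI)
    B[m:, m:] = np.diag(1.0 / di)
    s = 1.5
    V = s * np.eye(n) + A.dot(B).dot(A.T)
    _, B_logdet = np.linalg.slogdet(B)
    ld = _smw_logdet(s, A, A.T.dot(A), BI, di, B_logdet)
    assert np.allclose(ld, np.linalg.slogdet(V)[1])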
class MixedLM(base.LikelihoodModel):
"""
An object specifying a linear mixed effects model. Use the `fit`
method to fit the model and obtain a results object.
Parameters
----------
endog : 1d array-like
The dependent variable
exog : 2d array-like
A matrix of covariates used to determine the
mean structure (the "fixed effects" covariates).
groups : 1d array-like
A vector of labels determining the groups -- data from
different groups are independent
exog_re : 2d array-like
A matrix of covariates used to determine the variance and
covariance structure (the "random effects" covariates). If
None, defaults to a random intercept for each group.
exog_vc : dict-like
        A dictionary containing specifications of the variance
component terms. See below for details.
use_sqrt : bool
If True, optimization is carried out using the lower
triangle of the square root of the random effects
covariance matrix, otherwise it is carried out using the
lower triangle of the random effects covariance matrix.
missing : string
The approach to missing data handling
Notes
-----
`exog_vc` is a dictionary of dictionaries. Specifically,
`exog_vc[a][g]` is a matrix whose columns are linearly combined
using independent random coefficients. This random term then
contributes to the variance structure of the data for group `g`.
The random coefficients all have mean zero, and have the same
variance. The matrix must be `m x k`, where `m` is the number of
observations in group `g`. The number of columns may differ among
the top-level groups.
The covariates in `exog`, `exog_re` and `exog_vc` may (but need
not) partially or wholly overlap.
`use_sqrt` should almost always be set to True. The main use case
for use_sqrt=False is when complicated patterns of fixed values in
the covariance structure are set (using the `free` argument to
`fit`) that cannot be expressed in terms of the Cholesky factor L.
Examples
--------
A basic mixed model with fixed effects for the columns of
``exog`` and a random intercept for each distinct value of
``group``:
>>> model = sm.MixedLM(endog, exog, groups)
>>> result = model.fit()
A mixed model with fixed effects for the columns of ``exog`` and
correlated random coefficients for the columns of ``exog_re``:
>>> model = sm.MixedLM(endog, exog, groups, exog_re=exog_re)
>>> result = model.fit()
A mixed model with fixed effects for the columns of ``exog`` and
independent random coefficients for the columns of ``exog_re``:
>>> free = MixedLMParams.from_components(fe_params=np.ones(exog.shape[1]),
cov_re=np.eye(exog_re.shape[1]))
>>> model = sm.MixedLM(endog, exog, groups, exog_re=exog_re)
>>> result = model.fit(free=free)
A different way to specify independent random coefficients for the
columns of ``exog_re``. In this example ``groups`` must be a
Pandas Series with compatible indexing with ``exog_re``, and
``exog_re`` has two columns.
>>> g = pd.groupby(groups, by=groups).groups
>>> vc = {}
>>> vc['1'] = {k : exog_re.loc[g[k], 0] for k in g}
>>> vc['2'] = {k : exog_re.loc[g[k], 1] for k in g}
    >>> model = sm.MixedLM(endog, exog, groups, exog_vc=vc)
>>> result = model.fit()
"""
def __init__(self, endog, exog, groups, exog_re=None,
exog_vc=None, use_sqrt=True, missing='none',
**kwargs):
_allowed_kwargs = ["missing_idx", "design_info", "formula"]
for x in kwargs.keys():
if x not in _allowed_kwargs:
raise ValueError("argument %s not permitted for MixedLM initialization" % x)
self.use_sqrt = use_sqrt
# Some defaults
self.reml = True
self.fe_pen = None
self.re_pen = None
# Needs to run early so that the names are sorted.
self._setup_vcomp(exog_vc)
# If there is one covariate, it may be passed in as a column
# vector, convert these to 2d arrays.
# TODO: Can this be moved up in the class hierarchy?
# yes, it should be done up the hierarchy
if (exog is not None and
data_tools._is_using_ndarray_type(exog, None) and
exog.ndim == 1):
exog = exog[:, None]
if (exog_re is not None and
data_tools._is_using_ndarray_type(exog_re, None) and
exog_re.ndim == 1):
exog_re = exog_re[:, None]
# Calling super creates self.endog, etc. as ndarrays and the
# original exog, endog, etc. are self.data.endog, etc.
super(MixedLM, self).__init__(endog, exog, groups=groups,
exog_re=exog_re, missing=missing,
**kwargs)
self._init_keys.extend(["use_sqrt", "exog_vc"])
self.k_fe = exog.shape[1] # Number of fixed effects parameters
if exog_re is None and exog_vc is None:
# Default random effects structure (random intercepts).
self.k_re = 1
self.k_re2 = 1
self.exog_re = np.ones((len(endog), 1), dtype=np.float64)
self.data.exog_re = self.exog_re
self.data.param_names = self.exog_names + ['Group RE']
elif exog_re is not None:
# Process exog_re the same way that exog is handled
# upstream
# TODO: this is wrong and should be handled upstream wholly
self.data.exog_re = exog_re
self.exog_re = np.asarray(exog_re)
if self.exog_re.ndim == 1:
self.exog_re = self.exog_re[:, None]
# Model dimensions
# Number of random effect covariates
self.k_re = self.exog_re.shape[1]
# Number of covariance parameters
self.k_re2 = self.k_re * (self.k_re + 1) // 2
else:
# All random effects are variance components
self.k_re = 0
self.k_re2 = 0
if not self.data._param_names:
# HACK: could've been set in from_formula already
# needs refactor
(param_names, exog_re_names,
exog_re_names_full) = self._make_param_names(exog_re)
self.data.param_names = param_names
self.data.exog_re_names = exog_re_names
self.data.exog_re_names_full = exog_re_names_full
self.k_params = self.k_fe + self.k_re2
# Convert the data to the internal representation, which is a
# list of arrays, corresponding to the groups.
group_labels = list(set(groups))
group_labels.sort()
row_indices = dict((s, []) for s in group_labels)
for i,g in enumerate(groups):
row_indices[g].append(i)
self.row_indices = row_indices
self.group_labels = group_labels
self.n_groups = len(self.group_labels)
# Split the data by groups
self.endog_li = self.group_list(self.endog)
self.exog_li = self.group_list(self.exog)
self.exog_re_li = self.group_list(self.exog_re)
# Precompute this.
if self.exog_re is None:
self.exog_re2_li = None
else:
self.exog_re2_li = [np.dot(x.T, x) for x in self.exog_re_li]
# The total number of observations, summed over all groups
self.nobs = len(self.endog)
self.n_totobs = self.nobs
# Set the fixed effects parameter names
if self.exog_names is None:
self.exog_names = ["FE%d" % (k + 1) for k in
range(self.exog.shape[1])]
# Precompute this
self._aex_r = []
self._aex_r2 = []
for i in range(self.n_groups):
a = self._augment_exog(i)
self._aex_r.append(a)
self._aex_r2.append(_dot(a.T, a))
# Precompute this
self._lin, self._quad = self._reparam()
def _setup_vcomp(self, exog_vc):
if exog_vc is None:
exog_vc = {}
self.exog_vc = exog_vc
self.k_vc = len(exog_vc)
vc_names = list(set(exog_vc.keys()))
vc_names.sort()
self._vc_names = vc_names
def _make_param_names(self, exog_re):
"""
Returns the full parameter names list, just the exogenous random
effects variables, and the exogenous random effects variables with
the interaction terms.
"""
exog_names = list(self.exog_names)
exog_re_names = _get_exog_re_names(self, exog_re)
param_names = []
jj = self.k_fe
for i in range(len(exog_re_names)):
for j in range(i + 1):
if i == j:
param_names.append(exog_re_names[i] + " RE")
else:
param_names.append(exog_re_names[j] + " RE x " +
exog_re_names[i] + " RE")
jj += 1
vc_names = [x + " RE" for x in self._vc_names]
return exog_names + param_names + vc_names, exog_re_names, param_names
@classmethod
def from_formula(cls, formula, data, re_formula=None, vc_formula=None,
subset=None, use_sparse=False, *args, **kwargs):
"""
Create a Model from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
data : array-like
The data for the model. See Notes.
re_formula : string
A one-sided formula defining the variance structure of the
model. The default gives a random intercept for each
group.
vc_formula : dict-like
Formulas describing variance components. `vc_formula[vc]` is
the formula for the component with variance parameter named
`vc`. The formula is processed into a matrix, and the columns
of this matrix are linearly combined with independent random
coefficients having mean zero and a common variance.
subset : array-like
An array-like object of booleans, integers, or index
values that indicate the subset of df to use in the
model. Assumes df is a `pandas.DataFrame`
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
model : Model instance
Notes
------
`data` must define __getitem__ with the keys in the formula
terms args and kwargs are passed on to the model
instantiation. E.g., a numpy structured or rec array, a
dictionary, or a pandas DataFrame.
If the variance component is intended to produce random
intercepts for disjoint subsets of a group, specified by
string labels or a categorical data value, always use '0 +' in
the formula so that no overall intercept is included.
If the variance components specify random slopes and you do
not also want a random group-level intercept in the model,
then use '0 +' in the formula to exclude the intercept.
The variance components formulas are processed separately for
each group. If a variable is categorical the results will not
be affected by whether the group labels are distinct or
re-used over the top-level groups.
This method currently does not correctly handle missing
values, so missing values should be explicitly dropped from
the DataFrame before calling this method.
Examples
--------
Suppose we have an educational data set with students nested
in classrooms nested in schools. The students take a test,
and we want to relate the test scores to the students' ages,
while accounting for the effects of classrooms and schools.
The school will be the top-level group, and the classroom is a
nested group that is specified as a variance component. Note
that the schools may have different number of classrooms, and
the classroom labels may (but need not be) different across
the schools.
>>> vc = {'classroom': '0 + C(classroom)'}
>>> MixedLM.from_formula('test_score ~ age', vc_formula=vc,
re_formula='1', groups='school', data=data)
Now suppose we also have a previous test score called
'pretest'. If we want the relationship between pretest
scores and the current test to vary by classroom, we can
specify a random slope for the pretest score
>>> vc = {'classroom': '0 + C(classroom)', 'pretest': '0 + pretest'}
>>> MixedLM.from_formula('test_score ~ age + pretest', vc_formula=vc,
re_formula='1', groups='school', data=data)
The following model is almost equivalent to the previous one,
but here the classroom random intercept and pretest slope may
be correlated.
>>> vc = {'classroom': '0 + C(classroom)'}
>>> MixedLM.from_formula('test_score ~ age + pretest', vc_formula=vc,
re_formula='1 + pretest', groups='school',
data=data)
"""
if "groups" not in kwargs.keys():
raise AttributeError("'groups' is a required keyword argument in MixedLM.from_formula")
# If `groups` is a variable name, retrieve the data for the
# groups variable.
group_name = "Group"
if type(kwargs["groups"]) == str:
group_name = kwargs["groups"]
kwargs["groups"] = np.asarray(data[kwargs["groups"]])
if re_formula is not None:
if re_formula.strip() == "1":
# Work around Patsy bug, fixed by 0.3.
exog_re = np.ones((data.shape[0], 1))
exog_re_names = ["Group"]
else:
eval_env = kwargs.get('eval_env', None)
if eval_env is None:
eval_env = 1
elif eval_env == -1:
from patsy import EvalEnvironment
eval_env = EvalEnvironment({})
exog_re = patsy.dmatrix(re_formula, data, eval_env=eval_env)
exog_re_names = exog_re.design_info.column_names
exog_re = np.asarray(exog_re)
if exog_re.ndim == 1:
exog_re = exog_re[:, None]
else:
exog_re = None
if vc_formula is None:
exog_re_names = ["groups"]
else:
exog_re_names = []
if vc_formula is not None:
eval_env = kwargs.get('eval_env', None)
if eval_env is None:
eval_env = 1
elif eval_env == -1:
from patsy import EvalEnvironment
eval_env = EvalEnvironment({})
exog_vc = {}
data["_group"] = kwargs["groups"]
gb = data.groupby("_group")
kylist = list(gb.groups.keys())
kylist.sort()
for vc_name in vc_formula.keys():
exog_vc[vc_name] = {}
for group_ix, group in enumerate(kylist):
ii = gb.groups[group]
vcg = vc_formula[vc_name]
mat = patsy.dmatrix(vcg, data.loc[ii, :], eval_env=eval_env,
return_type='dataframe')
if use_sparse:
exog_vc[vc_name][group] = sparse.csr_matrix(mat)
else:
exog_vc[vc_name][group] = np.asarray(mat)
exog_vc = exog_vc
else:
exog_vc = None
mod = super(MixedLM, cls).from_formula(formula, data,
subset=None,
exog_re=exog_re,
exog_vc=exog_vc,
*args, **kwargs)
# expand re names to account for pairs of RE
(param_names,
exog_re_names,
exog_re_names_full) = mod._make_param_names(exog_re_names)
mod.data.param_names = param_names
mod.data.exog_re_names = exog_re_names
mod.data.exog_re_names_full = exog_re_names_full
mod.data.vcomp_names = mod._vc_names
return mod
def predict(self, params, exog=None):
"""
Return predicted values from a design matrix.
Parameters
----------
params : array-like
Parameters of a mixed linear model. Can be either a
MixedLMParams instance, or a vector containing the packed
model parameters in which the fixed effects parameters are
at the beginning of the vector, or a vector containing
only the fixed effects parameters.
exog : array-like, optional
Design / exogenous data for the fixed effects. Model exog
is used if None.
Returns
-------
An array of fitted values. Note that these predicted values
only reflect the fixed effects mean structure of the model.
"""
if exog is None:
exog = self.exog
if isinstance(params, MixedLMParams):
params = params.fe_params
else:
params = params[0:self.k_fe]
return np.dot(exog, params)
def group_list(self, array):
"""
Returns `array` split into subarrays corresponding to the
grouping structure.
"""
if array is None:
return None
if array.ndim == 1:
return [np.array(array[self.row_indices[k]])
for k in self.group_labels]
else:
return [np.array(array[self.row_indices[k], :])
for k in self.group_labels]
def fit_regularized(self, start_params=None, method='l1', alpha=0,
ceps=1e-4, ptol=1e-6, maxit=200, **fit_kwargs):
"""
Fit a model in which the fixed effects parameters are
penalized. The dependence parameters are held fixed at their
estimated values in the unpenalized model.
Parameters
----------
        method : string or Penalty object
Method for regularization. If a string, must be 'l1'.
alpha : array-like
Scalar or vector of penalty weights. If a scalar, the
same weight is applied to all coefficients; if a vector,
it contains a weight for each coefficient. If method is a
Penalty object, the weights are scaled by alpha. For L1
regularization, the weights are used directly.
ceps : positive real scalar
Fixed effects parameters smaller than this value
            in magnitude are treated as being zero.
ptol : positive real scalar
Convergence occurs when the sup norm difference
between successive values of `fe_params` is less than
`ptol`.
maxit : integer
The maximum number of iterations.
fit_kwargs : keywords
Additional keyword arguments passed to fit.
Returns
-------
A MixedLMResults instance containing the results.
Notes
-----
The covariance structure is not updated as the fixed effects
parameters are varied.
        The algorithm used here for L1 regularization is a "shooting"
or cyclic coordinate descent algorithm.
If method is 'l1', then `fe_pen` and `cov_pen` are used to
obtain the covariance structure, but are ignored during the
L1-penalized fitting.
References
----------
Friedman, J. H., Hastie, T. and Tibshirani, R. Regularized
Paths for Generalized Linear Models via Coordinate
Descent. Journal of Statistical Software, 33(1) (2008)
http://www.jstatsoft.org/v33/i01/paper
http://statweb.stanford.edu/~tibs/stat315a/Supplements/fuse.pdf
"""
if type(method) == str and (method.lower() != 'l1'):
raise ValueError("Invalid regularization method")
# If method is a smooth penalty just optimize directly.
if isinstance(method, Penalty):
# Scale the penalty weights by alpha
method.alpha = alpha
fit_kwargs.update({"fe_pen": method})
return self.fit(**fit_kwargs)
if np.isscalar(alpha):
alpha = alpha * np.ones(self.k_fe, dtype=np.float64)
# Fit the unpenalized model to get the dependence structure.
mdf = self.fit(**fit_kwargs)
fe_params = mdf.fe_params
cov_re = mdf.cov_re
vcomp = mdf.vcomp
scale = mdf.scale
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
for itr in range(maxit):
fe_params_s = fe_params.copy()
for j in range(self.k_fe):
if abs(fe_params[j]) < ceps:
continue
# The residuals
fe_params[j] = 0.
expval = np.dot(self.exog, fe_params)
resid_all = self.endog - expval
# The loss function has the form
# a*x^2 + b*x + pwt*|x|
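                # Its minimizer (for a > 0) is the soft-threshold value applied
                # below: x = -(b - pwt)/(2a) if b > pwt, x = -(b + pwt)/(2a)
                # if b < -pwt, and x = 0 otherwise.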
a, b = 0., 0.
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
resid = resid_all[self.row_indices[group]]
solver = _smw_solver(scale, ex_r, ex2_r, cov_re_inv, 1 / vc_var)
x = exog[:, j]
u = solver(x)
a += np.dot(u, x)
b -= 2 * np.dot(u, resid)
pwt1 = alpha[j]
if b > pwt1:
fe_params[j] = -(b - pwt1) / (2 * a)
elif b < -pwt1:
fe_params[j] = -(b + pwt1) / (2 * a)
if np.abs(fe_params_s - fe_params).max() < ptol:
break
# Replace the fixed effects estimates with their penalized
# values, leave the dependence parameters in their unpenalized
# state.
params_prof = mdf.params.copy()
params_prof[0:self.k_fe] = fe_params
scale = self.get_scale(fe_params, mdf.cov_re_unscaled, mdf.vcomp)
# Get the Hessian including only the nonzero fixed effects,
# then blow back up to the full size after inverting.
hess = self.hessian(params_prof)
pcov = np.nan * np.ones_like(hess)
ii = np.abs(params_prof) > ceps
ii[self.k_fe:] = True
ii = np.flatnonzero(ii)
hess1 = hess[ii, :][:, ii]
pcov[np.ix_(ii,ii)] = np.linalg.inv(-hess1)
params_object = MixedLMParams.from_components(fe_params, cov_re=cov_re)
results = MixedLMResults(self, params_prof, pcov / scale)
results.params_object = params_object
results.fe_params = fe_params
results.cov_re = cov_re
results.scale = scale
results.cov_re_unscaled = mdf.cov_re_unscaled
results.method = mdf.method
results.converged = True
results.cov_pen = self.cov_pen
results.k_fe = self.k_fe
results.k_re = self.k_re
results.k_re2 = self.k_re2
results.k_vc = self.k_vc
return MixedLMResultsWrapper(results)
def get_fe_params(self, cov_re, vcomp):
"""
Use GLS to update the fixed effects parameter estimates.
Parameters
----------
cov_re : array-like
The covariance matrix of the random effects.
Returns
-------
The GLS estimates of the fixed effects parameters.
"""
if self.k_fe == 0:
return np.array([])
if self.k_re == 0:
cov_re_inv = np.empty((0,0))
else:
cov_re_inv = np.linalg.inv(cov_re)
# Cache these quantities that don't change.
if not hasattr(self, "_endex_li"):
self._endex_li = []
for group_ix, _ in enumerate(self.group_labels):
mat = np.concatenate((self.exog_li[group_ix], self.endog_li[group_ix][:, None]), axis=1)
self._endex_li.append(mat)
xtxy = 0.
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
u = solver(self._endex_li[group_ix])
xtxy += np.dot(exog.T, u)
fe_params = np.linalg.solve(xtxy[:, 0:-1], xtxy[:, -1])
return fe_params
def _reparam(self):
"""
Returns parameters of the map converting parameters from the
form used in optimization to the form returned to the user.
Returns
-------
lin : list-like
Linear terms of the map
quad : list-like
Quadratic terms of the map
Notes
-----
If P are the standard form parameters and R are the
transformed parameters (i.e. with the Cholesky square root
        covariance and square-root transformed variance components),
then P[i] = lin[i] * R + R' * quad[i] * R
"""
k_fe, k_re, k_re2, k_vc = self.k_fe, self.k_re, self.k_re2, self.k_vc
k_tot = k_fe + k_re2 + k_vc
ix = np.tril_indices(self.k_re)
lin = []
for k in range(k_fe):
e = np.zeros(k_tot)
e[k] = 1
lin.append(e)
for k in range(k_re2):
lin.append(np.zeros(k_tot))
for k in range(k_vc):
lin.append(np.zeros(k_tot))
quad = []
# Quadratic terms for fixed effects.
for k in range(k_tot):
quad.append(np.zeros((k_tot, k_tot)))
# Quadratic terms for random effects covariance.
ii = np.tril_indices(k_re)
ix = [(a,b) for a,b in zip(ii[0], ii[1])]
for i1 in range(k_re2):
for i2 in range(k_re2):
ix1 = ix[i1]
ix2 = ix[i2]
if (ix1[1] == ix2[1]) and (ix1[0] <= ix2[0]):
ii = (ix2[0], ix1[0])
k = ix.index(ii)
quad[k_fe+k][k_fe+i2, k_fe+i1] += 1
for k in range(k_tot):
quad[k] = 0.5*(quad[k] + quad[k].T)
# Quadratic terms for variance components.
km = k_fe + k_re2
for k in range(km, km+k_vc):
quad[k][k, k] = 1
return lin, quad
def _expand_vcomp(self, vcomp, group):
"""
Replicate variance parameters to match a group's design.
Parameters
----------
vcomp : array-like
The variance parameters for the variance components.
group : string
The group label
        Returns an expanded version of vcomp, in which each variance
parameter is copied as many times as there are independent
realizations of the variance component in the given group.
"""
if len(vcomp) == 0:
return np.empty(0)
vc_var = []
for j, k in enumerate(self._vc_names):
if group in self.exog_vc[k]:
vc_var.append(vcomp[j] * np.ones(self.exog_vc[k][group].shape[1]))
if len(vc_var) > 0:
return np.concatenate(vc_var)
else:
return np.empty(0)
def _augment_exog(self, group_ix):
"""
Concatenate the columns for variance components to the columns
for other random effects to obtain a single random effects
exog matrix for a given group.
"""
ex_r = self.exog_re_li[group_ix] if self.k_re > 0 else None
if self.k_vc == 0:
return ex_r
group = self.group_labels[group_ix]
ex = [ex_r] if self.k_re > 0 else []
any_sparse = False
for j,k in enumerate(self._vc_names):
if group not in self.exog_vc[k]:
continue
ex.append(self.exog_vc[k][group])
any_sparse |= sparse.issparse(ex[-1])
if any_sparse:
for j, x in enumerate(ex):
if not sparse.issparse(x):
ex[j] = sparse.csr_matrix(x)
ex = sparse.hstack(ex)
ex = sparse.csr_matrix(ex)
else:
ex = np.concatenate(ex, axis=1)
return ex
def loglike(self, params, profile_fe=True):
"""
Evaluate the (profile) log-likelihood of the linear mixed
effects model.
Parameters
----------
params : MixedLMParams, or array-like.
The parameter value. If array-like, must be a packed
parameter vector containing only the covariance
parameters.
profile_fe : boolean
If True, replace the provided value of `fe_params` with
the GLS estimates.
Returns
-------
The log-likelihood value at `params`.
Notes
-----
The scale parameter `scale` is always profiled out of the
log-likelihood. In addition, if `profile_fe` is true the
fixed effects parameters are also profiled out.
"""
if type(params) is not MixedLMParams:
params = MixedLMParams.from_packed(params, self.k_fe,
self.k_re, self.use_sqrt,
has_fe=False)
cov_re = params.cov_re
vcomp = params.vcomp
# Move to the profile set
if profile_fe:
fe_params = self.get_fe_params(cov_re, vcomp)
else:
fe_params = params.fe_params
if self.k_re > 0:
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
_, cov_re_logdet = np.linalg.slogdet(cov_re)
else:
cov_re_inv = np.zeros((0, 0))
cov_re_logdet = 0
# The residuals
expval = np.dot(self.exog, fe_params)
resid_all = self.endog - expval
likeval = 0.
# Handle the covariance penalty
if (self.cov_pen is not None) and (self.k_re > 0):
likeval -= self.cov_pen.func(cov_re, cov_re_inv)
# Handle the fixed effects penalty
if (self.fe_pen is not None):
likeval -= self.fe_pen.func(fe_params)
xvx, qf = 0., 0.
for k, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group)
cov_aug_logdet = cov_re_logdet + np.sum(np.log(vc_var))
exog = self.exog_li[k]
ex_r, ex2_r = self._aex_r[k], self._aex_r2[k]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
resid = resid_all[self.row_indices[group]]
# Part 1 of the log likelihood (for both ML and REML)
ld = _smw_logdet(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var, cov_aug_logdet)
likeval -= ld / 2.
# Part 2 of the log likelihood (for both ML and REML)
u = solver(resid)
qf += np.dot(resid, u)
# Adjustment for REML
if self.reml:
mat = solver(exog)
xvx += np.dot(exog.T, mat)
if self.reml:
likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.
_,ld = np.linalg.slogdet(xvx)
likeval -= ld / 2.
likeval -= (self.n_totobs - self.k_fe) * np.log(2 * np.pi) / 2.
likeval += ((self.n_totobs - self.k_fe) *
np.log(self.n_totobs - self.k_fe) / 2.)
likeval -= (self.n_totobs - self.k_fe) / 2.
else:
likeval -= self.n_totobs * np.log(qf) / 2.
likeval -= self.n_totobs * np.log(2 * np.pi) / 2.
likeval += self.n_totobs * np.log(self.n_totobs) / 2.
likeval -= self.n_totobs / 2.
return likeval
def _gen_dV_dPar(self, ex_r, solver, group, max_ix=None):
"""
A generator that yields the element-wise derivative of the
marginal covariance matrix with respect to the random effects
variance and covariance parameters.
ex_r : array-like
The random effects design matrix
solver : function
A function that given x returns V^{-1}x, where V
is the group's marginal covariance matrix.
group : scalar
The group label
max_ix : integer or None
If not None, the generator ends when this index
is reached.
"""
axr = solver(ex_r)
# Regular random effects
jj = 0
for j1 in range(self.k_re):
for j2 in range(j1 + 1):
if max_ix is not None and jj > max_ix:
return
mat_l, mat_r = ex_r[:,j1:j1+1], ex_r[:,j2:j2+1] # Need 2d
vsl, vsr = axr[:,j1:j1+1], axr[:,j2:j2+1]
yield jj, mat_l, mat_r, vsl, vsr, j1 == j2
jj += 1
# Variance components
for ky in self._vc_names:
if group in self.exog_vc[ky]:
if max_ix is not None and jj > max_ix:
return
mat = self.exog_vc[ky][group]
axmat = solver(mat)
yield jj, mat, mat, axmat, axmat, True
jj += 1
def score(self, params, profile_fe=True):
"""
Returns the score vector of the profile log-likelihood.
Notes
-----
The score vector that is returned is computed with respect to
the parameterization defined by this model instance's
`use_sqrt` attribute.
"""
if type(params) is not MixedLMParams:
params = MixedLMParams.from_packed(params, self.k_fe,
self.k_re, self.use_sqrt,
has_fe=False)
if profile_fe:
params.fe_params = self.get_fe_params(params.cov_re, params.vcomp)
if self.use_sqrt:
score_fe, score_re, score_vc = self.score_sqrt(params, calc_fe=not profile_fe)
else:
score_fe, score_re, score_vc = self.score_full(params, calc_fe=not profile_fe)
if self._freepat is not None:
score_fe *= self._freepat.fe_params
score_re *= self._freepat.cov_re[self._freepat._ix]
score_vc *= self._freepat.vcomp
if profile_fe:
return np.concatenate((score_re, score_vc))
else:
return np.concatenate((score_fe, score_re, score_vc))
def score_full(self, params, calc_fe):
"""
Returns the score with respect to untransformed parameters.
Calculates the score vector for the profiled log-likelihood of
the mixed effects model with respect to the parameterization
in which the random effects covariance matrix is represented
in its full form (not using the Cholesky factor).
Parameters
----------
params : MixedLMParams or array-like
The parameter at which the score function is evaluated.
If array-like, must contain the packed random effects
parameters (cov_re and vcomp) without fe_params.
calc_fe : boolean
If True, calculate the score vector for the fixed effects
parameters. If False, this vector is not calculated, and
a vector of zeros is returned in its place.
Returns
-------
score_fe : array-like
The score vector with respect to the fixed effects
parameters.
score_re : array-like
The score vector with respect to the random effects
parameters (excluding variance components parameters).
score_vc : array-like
The score vector with respect to variance components
parameters.
Notes
-----
`score_re` is taken with respect to the parameterization in
which `cov_re` is represented through its lower triangle
(without taking the Cholesky square root).
"""
fe_params = params.fe_params
cov_re = params.cov_re
vcomp = params.vcomp
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
score_fe = np.zeros(self.k_fe)
score_re = np.zeros(self.k_re2)
score_vc = np.zeros(self.k_vc)
# Handle the covariance penalty.
if self.cov_pen is not None:
score_re -= self.cov_pen.grad(cov_re, cov_re_inv)
# Handle the fixed effects penalty.
if calc_fe and (self.fe_pen is not None):
score_fe -= self.fe_pen.grad(fe_params)
# resid' V^{-1} resid, summed over the groups (a scalar)
rvir = 0.
# exog' V^{-1} resid, summed over the groups (a k_fe
# dimensional vector)
xtvir = 0.
# exog' V^{-1} exog, summed over the groups (a k_fe x k_fe
# matrix)
xtvix = 0.
# V^{-1} exog' dV/dQ_jj exog V^{-1}, where Q_jj is the jj^th
# covariance parameter.
xtax = [0.,] * (self.k_re2 + self.k_vc)
# Temporary related to the gradient of log |V|
dlv = np.zeros(self.k_re2 + self.k_vc)
# resid' V^{-1} dV/dQ_jj V^{-1} resid (a scalar)
rvavr = np.zeros(self.k_re2 + self.k_vc)
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
# The residuals
resid = self.endog_li[group_ix]
if self.k_fe > 0:
expval = np.dot(exog, fe_params)
resid = resid - expval
if self.reml:
viexog = solver(exog)
xtvix += np.dot(exog.T, viexog)
# Contributions to the covariance parameter gradient
vir = solver(resid)
for jj, matl, matr, vsl, vsr, sym in self._gen_dV_dPar(ex_r, solver, group):
dlv[jj] = _dotsum(matr, vsl)
if not sym:
dlv[jj] += _dotsum(matl, vsr)
ul = _dot(vir, matl)
ur = ul.T if sym else _dot(matr.T, vir)
ulr = np.dot(ul, ur)
rvavr[jj] += ulr
if not sym:
rvavr[jj] += ulr.T
if self.reml:
ul = _dot(viexog.T, matl)
ur = ul.T if sym else _dot(matr.T, viexog)
ulr = np.dot(ul, ur)
xtax[jj] += ulr
if not sym:
xtax[jj] += ulr.T
# Contribution of log|V| to the covariance parameter
# gradient.
if self.k_re > 0:
score_re -= 0.5 * dlv[0:self.k_re2]
if self.k_vc > 0:
score_vc -= 0.5 * dlv[self.k_re2:]
rvir += np.dot(resid, vir)
if calc_fe:
xtvir += np.dot(exog.T, vir)
fac = self.n_totobs
if self.reml:
fac -= self.k_fe
if calc_fe and self.k_fe > 0:
score_fe += fac * xtvir / rvir
if self.k_re > 0:
score_re += 0.5 * fac * rvavr[0:self.k_re2] / rvir
if self.k_vc > 0:
score_vc += 0.5 * fac * rvavr[self.k_re2:] / rvir
if self.reml:
xtvixi = np.linalg.inv(xtvix)
for j in range(self.k_re2):
score_re[j] += 0.5 * _dotsum(xtvixi.T, xtax[j])
for j in range(self.k_vc):
score_vc[j] += 0.5 * _dotsum(xtvixi.T, xtax[self.k_re2 + j])
return score_fe, score_re, score_vc
def score_sqrt(self, params, calc_fe=True):
"""
Returns the score with respect to transformed parameters.
Calculates the score vector with respect to the
parameterization in which the random effects covariance matrix
is represented through its Cholesky square root.
Parameters
----------
params : MixedLMParams or array-like
The model parameters. If array-like must contain packed
parameters that are compatible with this model instance.
calc_fe : boolean
If True, calculate the score vector for the fixed effects
parameters. If False, this vector is not calculated, and
a vector of zeros is returned in its place.
Returns
-------
score_fe : array-like
The score vector with respect to the fixed effects
parameters.
score_re : array-like
The score vector with respect to the random effects
parameters (excluding variance components parameters).
score_vc : array-like
The score vector with respect to variance components
parameters.
"""
score_fe, score_re, score_vc = self.score_full(params, calc_fe=calc_fe)
params_vec = params.get_packed(use_sqrt=True, has_fe=True)
score_full = np.concatenate((score_fe, score_re, score_vc))
scr = 0.
for i in range(len(params_vec)):
v = self._lin[i] + 2 * np.dot(self._quad[i], params_vec)
scr += score_full[i] * v
score_fe = scr[0:self.k_fe]
score_re = scr[self.k_fe:self.k_fe + self.k_re2]
score_vc = scr[self.k_fe + self.k_re2:]
return score_fe, score_re, score_vc
def hessian(self, params):
"""
Returns the model's Hessian matrix.
Calculates the Hessian matrix for the linear mixed effects
model with respect to the parameterization in which the
covariance matrix is represented directly (without square-root
transformation).
Parameters
----------
params : MixedLMParams or array-like
The model parameters at which the Hessian is calculated.
If array-like, must contain the packed parameters in a
form that is compatible with this model instance.
Returns
-------
hess : 2d ndarray
The Hessian matrix, evaluated at `params`.
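Examples
--------
Sketch only; ``result`` is assumed to be a fitted MixedLMResults
instance, so its `params_object` carries the fixed effects:

>>> hess = result.model.hessian(result.params_object)
>>> hess.shape  # (k_fe + k_re2 + k_vc, k_fe + k_re2 + k_vc)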
"""
if type(params) is not MixedLMParams:
params = MixedLMParams.from_packed(params, self.k_fe, self.k_re,
use_sqrt=self.use_sqrt,
has_fe=True)
fe_params = params.fe_params
vcomp = params.vcomp
cov_re = params.cov_re
if self.k_re > 0:
cov_re_inv = np.linalg.inv(cov_re)
else:
cov_re_inv = np.empty((0, 0))
# Blocks for the fixed and random effects parameters.
hess_fe = 0.
hess_re = np.zeros((self.k_re2 + self.k_vc, self.k_re2 + self.k_vc))
hess_fere = np.zeros((self.k_re2 + self.k_vc, self.k_fe))
fac = self.n_totobs
if self.reml:
fac -= self.exog.shape[1]
rvir = 0.
xtvix = 0.
xtax = [0.,] * (self.k_re2 + self.k_vc)
m = self.k_re2 + self.k_vc
B = np.zeros(m)
D = np.zeros((m, m))
F = [[0.] * m for k in range(m)]
for k, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group)
exog = self.exog_li[k]
ex_r, ex2_r = self._aex_r[k], self._aex_r2[k]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
# The residuals
resid = self.endog_li[k]
if self.k_fe > 0:
expval = np.dot(exog, fe_params)
resid = resid - expval
viexog = solver(exog)
xtvix += np.dot(exog.T, viexog)
vir = solver(resid)
rvir += np.dot(resid, vir)
for jj1, matl1, matr1, vsl1, vsr1, sym1 in self._gen_dV_dPar(ex_r, solver, group):
ul = _dot(viexog.T, matl1)
ur = _dot(matr1.T, vir)
hess_fere[jj1, :] += np.dot(ul, ur)
if not sym1:
ul = _dot(viexog.T, matr1)
ur = _dot(matl1.T, vir)
hess_fere[jj1, :] += np.dot(ul, ur)
if self.reml:
ul = _dot(viexog.T, matl1)
ur = ul if sym1 else np.dot(viexog.T, matr1)
ulr = _dot(ul, ur.T)
xtax[jj1] += ulr
if not sym1:
xtax[jj1] += ulr.T
ul = _dot(vir, matl1)
ur = ul if sym1 else _dot(vir, matr1)
B[jj1] += np.dot(ul, ur) * (1 if sym1 else 2)
# V^{-1} * dV/d_theta
E = [(vsl1, matr1)]
if not sym1:
E.append((vsr1, matl1))
for jj2, matl2, matr2, vsl2, vsr2, sym2 in self._gen_dV_dPar(ex_r, solver, group, jj1):
re = sum([_multi_dot_three(matr2.T, x[0], x[1].T) for x in E])
vt = 2 * _dot(_multi_dot_three(vir[None, :], matl2, re), vir[:, None])
if not sym2:
le = sum([_multi_dot_three(matl2.T, x[0], x[1].T) for x in E])
vt += 2 * _dot(_multi_dot_three(vir[None, :], matr2, le), vir[:, None])
D[jj1, jj2] += vt
if jj1 != jj2:
D[jj2, jj1] += vt
rt = _dotsum(vsl2, re.T) / 2
if not sym2:
rt += _dotsum(vsr2, le.T) / 2
hess_re[jj1, jj2] += rt
if jj1 != jj2:
hess_re[jj2, jj1] += rt
if self.reml:
ev = sum([_dot(x[0], _dot(x[1].T, viexog)) for x in E])
u1 = _dot(viexog.T, matl2)
u2 = _dot(matr2.T, ev)
um = np.dot(u1, u2)
F[jj1][jj2] += um + um.T
if not sym2:
u1 = np.dot(viexog.T, matr2)
u2 = np.dot(matl2.T, ev)
um = np.dot(u1, u2)
F[jj1][jj2] += um + um.T
hess_fe -= fac * xtvix / rvir
hess_re = hess_re - 0.5 * fac * (D/rvir - np.outer(B, B) / rvir**2)
hess_fere = -fac * hess_fere / rvir
if self.reml:
QL = [np.linalg.solve(xtvix, x) for x in xtax]
for j1 in range(self.k_re2 + self.k_vc):
for j2 in range(j1 + 1):
a = _dotsum(QL[j1].T, QL[j2])
a -= np.trace(np.linalg.solve(xtvix, F[j1][j2]))
a *= 0.5
hess_re[j1, j2] += a
if j1 > j2:
hess_re[j2, j1] += a
# Put the blocks together to get the Hessian.
m = self.k_fe + self.k_re2 + self.k_vc
hess = np.zeros((m, m))
hess[0:self.k_fe, 0:self.k_fe] = hess_fe
hess[0:self.k_fe, self.k_fe:] = hess_fere.T
hess[self.k_fe:, 0:self.k_fe] = hess_fere
hess[self.k_fe:, self.k_fe:] = hess_re
return hess
def get_scale(self, fe_params, cov_re, vcomp):
"""
Returns the estimated error variance based on given estimates
of the slopes and random effects covariance matrix.
Parameters
----------
fe_params : array-like
The regression slope estimates
cov_re : 2d array-like
Estimate of the random effects covariance matrix
vcomp : array-like
Estimate of the variance components
Returns
-------
scale : float
The estimated error variance.
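Examples
--------
Sketch only; ``result`` is a hypothetical fitted MixedLMResults, and the
unscaled quantities follow the conventions used in `fit`:

>>> m = result.model
>>> m.get_scale(result.fe_params, result.cov_re_unscaled,
...             result.vcomp / result.scale)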
"""
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
qf = 0.
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
# The residuals
resid = self.endog_li[group_ix]
if self.k_fe > 0:
expval = np.dot(exog, fe_params)
resid = resid - expval
mat = solver(resid)
qf += np.dot(resid, mat)
if self.reml:
qf /= (self.n_totobs - self.k_fe)
else:
qf /= self.n_totobs
return qf
def fit(self, start_params=None, reml=True, niter_sa=0,
do_cg=True, fe_pen=None, cov_pen=None, free=None,
full_output=False, method='bfgs', **kwargs):
"""
Fit a linear mixed model to the data.
Parameters
----------
start_params : array-like or MixedLMParams
Starting values for the profile log-likelihood. If not a
`MixedLMParams` instance, this should be an array
containing the packed parameters for the profile
log-likelihood, including the fixed effects
parameters.
reml : bool
If true, fit according to the REML likelihood, else
fit the standard likelihood using ML.
cov_pen : CovariancePenalty object
A penalty for the random effects covariance matrix
fe_pen : Penalty object
A penalty on the fixed effects
free : MixedLMParams object
If not `None`, this is a mask that allows parameters to be
held fixed at specified values. A 1 indicates that the
corresponding parameter is estimated, a 0 indicates that
it is fixed at its starting value. Setting the `cov_re`
component to the identity matrix fits a model with
independent random effects. Note that some optimization
methods do not respect this constraint (bfgs and lbfgs both
work).
full_output : bool
If true, attach iteration history to results
method : string
Optimization method.
Returns
-------
A MixedLMResults instance.
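Examples
--------
A minimal, illustrative sketch only (not taken from this module's
tests); ``endog``, ``exog`` and ``groups`` are hypothetical arrays, and
the call pattern assumes the usual statsmodels MixedLM API:

>>> model = MixedLM(endog, exog, groups)
>>> result = model.fit(reml=True, method="lbfgs")
>>> print(result.summary())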
"""
_allowed_kwargs = ['gtol', 'maxiter']
for x in kwargs.keys():
if x not in _allowed_kwargs:
raise ValueError("Argument %s not allowed for MixedLM.fit" % x)
if method.lower() in ["newton", "ncg"]:
raise ValueError("method %s not available for MixedLM" % method)
self.reml = reml
self.cov_pen = cov_pen
self.fe_pen = fe_pen
self._freepat = free
if full_output:
hist = []
else:
hist = None
success = False
if start_params is None:
params = MixedLMParams(self.k_fe, self.k_re, self.k_vc)
params.fe_params = np.zeros(self.k_fe)
params.cov_re = np.eye(self.k_re)
params.vcomp = np.ones(self.k_vc)
else:
if isinstance(start_params, MixedLMParams):
params = start_params
else:
# It's a packed array
if len(start_params) == self.k_fe + self.k_re2 + self.k_vc:
params = MixedLMParams.from_packed(start_params, self.k_fe,
self.k_re, self.use_sqrt,
has_fe=True)
elif len(start_params) == self.k_re2 + self.k_vc:
params = MixedLMParams.from_packed(start_params, self.k_fe,
self.k_re, self.use_sqrt,
has_fe=False)
else:
raise ValueError("invalid start_params")
if do_cg:
kwargs["retall"] = hist is not None
if "disp" not in kwargs:
kwargs["disp"] = False
packed = params.get_packed(use_sqrt=self.use_sqrt, has_fe=False)
# It seems that the optimizers sometimes stop too soon, so
# we run a few times.
for rep in range(5):
rslt = super(MixedLM, self).fit(start_params=packed,
skip_hessian=True,
method=method,
**kwargs)
if rslt.mle_retvals['converged']:
break
packed = rslt.params
# Extract the optimizer output; convergence is checked below.
params = np.atleast_1d(rslt.params)
if hist is not None:
hist.append(rslt.mle_retvals)
converged = rslt.mle_retvals['converged']
if not converged:
msg = "Gradient optimization failed."
warnings.warn(msg, ConvergenceWarning)
# Convert to the final parameterization (i.e. undo the square
# root transform of the covariance matrix, and the profiling
# over the error variance).
params = MixedLMParams.from_packed(params, self.k_fe, self.k_re,
use_sqrt=self.use_sqrt, has_fe=False)
cov_re_unscaled = params.cov_re
vcomp_unscaled = params.vcomp
fe_params = self.get_fe_params(cov_re_unscaled, vcomp_unscaled)
params.fe_params = fe_params
scale = self.get_scale(fe_params, cov_re_unscaled, vcomp_unscaled)
cov_re = scale * cov_re_unscaled
vcomp = scale * vcomp_unscaled
if (((self.k_re > 0) and (np.min(np.abs(np.diag(cov_re))) < 0.01)) or
((self.k_vc > 0) and (np.min(np.abs(vcomp)) < 0.01))):
msg = "The MLE may be on the boundary of the parameter space."
warnings.warn(msg, ConvergenceWarning)
# Compute the Hessian at the MLE. Note that this is the
# Hessian with respect to the random effects covariance matrix
# (not its square root). It is used for obtaining standard
# errors, not for optimization.
hess = self.hessian(params)
hess_diag = np.diag(hess)
if free is not None:
pcov = np.zeros_like(hess)
pat = self._freepat.get_packed(use_sqrt=False, has_fe=True)
ii = np.flatnonzero(pat)
hess_diag = hess_diag[ii]
if len(ii) > 0:
hess1 = hess[np.ix_(ii, ii)]
pcov[np.ix_(ii, ii)] = np.linalg.inv(-hess1)
else:
pcov = np.linalg.inv(-hess)
if np.any(hess_diag >= 0):
msg = "The Hessian matrix at the estimated parameter values is not positive definite."
warnings.warn(msg, ConvergenceWarning)
# Prepare a results class instance
params_packed = params.get_packed(use_sqrt=False, has_fe=True)
results = MixedLMResults(self, params_packed, pcov / scale)
results.params_object = params
results.fe_params = fe_params
results.cov_re = cov_re
results.vcomp = vcomp
results.scale = scale
results.cov_re_unscaled = cov_re_unscaled
results.method = "REML" if self.reml else "ML"
results.converged = converged
results.hist = hist
results.reml = self.reml
results.cov_pen = self.cov_pen
results.k_fe = self.k_fe
results.k_re = self.k_re
results.k_re2 = self.k_re2
results.k_vc = self.k_vc
results.use_sqrt = self.use_sqrt
results.freepat = self._freepat
return MixedLMResultsWrapper(results)
class MixedLMResults(base.LikelihoodModelResults, base.ResultMixin):
'''
Class to contain results of fitting a linear mixed effects model.
MixedLMResults inherits from statsmodels.LikelihoodModelResults
Parameters
----------
See statsmodels.LikelihoodModelResults
Attributes
----------
model : class instance
Pointer to the MixedLM model instance that called fit.
normalized_cov_params : array
The sampling covariance matrix of the estimates
fe_params : array
The fitted fixed-effects coefficients
re_params : array
The fitted random-effects covariance matrix
bse_fe : array
The standard errors of the fitted fixed effects coefficients
bse_re : array
The standard errors of the fitted random effects covariance
matrix
See Also
--------
statsmodels.LikelihoodModelResults
'''
def __init__(self, model, params, cov_params):
super(MixedLMResults, self).__init__(model, params,
normalized_cov_params=cov_params)
self.nobs = self.model.nobs
self.df_resid = self.nobs - np_matrix_rank(self.model.exog)
@cache_readonly
def fittedvalues(self):
"""
Returns the fitted values for the model.
The fitted values reflect the mean structure specified by the
fixed effects and the predicted random effects.
"""
fit = np.dot(self.model.exog, self.fe_params)
re = self.random_effects
for group_ix, group in enumerate(self.model.group_labels):
ix = self.model.row_indices[group]
mat = [self.model.exog_re_li[group_ix]]
for c in self.model._vc_names:
if group in self.model.exog_vc[c]:
mat.append(self.model.exog_vc[c][group])
mat = np.concatenate(mat, axis=1)
fit[ix] += np.dot(mat, re[group])
return fit
@cache_readonly
def resid(self):
"""
Returns the residuals for the model.
The residuals reflect the mean structure specified by the
fixed effects and the predicted random effects.
"""
return self.model.endog - self.fittedvalues
@cache_readonly
def bse_fe(self):
"""
Returns the standard errors of the fixed effect regression
coefficients.
"""
p = self.model.exog.shape[1]
return np.sqrt(np.diag(self.cov_params())[0:p])
@cache_readonly
def bse_re(self):
"""
Returns the standard errors of the variance parameters. Note
that the sampling distribution of variance parameters is
strongly skewed unless the sample size is large, so these
standard errors may not give meaningful confidence intervals
or p-values if used in the usual way.
"""
p = self.model.exog.shape[1]
return np.sqrt(self.scale * np.diag(self.cov_params())[p:])
def _expand_re_names(self, group):
names = list(self.model.data.exog_re_names)
for v in self.model._vc_names:
if group in self.model.exog_vc[v]:
ix = range(self.model.exog_vc[v][group].shape[1])
na = ["%s[%d]" % (v, j + 1) for j in ix]
names.extend(na)
return names
@cache_readonly
def random_effects(self):
"""
The conditional means of random effects given the data.
Returns
-------
random_effects : dict
A dictionary mapping the distinct `group` values to the
means of the random effects for the group.
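Examples
--------
Sketch with hypothetical names; ``result`` is a fitted MixedLMResults
and ``"g1"`` one of the model's group labels:

>>> re = result.random_effects
>>> re["g1"]  # pandas Series of predicted random effects for group "g1"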
"""
try:
cov_re_inv = np.linalg.inv(self.cov_re)
except np.linalg.LinAlgError:
raise ValueError("Cannot predict random effects from singular covariance structure.")
vcomp = self.vcomp
k_re = self.k_re
ranef_dict = {}
for group_ix, group in enumerate(self.model.group_labels):
endog = self.model.endog_li[group_ix]
exog = self.model.exog_li[group_ix]
ex_r, ex2_r = self.model._aex_r[group_ix], self.model._aex_r2[group_ix]
vc_var = self.model._expand_vcomp(vcomp, group)
# Get the residuals relative to fixed effects
resid = endog
if self.k_fe > 0:
expval = np.dot(exog, self.fe_params)
resid = resid - expval
solver = _smw_solver(self.scale, ex_r, ex2_r, cov_re_inv, 1 / vc_var)
vir = solver(resid)
xtvir = _dot(ex_r.T, vir)
xtvir[0:k_re] = np.dot(self.cov_re, xtvir[0:k_re])
xtvir[k_re:] *= vc_var
ranef_dict[group] = pd.Series(xtvir, index=self._expand_re_names(group))
return ranef_dict
@cache_readonly
def random_effects_cov(self):
"""
Returns the conditional covariance matrix of the random
effects for each group given the data.
Returns
-------
random_effects_cov : dict
A dictionary mapping the distinct values of the `group`
variable to the conditional covariance matrix of the
random effects given the data.
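Examples
--------
Sketch with hypothetical names, continuing the `random_effects`
example (``result`` is a fitted MixedLMResults, ``"g1"`` a group label):

>>> cov = result.random_effects_cov
>>> cov["g1"]  # DataFrame: conditional covariance for group "g1"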
"""
try:
cov_re_inv = np.linalg.inv(self.cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
vcomp = self.vcomp
ranef_dict = {}
for group_ix in range(self.model.n_groups):
ex_r, ex2_r = self.model._aex_r[group_ix], self.model._aex_r2[group_ix]
label = self.model.group_labels[group_ix]
vc_var = self.model._expand_vcomp(vcomp, label)
solver = _smw_solver(self.scale, ex_r, ex2_r, cov_re_inv, 1 / vc_var)
n = ex_r.shape[0]
m = self.cov_re.shape[0]
mat1 = np.empty((n, m))
mat1[:, 0:m] = np.dot(ex_r[:, 0:m], self.cov_re)
mat1[:, m:] = np.dot(ex_r[:, m:], np.diag(vc_var))
mat2 = solver(mat1)
mat2 = np.dot(mat1.T, mat2)
v = -mat2
v[0:m, 0:m] += self.cov_re
ix = np.arange(m, v.shape[0])
v[ix, ix] += vc_var
na = self._expand_re_names(label)
v = pd.DataFrame(v, index=na, columns=na)
ranef_dict[label] = v
return ranef_dict
# Need to override since t-tests are only used for fixed effects parameters.
def t_test(self, r_matrix, scale=None, use_t=None):
"""
Compute a t-test for each linear hypothesis of the form Rb = q
Parameters
----------
r_matrix : array-like
If an array is given, a p x k 2d array or length k 1d
array specifying the linear restrictions. It is assumed
that the linear combination is equal to zero.
scale : float, optional
An optional `scale` to use. Default is the scale specified
by the model fit.
use_t : bool, optional
If use_t is None, then the default of the model is used.
If use_t is True, then the p-values are based on the t
distribution.
If use_t is False, then the p-values are based on the normal
distribution.
Returns
-------
res : ContrastResults instance
The results for the test are attributes of this results instance.
The available results have the same elements as the parameter table
in `summary()`.
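Examples
--------
Testing that the second fixed-effects coefficient is zero; ``result``
is a hypothetical fitted MixedLMResults with at least two fixed
effects:

>>> import numpy as np
>>> R = np.zeros((1, result.k_fe))
>>> R[0, 1] = 1
>>> print(result.t_test(R))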
"""
if r_matrix.shape[1] != self.k_fe:
raise ValueError("r_matrix for t-test should have %d columns" % self.k_fe)
d = self.k_re2 + self.k_vc
z0 = np.zeros((r_matrix.shape[0], d))
r_matrix = np.concatenate((r_matrix, z0), axis=1)
tst_rslt = super(MixedLMResults, self).t_test(r_matrix, scale=scale, use_t=use_t)
return tst_rslt
def summary(self, yname=None, xname_fe=None, xname_re=None,
title=None, alpha=.05):
"""
Summarize the mixed model regression results.
Parameters
----------
yname : string, optional
Default is `y`
xname_fe : list of strings, optional
Fixed effects covariate names
xname_re : list of strings, optional
Random effects covariate names
title : string, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
Significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
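Examples
--------
Sketch only; ``result`` is a hypothetical fitted MixedLMResults:

>>> smry = result.summary(yname="response", alpha=0.10)
>>> print(smry)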
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
info = OrderedDict()
info["Model:"] = "MixedLM"
if yname is None:
yname = self.model.endog_names
info["No. Observations:"] = str(self.model.n_totobs)
info["No. Groups:"] = str(self.model.n_groups)
gs = np.array([len(x) for x in self.model.endog_li])
info["Min. group size:"] = "%.0f" % min(gs)
info["Max. group size:"] = "%.0f" % max(gs)
info["Mean group size:"] = "%.1f" % np.mean(gs)
info["Dependent Variable:"] = yname
info["Method:"] = self.method
info["Scale:"] = self.scale
info["Likelihood:"] = self.llf
info["Converged:"] = "Yes" if self.converged else "No"
smry.add_dict(info)
smry.add_title("Mixed Linear Model Regression Results")
float_fmt = "%.3f"
sdf = np.nan * np.ones((self.k_fe + self.k_re2 + self.k_vc, 6))
# Coefficient estimates
sdf[0:self.k_fe, 0] = self.fe_params
# Standard errors
sdf[0:self.k_fe, 1] = np.sqrt(np.diag(self.cov_params()[0:self.k_fe]))
# Z-scores
sdf[0:self.k_fe, 2] = sdf[0:self.k_fe, 0] / sdf[0:self.k_fe, 1]
# p-values
sdf[0:self.k_fe, 3] = 2 * norm.cdf(-np.abs(sdf[0:self.k_fe, 2]))
# Confidence intervals
qm = -norm.ppf(alpha / 2)
sdf[0:self.k_fe, 4] = sdf[0:self.k_fe, 0] - qm * sdf[0:self.k_fe, 1]
sdf[0:self.k_fe, 5] = sdf[0:self.k_fe, 0] + qm * sdf[0:self.k_fe, 1]
# All random effects variances and covariances
jj = self.k_fe
for i in range(self.k_re):
for j in range(i + 1):
sdf[jj, 0] = self.cov_re[i, j]
sdf[jj, 1] = np.sqrt(self.scale) * self.bse[jj]
jj += 1
# Variance components
for i in range(self.k_vc):
sdf[jj, 0] = self.vcomp[i]
sdf[jj, 1] = np.sqrt(self.scale) * self.bse[jj]
jj += 1
sdf = pd.DataFrame(index=self.model.data.param_names, data=sdf)
sdf.columns = ['Coef.', 'Std.Err.', 'z', 'P>|z|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
for col in sdf.columns:
sdf[col] = [float_fmt % x if np.isfinite(x) else ""
for x in sdf[col]]
smry.add_df(sdf, align='r')
return smry
@cache_readonly
def llf(self):
return self.model.loglike(self.params_object, profile_fe=False)
@cache_readonly
def aic(self):
if self.reml:
return np.nan
if self.freepat is not None:
df = self.freepat.get_packed(use_sqrt=False, has_fe=True).sum() + 1
else:
df = self.params.size + 1
return -2 * (self.llf - df)
@cache_readonly
def bic(self):
if self.reml:
return np.nan
if self.freepat is not None:
df = self.freepat.get_packed(use_sqrt=False, has_fe=True).sum() + 1
else:
df = self.params.size + 1
return -2 * self.llf + np.log(self.nobs) * df
def profile_re(self, re_ix, vtype, num_low=5, dist_low=1., num_high=5,
dist_high=1.):
"""
Profile-likelihood inference for variance parameters.
Parameters
----------
re_ix : integer
If vtype is `re`, this value is the index of the variance
parameter for which to construct a profile likelihood. If
`vtype` is 'vc' then `re_ix` is the name of the variance
parameter to be profiled.
vtype : string
Either 're' or 'vc', depending on whether the profile
analysis is for a random effect or a variance component.
num_low : integer
The number of points at which to calculate the likelihood
below the MLE of the parameter of interest.
dist_low : float
The distance below the MLE of the parameter of interest to
begin calculating points on the profile likelihood.
num_high : integer
The number of points at which to calculate the likelihood
above the MLE of the parameter of interest.
dist_high : float
The distance above the MLE of the parameter of interest to
begin calculating points on the profile likelihood.
Returns
-------
An array with two columns. The first column contains the
values to which the parameter of interest is constrained. The
second column contains the corresponding likelihood values.
Notes
-----
Only variance parameters can be profiled.
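Examples
--------
Profiling the first random-effect variance; the result name and the
distances below are illustrative assumptions:

>>> likev = result.profile_re(0, "re", dist_low=0.1, dist_high=0.1)
>>> likev[:, 0]  # constrained variance values
>>> likev[:, 1]  # corresponding profile log-likelihood values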
"""
pmodel = self.model
k_fe = pmodel.k_fe
k_re = pmodel.k_re
k_vc = pmodel.k_vc
endog, exog, groups = pmodel.endog, pmodel.exog, pmodel.groups
# Need to permute the columns of the random effects design
# matrix so that the profiled variable is in the first column.
if vtype == 're':
ix = np.arange(k_re)
ix[0] = re_ix
ix[re_ix] = 0
exog_re = pmodel.exog_re.copy()[:, ix]
# Permute the covariance structure to match the permuted
# design matrix.
params = self.params_object.copy()
cov_re_unscaled = params.cov_re
cov_re_unscaled = cov_re_unscaled[np.ix_(ix, ix)]
params.cov_re = cov_re_unscaled
ru0 = cov_re_unscaled[0, 0]
# Convert dist_low and dist_high to the profile
# parameterization
cov_re = self.scale * cov_re_unscaled
low = (cov_re[0, 0] - dist_low) / self.scale
high = (cov_re[0, 0] + dist_high) / self.scale
elif vtype == 'vc':
re_ix = self.model._vc_names.index(re_ix)
params = self.params_object.copy()
vcomp = self.vcomp
low = (vcomp[re_ix] - dist_low) / self.scale
high = (vcomp[re_ix] + dist_high) / self.scale
ru0 = vcomp[re_ix] / self.scale
# Define the sequence of values to which the parameter of
# interest will be constrained.
if low <= 0:
raise ValueError("dist_low is too large and would result in a "
"negative variance. Try a smaller value.")
left = np.linspace(low, ru0, num_low + 1)
right = np.linspace(ru0, high, num_high+1)[1:]
rvalues = np.concatenate((left, right))
# Indicators of which parameters are free and fixed.
free = MixedLMParams(k_fe, k_re, k_vc)
if self.freepat is None:
free.fe_params = np.ones(k_fe)
vcomp = np.ones(k_vc)
mat = np.ones((k_re, k_re))
else:
# If a freepat already has been specified, we add the
# constraint to it.
free.fe_params = self.freepat.fe_params
vcomp = self.freepat.vcomp
mat = self.freepat.cov_re
if vtype == 're':
mat = mat[np.ix_(ix, ix)]
if vtype == 're':
mat[0, 0] = 0
else:
vcomp[re_ix] = 0
free.cov_re = mat
free.vcomp = vcomp
klass = self.model.__class__
init_kwargs = pmodel._get_init_kwds()
if vtype == 're':
init_kwargs['exog_re'] = exog_re
likev = []
for x in rvalues:
model = klass(endog, exog, **init_kwargs)
if vtype == 're':
cov_re = params.cov_re.copy()
cov_re[0, 0] = x
params.cov_re = cov_re
else:
params.vcomp[re_ix] = x
# TODO should use fit_kwargs
rslt = model.fit(start_params=params, free=free,
reml=self.reml, cov_pen=self.cov_pen)._results
likev.append([x * rslt.scale, rslt.llf])
likev = np.asarray(likev)
return likev
class MixedLMResultsWrapper(base.LikelihoodResultsWrapper):
_attrs = {'bse_re': ('generic_columns', 'exog_re_names_full'),
'fe_params': ('generic_columns', 'xnames'),
'bse_fe': ('generic_columns', 'xnames'),
'cov_re': ('generic_columns_2d', 'exog_re_names'),
'cov_re_unscaled': ('generic_columns_2d', 'exog_re_names'),
}
_upstream_attrs = base.LikelihoodResultsWrapper._wrap_attrs
_wrap_attrs = base.wrap.union_dicts(_attrs, _upstream_attrs)
_methods = {}
_upstream_methods = base.LikelihoodResultsWrapper._wrap_methods
_wrap_methods = base.wrap.union_dicts(_methods, _upstream_methods)
| bsd-3-clause |
phobson/wqio | wqio/tests/test_viz.py | 2 | 22176 | import pytest
import numpy.testing as nptest
import numpy
import pandas
from matplotlib import pyplot
import seaborn
from wqio import viz
from wqio import utils
from wqio.tests import helpers
BASELINE_IMAGES = "_baseline_images/viz_tests"
TOLERANCE = helpers.get_img_tolerance()
seaborn.set(style="ticks")
@pytest.fixture
def plot_data():
data = numpy.array(
[
3.113,
3.606,
4.046,
4.046,
4.710,
6.140,
6.978,
2.000,
4.200,
4.620,
5.570,
5.660,
5.860,
6.650,
6.780,
6.790,
7.500,
7.500,
7.500,
8.630,
8.710,
8.990,
9.850,
10.820,
11.250,
11.250,
12.200,
14.920,
16.770,
17.810,
19.160,
19.190,
19.640,
20.180,
22.970,
]
)
return data
@pytest.fixture
def boxplot_data(plot_data):
bs = [
{
"cihi": 10.82,
"cilo": 6.1399999999999997,
"fliers": numpy.array([22.97]),
"iqr": 6.1099999999999994,
"mean": 9.5888285714285733,
"med": 7.5,
"q1": 5.6150000000000002,
"q3": 11.725,
"whishi": 20.18,
"whislo": 2.0,
}
]
return bs
@pytest.fixture
def whisk_flier_data():
data = [
2.20e-01,
2.70e-01,
3.08e-01,
3.20e-01,
4.10e-01,
4.44e-01,
4.82e-01,
5.46e-01,
6.05e-01,
6.61e-01,
7.16e-01,
7.70e-01,
8.24e-01,
1.00e-03,
4.90e-02,
5.60e-02,
1.40e-01,
1.69e-01,
1.83e-01,
2.06e-01,
2.10e-01,
2.13e-01,
2.86e-01,
3.16e-01,
3.40e-01,
3.57e-01,
3.71e-01,
3.72e-01,
3.78e-01,
3.81e-01,
3.86e-01,
3.89e-01,
3.90e-01,
3.93e-01,
4.00e-01,
4.03e-01,
4.10e-01,
4.10e-01,
4.29e-01,
4.40e-01,
4.40e-01,
4.40e-01,
4.46e-01,
4.46e-01,
4.50e-01,
4.51e-01,
4.52e-01,
4.56e-01,
4.60e-01,
4.66e-01,
4.72e-01,
4.78e-01,
4.81e-01,
4.83e-01,
4.86e-01,
4.89e-01,
4.98e-01,
5.00e-01,
5.00e-01,
5.03e-01,
5.18e-01,
5.32e-01,
5.37e-01,
5.38e-01,
5.68e-01,
5.69e-01,
5.78e-01,
5.88e-01,
5.94e-01,
5.96e-01,
6.02e-01,
6.10e-01,
6.10e-01,
6.10e-01,
6.19e-01,
6.20e-01,
6.20e-01,
6.28e-01,
6.38e-01,
6.39e-01,
6.42e-01,
6.61e-01,
6.71e-01,
6.75e-01,
6.80e-01,
6.96e-01,
7.00e-01,
7.01e-01,
7.09e-01,
7.16e-01,
7.17e-01,
7.30e-01,
7.62e-01,
7.64e-01,
7.69e-01,
7.70e-01,
7.77e-01,
7.80e-01,
8.06e-01,
8.10e-01,
8.23e-01,
8.30e-01,
8.50e-01,
8.50e-01,
8.56e-01,
8.56e-01,
8.80e-01,
8.80e-01,
8.93e-01,
8.96e-01,
8.97e-01,
8.99e-01,
9.22e-01,
9.28e-01,
9.30e-01,
9.64e-01,
9.65e-01,
9.76e-01,
9.79e-01,
9.90e-01,
9.99e-01,
1.00e00,
1.00e00,
1.01e00,
1.02e00,
1.03e00,
1.03e00,
1.03e00,
1.04e00,
1.05e00,
1.05e00,
1.05e00,
1.06e00,
1.07e00,
1.08e00,
1.08e00,
1.10e00,
1.10e00,
1.11e00,
1.12e00,
1.12e00,
1.13e00,
1.14e00,
1.14e00,
1.14e00,
1.15e00,
1.16e00,
1.17e00,
1.17e00,
1.17e00,
1.19e00,
1.19e00,
1.20e00,
1.20e00,
1.21e00,
1.22e00,
1.22e00,
1.23e00,
1.23e00,
1.23e00,
1.25e00,
1.25e00,
1.26e00,
1.26e00,
1.27e00,
1.27e00,
1.28e00,
1.29e00,
1.29e00,
1.30e00,
1.30e00,
1.30e00,
1.31e00,
1.31e00,
1.31e00,
1.32e00,
1.33e00,
1.34e00,
1.35e00,
1.35e00,
1.35e00,
1.36e00,
1.36e00,
1.36e00,
1.36e00,
1.37e00,
1.38e00,
1.39e00,
1.39e00,
1.40e00,
1.41e00,
1.43e00,
1.44e00,
1.44e00,
1.47e00,
1.47e00,
1.48e00,
1.51e00,
1.51e00,
1.53e00,
1.55e00,
1.55e00,
1.55e00,
1.57e00,
1.57e00,
1.57e00,
1.59e00,
1.59e00,
1.60e00,
1.60e00,
1.61e00,
1.62e00,
1.62e00,
1.62e00,
1.62e00,
1.63e00,
1.63e00,
1.63e00,
1.64e00,
1.66e00,
1.68e00,
1.68e00,
1.68e00,
1.68e00,
1.70e00,
1.70e00,
1.71e00,
1.71e00,
1.71e00,
1.74e00,
1.75e00,
1.75e00,
1.75e00,
1.76e00,
1.76e00,
1.77e00,
1.77e00,
1.77e00,
1.78e00,
1.78e00,
1.79e00,
1.79e00,
1.80e00,
1.81e00,
1.81e00,
1.82e00,
1.82e00,
1.82e00,
1.83e00,
1.85e00,
1.85e00,
1.85e00,
1.85e00,
1.86e00,
1.86e00,
1.86e00,
1.86e00,
1.87e00,
1.87e00,
1.89e00,
1.90e00,
1.91e00,
1.92e00,
1.92e00,
1.92e00,
1.94e00,
1.95e00,
1.95e00,
1.95e00,
1.96e00,
1.96e00,
1.97e00,
1.97e00,
1.97e00,
1.97e00,
1.98e00,
1.99e00,
1.99e00,
1.99e00,
2.00e00,
2.00e00,
2.00e00,
2.01e00,
2.01e00,
2.01e00,
2.02e00,
2.04e00,
2.05e00,
2.06e00,
2.06e00,
2.06e00,
2.07e00,
2.08e00,
2.09e00,
2.09e00,
2.10e00,
2.10e00,
2.11e00,
2.11e00,
2.12e00,
2.12e00,
2.12e00,
2.13e00,
2.13e00,
2.13e00,
2.14e00,
2.14e00,
2.14e00,
2.14e00,
2.14e00,
2.15e00,
2.16e00,
2.17e00,
2.18e00,
2.18e00,
2.18e00,
2.19e00,
2.19e00,
2.19e00,
2.19e00,
2.19e00,
2.21e00,
2.23e00,
2.23e00,
2.23e00,
2.25e00,
2.25e00,
2.25e00,
2.25e00,
2.26e00,
2.26e00,
2.26e00,
2.26e00,
2.26e00,
2.27e00,
2.27e00,
2.28e00,
2.28e00,
2.28e00,
2.29e00,
2.29e00,
2.29e00,
2.30e00,
2.31e00,
2.32e00,
2.33e00,
2.33e00,
2.33e00,
2.33e00,
2.34e00,
2.36e00,
2.38e00,
2.38e00,
2.39e00,
2.39e00,
2.39e00,
2.41e00,
2.42e00,
2.43e00,
2.45e00,
2.45e00,
2.47e00,
2.48e00,
2.49e00,
2.49e00,
2.49e00,
2.50e00,
2.51e00,
2.51e00,
2.52e00,
2.53e00,
2.53e00,
2.54e00,
2.54e00,
2.56e00,
2.58e00,
2.59e00,
2.59e00,
2.60e00,
2.61e00,
2.61e00,
2.61e00,
2.62e00,
2.62e00,
2.63e00,
2.65e00,
2.65e00,
2.66e00,
2.66e00,
2.68e00,
2.69e00,
2.69e00,
2.70e00,
2.72e00,
2.72e00,
2.73e00,
2.75e00,
2.77e00,
2.78e00,
2.79e00,
2.81e00,
2.81e00,
2.82e00,
2.84e00,
2.84e00,
2.85e00,
2.85e00,
2.86e00,
2.86e00,
2.88e00,
2.92e00,
2.93e00,
2.93e00,
2.95e00,
2.96e00,
2.96e00,
2.99e00,
3.00e00,
3.01e00,
3.02e00,
3.03e00,
3.03e00,
3.14e00,
3.15e00,
3.16e00,
3.17e00,
3.17e00,
3.18e00,
3.18e00,
3.19e00,
3.20e00,
3.22e00,
3.24e00,
3.25e00,
3.29e00,
3.31e00,
3.32e00,
3.32e00,
3.34e00,
3.35e00,
3.36e00,
3.38e00,
3.44e00,
3.45e00,
3.46e00,
3.48e00,
3.49e00,
3.53e00,
3.59e00,
3.63e00,
3.70e00,
3.70e00,
3.76e00,
3.80e00,
3.80e00,
3.80e00,
3.83e00,
3.84e00,
3.88e00,
3.90e00,
3.91e00,
3.96e00,
3.97e00,
3.97e00,
4.02e00,
4.03e00,
4.06e00,
4.12e00,
4.19e00,
4.21e00,
4.53e00,
4.56e00,
4.61e00,
4.62e00,
4.73e00,
5.13e00,
5.21e00,
5.40e00,
5.98e00,
6.12e00,
6.94e00,
7.38e00,
7.56e00,
8.06e00,
1.38e01,
1.51e01,
1.82e01,
]
return data
@pytest.fixture
@helpers.seed
def jp_data():
pyplot.rcdefaults()
N = 37
df = pandas.DataFrame(
{
"A": numpy.random.normal(size=N),
"B": numpy.random.lognormal(mean=0.25, sigma=1.25, size=N),
"C": numpy.random.lognormal(mean=1.25, sigma=0.75, size=N),
}
)
return df
@pytest.fixture
@helpers.seed
def cat_hist_data():
N = 100
years = [2011, 2012, 2013, 2014]
df = pandas.DataFrame(
{
"depth": numpy.random.uniform(low=0.2, high=39, size=N),
"year": numpy.random.choice(years, size=N),
"has_outflow": numpy.random.choice([False, True], size=N),
}
)
return df
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_rotateTickLabels_xaxis():
fig, ax = pyplot.subplots()
ax.set_xticks([1, 2, 3])
ax.set_xticklabels(["AAA", "BBB", "CCC"])
viz.rotateTickLabels(ax, 60, "x")
return fig
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_rotateTickLabels_yaxis():
fig, ax = pyplot.subplots()
ax.set_yticks([1, 2, 3])
ax.set_yticklabels(["AAA", "BBB", "CCC"])
viz.rotateTickLabels(ax, -30, "y")
return fig
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_rotateTickLabels_both():
fig, ax = pyplot.subplots()
ax.set_xticks([1, 2, 3])
ax.set_xticklabels(["XXX", "YYY", "ZZZ"])
ax.set_yticks([1, 2, 3])
ax.set_yticklabels(["AAA", "BBB", "CCC"])
viz.rotateTickLabels(ax, 45, "both")
return fig
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_log_formatter():
fig, ax = pyplot.subplots()
ax.plot([1, 5], [0.0005, 5e6])
ax.set_yscale("log")
ax.set_ylim([0.0001, 1e7])
ax.yaxis.set_major_formatter(viz.log_formatter())
return fig
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_log_formatter_alt():
fig, ax = pyplot.subplots()
ax.plot([1, 5], [0.0005, 5e6])
ax.set_yscale("log")
ax.set_ylim([0.0001, 1e7])
ax.yaxis.set_major_formatter(viz.log_formatter(use_1x=False))
return fig
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_log_formatter_alt_2():
fig, (ax1, ax2) = pyplot.subplots(ncols=2)
ax1.set_yscale("log")
ax1.set_ylim((1e-4, 1e5))
ax1.yaxis.set_major_formatter(viz.log_formatter(threshold=4))
ax2.set_yscale("log")
ax2.set_ylim((1e-7, 1e5))
ax2.yaxis.set_major_formatter(viz.log_formatter(threshold=4, use_1x=False))
return fig
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_gridlines_basic(plot_data):
fig, ax = pyplot.subplots()
ax.plot(plot_data)
viz.gridlines(ax, "xlabel", "ylabel")
return fig
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_gridlines_ylog(plot_data):
fig, ax = pyplot.subplots()
ax.plot(plot_data)
viz.gridlines(ax, "xlabel", "ylabel", yscale="log")
return fig
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_gridlines_ylog_noyminor(plot_data):
fig, ax = pyplot.subplots()
ax.plot(plot_data)
viz.gridlines(ax, "xlabel", "ylabel", yscale="log", yminor=False)
return fig
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_one2one():
fig, ax = pyplot.subplots()
ax.set_xlim([-2, 5])
ax.set_ylim([-3, 3])
viz.one2one(ax, label="Equality", lw=5, ls="--")
ax.legend()
return fig
def test_jointplot_defaultlabels(jp_data):
jg1 = viz.jointplot(x="B", y="C", data=jp_data, one2one=False, color="b")
assert jg1.ax_joint.get_xlabel() == "B"
assert jg1.ax_joint.get_ylabel() == "C"
nptest.assert_array_equal(numpy.round(jg1.ax_joint.get_xlim()), [0, 17])
nptest.assert_array_equal(numpy.round(jg1.ax_joint.get_ylim()), [0, 23])
return jg1.fig
def test_jointplot_xlabeled(jp_data):
jg2 = viz.jointplot(
x="B", y="C", data=jp_data, one2one=False, color="g", xlabel="Quantity B"
)
assert jg2.ax_joint.get_xlabel() == "Quantity B"
return jg2.fig
def test_jointplot_ylabeled(jp_data):
jg3 = viz.jointplot(
x="B", y="C", data=jp_data, one2one=False, color="r", ylabel="Quantity C"
)
assert jg3.ax_joint.get_ylabel() == "Quantity C"
return jg3.fig
def test_jointplot_bothlabeled(jp_data):
jg4 = viz.jointplot(
x="B",
y="C",
data=jp_data,
one2one=False,
color="k",
xlabel="Quantity B",
ylabel="Quantity C",
)
assert jg4.ax_joint.get_xlabel() == "Quantity B"
assert jg4.ax_joint.get_ylabel() == "Quantity C"
return jg4.fig
def test_jointplot_zerominFalse(jp_data):
jg1 = viz.jointplot(x="A", y="C", data=jp_data, zeromin=False, one2one=False)
nptest.assert_array_equal(numpy.round(jg1.ax_joint.get_xlim()), [-4, 4])
nptest.assert_array_equal(numpy.round(jg1.ax_joint.get_ylim()), [-7, 23])
return jg1.fig
def test_jointplot_one2one(jp_data):
jg1 = viz.jointplot(x="B", y="C", data=jp_data, one2one=True)
nptest.assert_array_equal(numpy.round(jg1.ax_joint.get_xlim()), [0, 23])
nptest.assert_array_equal(numpy.round(jg1.ax_joint.get_ylim()), [0, 23])
return jg1.fig
def test_jointplot_one2one_zerominFalse(jp_data):
jg1 = viz.jointplot(x="A", y="C", data=jp_data, one2one=True, zeromin=False)
nptest.assert_array_equal(numpy.round(jg1.ax_joint.get_xlim()), [-7, 23])
nptest.assert_array_equal(numpy.round(jg1.ax_joint.get_ylim()), [-7, 23])
return jg1.fig
@pytest.mark.parametrize(
("case", "xform_in", "xform_out"),
[
("linear", utils.no_op, utils.no_op),
("natlog", numpy.log, numpy.exp),
("log10", numpy.log10, lambda x: 10 ** x),
],
)
@pytest.mark.parametrize("key", ["whishi", "whislo", "fliers"])
def test_whiskers_and_fliers(whisk_flier_data, case, xform_in, xform_out, key):
expected_results = {
"linear": {
"whishi": 4.62,
"whislo": 0.00111,
"fliers": numpy.array(
[
4.730,
5.130,
5.210,
5.400,
5.980,
6.120,
6.940,
7.380,
7.560,
8.060,
13.800,
15.100,
18.200,
]
),
},
"natlog": {
"whishi": 8.060,
"whislo": 0.2700,
"fliers": numpy.array(
[
2.200e-01,
1.000e-03,
4.900e-02,
5.600e-02,
1.400e-01,
1.690e-01,
1.830e-01,
2.060e-01,
2.100e-01,
2.130e-01,
1.380e01,
1.510e01,
1.820e01,
]
),
},
"log10": {
"whishi": 8.060,
"whislo": 0.2700,
"fliers": numpy.array(
[
2.200e-01,
1.000e-03,
4.900e-02,
5.600e-02,
1.400e-01,
1.690e-01,
1.830e-01,
2.060e-01,
2.100e-01,
2.130e-01,
1.380e01,
1.510e01,
1.820e01,
]
),
},
}
q1 = numpy.percentile(whisk_flier_data, 25)
q3 = numpy.percentile(whisk_flier_data, 75)
result = viz.whiskers_and_fliers(
xform_in(whisk_flier_data), xform_in(q1), xform_in(q3), transformout=xform_out
)
expected = expected_results[case]
nptest.assert_array_almost_equal(result[key], expected[key], decimal=3)
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_boxplot_basic(boxplot_data):
fig, ax = pyplot.subplots()
viz.boxplot(boxplot_data, ax=ax, shownotches=True, patch_artist=False)
ax.set_xlim((0, 2))
return fig
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_boxplot_with_mean(boxplot_data):
fig, ax = pyplot.subplots()
viz.boxplot(
boxplot_data,
ax=ax,
shownotches=True,
patch_artist=True,
marker="^",
color="red",
showmean=True,
)
ax.set_xlim((0, 2))
return fig
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_probplot_prob(plot_data):
fig, ax = pyplot.subplots()
fig = viz.probplot(plot_data, ax=ax, xlabel="Test xlabel")
assert isinstance(fig, pyplot.Figure)
return fig
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_probplot_qq(plot_data):
fig, ax = pyplot.subplots()
fig = viz.probplot(
plot_data, ax=ax, axtype="qq", ylabel="Test label", scatter_kws=dict(color="r")
)
return fig
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_probplot_pp(plot_data):
fig, ax = pyplot.subplots()
scatter_kws = dict(
color="b", linestyle="--", markeredgecolor="g", markerfacecolor="none"
)
fig = viz.probplot(
plot_data,
ax=ax,
axtype="pp",
yscale="linear",
xlabel="test x",
ylabel="test y",
scatter_kws=scatter_kws,
)
return fig
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_probplot_prob_bestfit(plot_data):
fig, ax = pyplot.subplots()
fig = viz.probplot(plot_data, ax=ax, xlabel="Test xlabel", bestfit=True)
assert isinstance(fig, pyplot.Figure)
return fig
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_probplot_qq_bestfit(plot_data):
fig, ax = pyplot.subplots()
fig = viz.probplot(plot_data, ax=ax, axtype="qq", bestfit=True, ylabel="Test label")
return fig
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_probplot_pp_bestfit(plot_data):
fig, ax = pyplot.subplots()
scatter_kws = {"marker": "s", "color": "red"}
line_kws = {"linestyle": "--", "linewidth": 3}
fig = viz.probplot(
plot_data,
ax=ax,
axtype="pp",
yscale="linear",
xlabel="test x",
bestfit=True,
ylabel="test y",
scatter_kws=scatter_kws,
line_kws=line_kws,
)
return fig
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test__connect_spines():
fig, ((ax1, ax2), (ax3, ax4)) = pyplot.subplots(ncols=2, nrows=2)
viz._connect_spines(ax1, ax2, 0.5, 0.75)
viz._connect_spines(ax3, ax4, 0.9, 0.1, linewidth=2, color="r", linestyle="dashed")
return fig
@pytest.mark.xfail
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_parallel_coordinates():
df = seaborn.load_dataset("iris")
fig = viz.parallel_coordinates(df, hue="species")
return fig
@pytest.mark.xfail(TOLERANCE > 15, reason="GH Action weirdness")
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_categorical_histogram_simple(cat_hist_data):
bins = numpy.arange(5, 35, 5)
fig = viz.categorical_histogram(cat_hist_data, "depth", bins)
return fig.fig
@pytest.mark.xfail(TOLERANCE > 15, reason="GH Action weirdness")
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=TOLERANCE)
def test_categorical_histogram_complex(cat_hist_data):
bins = numpy.arange(5, 35, 5)
fig = viz.categorical_histogram(
cat_hist_data, "depth", bins, hue="year", row="has_outflow"
)
return fig.fig
| bsd-3-clause |
rongzh/unbiased-pscale | data/storage/database/data-files/Jamieson1982/Jamieson1982-UpUsplot.py | 2 | 1117 | import numpy as np
import matplotlib.pyplot as plt
Audat = np.loadtxt(fname='Au-Los-Alamos.md', delimiter='|', skiprows=3)
Audat1 = np.loadtxt(fname='Au-Jones.md', delimiter='|',skiprows=3)
Audat2 = np.loadtxt(fname='Au-Altschuler.md',delimiter='|',skiprows=3)
plt.ion()
plt.plot(Audat[:,0],Audat[:,1],'ko')
plt.plot(Audat1[:,0],Audat1[:,1],'ko',markerfacecolor = 'red')
plt.plot(Audat2[:,0],Audat2[:,1],'ko',markerfacecolor = 'blue')
plt.xlabel('Up')
plt.ylabel('Us')
plt.xlim([0,4])
plt.ylim([2,12])
plt.title('Jamieson1982 Au Plot')
plt.draw()
import numpy as np
import matplotlib.pyplot as plt
Ptdat = np.loadtxt(fname='Pt.md', delimiter='|', skiprows=3)
plt.ion()
plt.plot(Ptdat[:,0],Ptdat[:,1],'ko')
plt.xlabel('Up')
plt.ylabel('Us')
plt.xlim([0,4])
plt.ylim([2,12])
plt.title('Jamieson1982 Pt Plot')
plt.draw()
import numpy as np
import matplotlib.pyplot as plt
MgOdat = np.loadtxt(fname='MgO-Los-Alamos.md', delimiter='|', skiprows=3)
plt.ion()
plt.plot(MgOdat[:,0],MgOdat[:,1],'ko')
plt.xlabel('Up')
plt.ylabel('Us')
plt.xlim([0,4])
plt.ylim([2,12])
plt.title('Jamieson1982 MgO Plot')
plt.draw() | mit |
StefReck/Km3-Autoencoder | scripts/Deconv_Test.py | 1 | 5890 | # -*- coding: utf-8 -*-
from keras import initializers
from keras.models import Sequential, Model
from keras.layers import Dense, Lambda, Input, Activation, Conv3D, Conv2D, Conv2DTranspose, Conv3DTranspose, MaxPooling2D, MaxPooling3D, AveragePooling3D, UpSampling3D, UpSampling2D, Input, Lambda
import numpy as np
import matplotlib.pyplot as plt
from keras import backend as K
import h5py
from util.custom_layers import MaxUnpooling3D
from compare_hists import reshape_3d_to_3d
def make_3d_plots_xyz(hist_org, hist_pred, title1, title2, suptitle=None):
#Plot original and predicted histogram side by side in one plot
#input format: [x,y,z,val]
fig = plt.figure(figsize=(10,5))
ax1 = fig.add_subplot(121, projection='3d', aspect='equal')
max_value1= np.amax(hist_org)
min_value1 = np.amin(hist_org) #min value is usually 0, but who knows if the autoencoder screwed up
fraction1=(hist_org[3]-min_value1)/max_value1
plot1 = ax1.scatter(hist_org[0],hist_org[1],hist_org[2], c=hist_org[3], s=8*36*fraction1, rasterized=True)
cbar1=fig.colorbar(plot1,fraction=0.046, pad=0.1)
cbar1.set_label('Hits', rotation=270, labelpad=0.1)
ax1.set_xlabel('X')
ax1.set_ylabel('Y')
ax1.set_zlabel('Z')
ax1.set_title(title1)
ax2 = fig.add_subplot(122, projection='3d', aspect='equal')
max_value2=np.amax(hist_pred)
min_value2=np.amin(hist_pred)
fraction2=(hist_pred[3]-min_value2)/max_value2
plot2 = ax2.scatter(hist_pred[0],hist_pred[1],hist_pred[2], c=hist_pred[3], s=8*36*fraction2, rasterized=True)
cbar2=fig.colorbar(plot2,fraction=0.046, pad=0.1)
cbar2.set_label('Hits', rotation=270, labelpad=0.1)
ax2.set_xlabel('X')
ax2.set_ylabel('Y')
ax2.set_zlabel('Z')
ax2.set_title(title2)
if suptitle is not None: fig.suptitle(suptitle)
fig.tight_layout()
def compare_hists_xyz(hist_org, hist_pred, name, suptitle=None):
make_3d_plots_xyz(reshape_3d_to_3d(hist_org), reshape_3d_to_3d(hist_pred), title1="MaxPooling", title2="AveragePooling", suptitle=suptitle)
plt.savefig(name)
plt.close()
#2D Model
"""
inputs=Input(shape=(2,2,1))
x = Lambda(MaxUnpooling2D,MaxUnpooling2D_output_shape)(inputs)
model = Model(inputs=inputs, outputs=x)
mat=np.linspace(1,8,8).reshape((2,2,2,1))
res=model.predict(mat)
print(res)
"""
def make_updown_array():
datafile = "/home/woody/capn/mppi033h/Data/ORCA_JTE_NEMOWATER/h5_input_projections_3-100GeV/4dTo3d/h5/xzt/concatenated/test_muon-CC_and_elec-CC_each_60_xzt_shuffled.h5"
file = h5py.File(datafile, "r")
labels=file["y"]
energy = labels[:,2]
dir_z = labels[:,7]
down_going_events=energy[dir_z>0]
tot_events = len(energy)
average = float(len(down_going_events)) / len(energy)
print("Total fraction of down-going events: ", average)
plot_range=(3,100)
hist_1d_energy = np.histogram(energy, bins=98, range=plot_range)  # frequency of energies
hist_1d_energy_correct = np.histogram(down_going_events, bins=98, range=plot_range)  # frequency of correct (down-going) energies
bin_edges = hist_1d_energy[1]
hist_1d_energy_accuracy_bins = np.divide(hist_1d_energy_correct[0], hist_1d_energy[0], dtype=np.float32)  # relative frequency of correct (down-going) energies
# For making it work with matplotlib step plot
#hist_1d_energy_accuracy_bins_leading_zero = np.hstack((0, hist_1d_energy_accuracy_bins))
bin_edges_centered = bin_edges[:-1] + 0.5
return [bin_edges_centered, hist_1d_energy_accuracy_bins, average]
#bin_edges_centered, hist_1d_energy_accuracy_bins, average = make_updown_array()
bin_edges_centered, hist_1d_energy_accuracy_bins = np.load("Daten/xzt_test_data_updown.npy")
#The average percentage
average=0.5367258059801829
plt.axhline(average, color="orange", ls="--")
plt_bar_1d_energy_accuracy = plt.step(bin_edges_centered, hist_1d_energy_accuracy_bins, where='mid')
x_ticks_major = np.arange(0, 101, 10)
plt.xticks(x_ticks_major)
plt.yticks(np.arange(0.4,0.6,0.02))
plt.minorticks_on()
plt.xlabel('Energy [GeV]')
plt.ylabel('Fraction')
plt.ylim((0.4, 0.6))
plt.title("Fraction of down-going events in xzt simulated test data")
plt.grid(True)
plt.text(11, average+0.005, "Total avg.: "+str(average*100)[:5]+" %", color="orange", fontsize=10, bbox=dict(facecolor='white', color="white", alpha=0.5))
plt.show()
test_file = 'Daten/JTE_KM3Sim_gseagen_muon-CC_3-100GeV-9_1E7-1bin-3_0gspec_ORCA115_9m_2016_588_xyz.h5'
file=h5py.File(test_file , 'r')
which=[3]
hists = file["x"][which].reshape((1,11,13,18,1))
# event_track: [event_id, particle_type, energy, isCC, bjorkeny, dir_x/y/z, time]
labels=file["y"][which]
#compare_hists_xzt(hists[0], model3.predict(hists)[0], suptitle="C Average Pooling")
#compare_hists_xzt(hists[0], layer_1_output[0], suptitle="Max Pooling Intermediate Layer")
#compare_hists_xzt(hists[0], layer_1_output_2[0], suptitle="Average Pooling Intermediate Layer")
#plot_hist(layer_1_output[0])
#plot_hist(layer_1_output_2[0])
"""
mat=np.linspace(1,8,8).reshape((1,2,2,2,1))
res=model.predict(mat)
print(res)
inputs = Input(shape=(5,1))
x = Lambda(MaxUnpooling1D, output_shape=out_shape)(inputs)
model = Model(inputs, x)
model.compile(optimizer='rmsprop', loss='mse', metrics=['accuracy'])
"""
"""
inp = np.ones((1,3,3,1))
prediction = model.predict_on_batch(inp).astype(int)
def display():
print(inp.reshape(2,2,2))
print(prediction.reshape(3,3,3))
weights=[]
for layer in model.layers:
weights.append(layer.get_weights())
model.save('test.h5')
del model
inputs2 = Input(shape=(3,3,1))
x2 = Conv2D(filters=2, kernel_size=(2,2), padding='valid', activation='relu', kernel_initializer=initializers.Constant(value=1))(inputs2)
encoder = Model(inputs2, x2)
encoder.load_weights('test.h5', by_name=True)
weights2=[]
for layer in encoder.layers:
weights2.append(layer.get_weights())
""" | mit |
ruiminshen/yolo-tf | demo_data_augmentation.py | 1 | 3788 | """
Copyright (C) 2017, 申瑞珉 (Ruimin Shen)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import argparse
import configparser
import multiprocessing
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import utils.data
import utils.visualize
def main():
model = config.get('config', 'model')
cachedir = utils.get_cachedir(config)
with open(os.path.join(cachedir, 'names'), 'r') as f:
names = [line.strip() for line in f]
width = config.getint(model, 'width')
height = config.getint(model, 'height')
cell_width, cell_height = utils.calc_cell_width_height(config, width, height)
tf.logging.info('(width, height)=(%d, %d), (cell_width, cell_height)=(%d, %d)' % (width, height, cell_width, cell_height))
batch_size = args.rows * args.cols
paths = [os.path.join(cachedir, profile + '.tfrecord') for profile in args.profile]
num_examples = sum(sum(1 for _ in tf.python_io.tf_record_iterator(path)) for path in paths)
tf.logging.warn('num_examples=%d' % num_examples)
with tf.Session() as sess:
with tf.name_scope('batch'):
image_rgb, labels = utils.data.load_image_labels(paths, len(names), width, height, cell_width, cell_height, config)
batch = tf.train.shuffle_batch((tf.cast(image_rgb, tf.uint8),) + labels, batch_size=batch_size,
capacity=config.getint('queue', 'capacity'), min_after_dequeue=config.getint('queue', 'min_after_dequeue'), num_threads=multiprocessing.cpu_count()
)
tf.global_variables_initializer().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess, coord)
batch_image, batch_labels = sess.run([batch[0], batch[1:]])
coord.request_stop()
coord.join(threads)
batch_image = batch_image.astype(np.uint8)
fig, axes = plt.subplots(args.rows, args.cols)
for b, (ax, image) in enumerate(zip(axes.flat, batch_image)):
ax.imshow(image)
utils.visualize.draw_labels(ax, names, width, height, cell_width, cell_height, *[l[b] for l in batch_labels])
if args.grid:
ax.set_xticks(np.arange(0, width, width / cell_width))
ax.set_yticks(np.arange(0, height, height / cell_height))
ax.grid(which='both')
ax.tick_params(labelbottom='off', labelleft='off')
else:
ax.set_xticks([])
ax.set_yticks([])
fig.tight_layout()
plt.show()
def make_args():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', nargs='+', default=['config.ini'], help='config file')
parser.add_argument('-p', '--profile', nargs='+', default=['train', 'val'])
parser.add_argument('-g', '--grid', action='store_true')
parser.add_argument('--rows', default=5, type=int)
parser.add_argument('--cols', default=5, type=int)
parser.add_argument('--level', default='info', help='logging level')
return parser.parse_args()
if __name__ == '__main__':
args = make_args()
config = configparser.ConfigParser()
utils.load_config(config, args.config)
if args.level:
tf.logging.set_verbosity(args.level.upper())
main()
| lgpl-3.0 |
nborggren/zipline | tests/data/test_us_equity_pricing.py | 2 | 11697 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from nose_parameterized import parameterized
from numpy import (
arange,
datetime64,
)
from numpy.testing import (
assert_array_equal,
)
from pandas import (
DataFrame,
DatetimeIndex,
Timestamp,
)
from pandas.util.testing import assert_index_equal
from testfixtures import TempDirectory
from zipline.pipeline.loaders.synthetic import (
SyntheticDailyBarWriter,
)
from zipline.data.us_equity_pricing import (
BcolzDailyBarReader,
NoDataOnDate
)
from zipline.finance.trading import TradingEnvironment
from zipline.pipeline.data import USEquityPricing
from zipline.utils.test_utils import (
seconds_to_timestamp,
)
TEST_CALENDAR_START = Timestamp('2015-06-01', tz='UTC')
TEST_CALENDAR_STOP = Timestamp('2015-06-30', tz='UTC')
TEST_QUERY_START = Timestamp('2015-06-10', tz='UTC')
TEST_QUERY_STOP = Timestamp('2015-06-19', tz='UTC')
# One asset for each of the cases enumerated in load_raw_arrays_from_bcolz.
EQUITY_INFO = DataFrame(
[
# 1) The equity's trades start and end before query.
{'start_date': '2015-06-01', 'end_date': '2015-06-05'},
# 2) The equity's trades start and end after query.
{'start_date': '2015-06-22', 'end_date': '2015-06-30'},
# 3) The equity's data covers all dates in range.
{'start_date': '2015-06-02', 'end_date': '2015-06-30'},
# 4) The equity's trades start before the query start, but stop
# before the query end.
{'start_date': '2015-06-01', 'end_date': '2015-06-15'},
# 5) The equity's trades start and end during the query.
{'start_date': '2015-06-12', 'end_date': '2015-06-18'},
# 6) The equity's trades start during the query, but extend through
# the whole query.
{'start_date': '2015-06-15', 'end_date': '2015-06-25'},
],
index=arange(1, 7),
columns=['start_date', 'end_date'],
).astype(datetime64)
TEST_QUERY_ASSETS = EQUITY_INFO.index
class BcolzDailyBarTestCase(TestCase):
@classmethod
def setUpClass(cls):
all_trading_days = TradingEnvironment().trading_days
cls.trading_days = all_trading_days[
all_trading_days.get_loc(TEST_CALENDAR_START):
all_trading_days.get_loc(TEST_CALENDAR_STOP) + 1
]
def setUp(self):
self.asset_info = EQUITY_INFO
self.writer = SyntheticDailyBarWriter(
self.asset_info,
self.trading_days,
)
self.dir_ = TempDirectory()
self.dir_.create()
self.dest = self.dir_.getpath('daily_equity_pricing.bcolz')
def tearDown(self):
self.dir_.cleanup()
@property
def assets(self):
return self.asset_info.index
def trading_days_between(self, start, end):
return self.trading_days[self.trading_days.slice_indexer(start, end)]
def asset_start(self, asset_id):
return self.writer.asset_start(asset_id)
def asset_end(self, asset_id):
return self.writer.asset_end(asset_id)
def dates_for_asset(self, asset_id):
start, end = self.asset_start(asset_id), self.asset_end(asset_id)
return self.trading_days_between(start, end)
def test_write_ohlcv_content(self):
result = self.writer.write(self.dest, self.trading_days, self.assets)
for column in SyntheticDailyBarWriter.OHLCV:
idx = 0
data = result[column][:]
multiplier = 1 if column == 'volume' else 1000
for asset_id in self.assets:
for date in self.dates_for_asset(asset_id):
self.assertEqual(
SyntheticDailyBarWriter.expected_value(
asset_id,
date,
column
) * multiplier,
data[idx],
)
idx += 1
self.assertEqual(idx, len(data))
def test_write_day_and_id(self):
result = self.writer.write(self.dest, self.trading_days, self.assets)
idx = 0
ids = result['id']
days = result['day']
for asset_id in self.assets:
for date in self.dates_for_asset(asset_id):
self.assertEqual(ids[idx], asset_id)
self.assertEqual(date, seconds_to_timestamp(days[idx]))
idx += 1
def test_write_attrs(self):
result = self.writer.write(self.dest, self.trading_days, self.assets)
expected_first_row = {
'1': 0,
'2': 5, # Asset 1 has 5 trading days.
'3': 12, # Asset 2 has 7 trading days.
'4': 33, # Asset 3 has 21 trading days.
'5': 44, # Asset 4 has 11 trading days.
'6': 49, # Asset 5 has 5 trading days.
}
expected_last_row = {
'1': 4,
'2': 11,
'3': 32,
'4': 43,
'5': 48,
'6': 57, # Asset 6 has 9 trading days.
}
expected_calendar_offset = {
'1': 0, # Starts on 6-01, 1st trading day of month.
'2': 15, # Starts on 6-22, 16th trading day of month.
'3': 1, # Starts on 6-02, 2nd trading day of month.
'4': 0, # Starts on 6-01, 1st trading day of month.
'5': 9, # Starts on 6-12, 10th trading day of month.
'6': 10, # Starts on 6-15, 11th trading day of month.
}
self.assertEqual(result.attrs['first_row'], expected_first_row)
self.assertEqual(result.attrs['last_row'], expected_last_row)
self.assertEqual(
result.attrs['calendar_offset'],
expected_calendar_offset,
)
assert_index_equal(
self.trading_days,
DatetimeIndex(result.attrs['calendar'], tz='UTC'),
)
def _check_read_results(self, columns, assets, start_date, end_date):
table = self.writer.write(self.dest, self.trading_days, self.assets)
reader = BcolzDailyBarReader(table)
results = reader.load_raw_arrays(columns, start_date, end_date, assets)
dates = self.trading_days_between(start_date, end_date)
for column, result in zip(columns, results):
assert_array_equal(
result,
self.writer.expected_values_2d(
dates,
assets,
column.name,
)
)
@parameterized.expand([
([USEquityPricing.open],),
([USEquityPricing.close, USEquityPricing.volume],),
([USEquityPricing.volume, USEquityPricing.high, USEquityPricing.low],),
(USEquityPricing.columns,),
])
def test_read(self, columns):
self._check_read_results(
columns,
self.assets,
TEST_QUERY_START,
TEST_QUERY_STOP,
)
def test_start_on_asset_start(self):
"""
Test loading with queries that start on the first day of each asset's
lifetime.
"""
columns = [USEquityPricing.high, USEquityPricing.volume]
for asset in self.assets:
self._check_read_results(
columns,
self.assets,
start_date=self.asset_start(asset),
end_date=self.trading_days[-1],
)
def test_start_on_asset_end(self):
"""
Test loading with queries that start on the last day of each asset's
lifetime.
"""
columns = [USEquityPricing.close, USEquityPricing.volume]
for asset in self.assets:
self._check_read_results(
columns,
self.assets,
start_date=self.asset_end(asset),
end_date=self.trading_days[-1],
)
def test_end_on_asset_start(self):
"""
Test loading with queries that end on the first day of each asset's
lifetime.
"""
columns = [USEquityPricing.close, USEquityPricing.volume]
for asset in self.assets:
self._check_read_results(
columns,
self.assets,
start_date=self.trading_days[0],
end_date=self.asset_start(asset),
)
def test_end_on_asset_end(self):
"""
Test loading with queries that end on the last day of each asset's
lifetime.
"""
columns = [USEquityPricing.close, USEquityPricing.volume]
for asset in self.assets:
self._check_read_results(
columns,
self.assets,
start_date=self.trading_days[0],
end_date=self.asset_end(asset),
)
def test_unadjusted_spot_price(self):
table = self.writer.write(self.dest, self.trading_days, self.assets)
reader = BcolzDailyBarReader(table)
# At beginning
price = reader.spot_price(1, Timestamp('2015-06-01', tz='UTC'),
'close')
# Synthetic writes price for date.
self.assertEqual(135630.0, price)
# Middle
price = reader.spot_price(1, Timestamp('2015-06-02', tz='UTC'),
'close')
self.assertEqual(135631.0, price)
# End
price = reader.spot_price(1, Timestamp('2015-06-05', tz='UTC'),
'close')
self.assertEqual(135634.0, price)
# Another sid at beginning.
price = reader.spot_price(2, Timestamp('2015-06-22', tz='UTC'),
'close')
self.assertEqual(235651.0, price)
# Ensure that volume does not have float adjustment applied.
volume = reader.spot_price(1, Timestamp('2015-06-02', tz='UTC'),
'volume')
self.assertEqual(145631, volume)
def test_unadjusted_spot_price_no_data(self):
table = self.writer.write(self.dest, self.trading_days, self.assets)
reader = BcolzDailyBarReader(table)
# before
with self.assertRaises(NoDataOnDate):
reader.spot_price(2, Timestamp('2015-06-08', tz='UTC'), 'close')
# after
with self.assertRaises(NoDataOnDate):
reader.spot_price(4, Timestamp('2015-06-16', tz='UTC'), 'close')
def test_unadjusted_spot_price_empty_value(self):
table = self.writer.write(self.dest, self.trading_days, self.assets)
reader = BcolzDailyBarReader(table)
# A sid, day and corresponding index into which to overwrite a zero.
zero_sid = 1
zero_day = Timestamp('2015-06-02', tz='UTC')
zero_ix = reader.sid_day_index(zero_sid, zero_day)
# Write a zero into the synthetic pricing data at the day and sid,
# so that a read should now return -1.
# This a little hacky, in lieu of changing the synthetic data set.
reader._spot_col('close')[zero_ix] = 0
close = reader.spot_price(zero_sid, zero_day, 'close')
self.assertEqual(-1, close)
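# Illustrative sketch (not part of the upstream test module): deriving the
# 'first_row' / 'last_row' attrs checked in test_write_attrs by hand.  It
# approximates the NYSE calendar with pandas business days, which happens to
# match for June 2015 (no exchange holidays fall in that window).
if __name__ == '__main__':
    from pandas import bdate_range
    june_days = bdate_range('2015-06-01', '2015-06-30')
    first_row, last_row, next_free = {}, {}, 0
    for sid, info in EQUITY_INFO.iterrows():
        mask = (june_days >= info['start_date']) & (june_days <= info['end_date'])
        n_days = int(mask.sum())
        first_row[str(sid)] = next_free
        last_row[str(sid)] = next_free + n_days - 1
        next_free += n_days
    # Should reproduce expected_first_row / expected_last_row from the test above.
    print(first_row)
    print(last_row)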
| apache-2.0 |
rupakc/Kaggle-Compendium | Personalized Medicine - Redefining Cancer Treatment/preprocess.py | 4 | 9418 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 22:25:49 2016
Preprocessing Utilities for cleaning and processing of data
@author: Rupak Chakraborty
"""
import pandas as pd
from nltk.corpus import stopwords
import string
from nltk.stem import PorterStemmer
stopword_list = set(stopwords.words("english"))
punctuation_list = list(string.punctuation)
ps = PorterStemmer()
months_list = ["january","february","march","april","may","june","july","august",
"september","october","november","december"]
digit_list = ["0","1","2","3","4","5","6","7","8","9"]
month_list_short = ["jan","feb","mar","apr","may","jun","jul","aug","sept","oct","nov","dec"]
emoticon_list = [":)",":(","^_^","-_-","<3",":D",":P",":/"]
html_tag_list = [" ","<",">","&",";","<strong>","<em>","[1]","</strong>","</em>","<div>","</div>","<b>","</b>","[2]","[3]","...","[img]","[/img]","<u>","</u>","<p>","</p>","\n","\\t","<span>",
"</span>","[Moved]","<br/>","<a>","</a>",""","<br>","<br />","Â","<a rel=\"nofollow\" class=\"ot-hashtag\"","'","<a","’","'"]
extend_punct_list = [' ',',',':',';','\'','\t','\n','?','-','$',"!!","?","w/","!","!!!","w/","'","RT","rt","@","#","/",":)",
":(",":D","^_^","^","...","&","\\",":","?","<",">","$","%","*","`","~","-","_",
"+","=","{","}","[","]","|","\"",",",";",")","(","r/","/u/","*","-"]
punctuation_list.extend(extend_punct_list)
#punctuation_list.remove(".")
months_list.extend(month_list_short)
"""
Given a string normalizes it, i.e. converts it to lowercase and strips it of extra spaces
Params:
--------
s - String which is to be normalized
Returns:
---------
String in the normalized form
"""
def normalize_string(s):
s = s.lower()
s = s.strip()
return s
"""
Given a list of strings normalizes the strings
Params:
-------
string_list - List containing the strings which are to be normalized
Returns:
---------
Returns a list containing the normalized string list
"""
def normalize_string_list(string_list):
normalized_list = []
for sentence in string_list:
normalized_list.append(normalize_string(sentence))
return normalized_list
"""
Given a string and a separator splits up the string in the tokens
Params:
--------
s - string which has to be tokenized
separator - separator based on which the string is to be tokenized
Returns:
---------
A list of words in the sentence based on the separator
"""
def tokenize_string(s,separator):
word_list = list([])
if isinstance(s,basestring):
word_list = s.split(separator)
return word_list
"""
Given a list of sentences tokenizes each sentence in the list
Params:
--------
string_list - List of sentences which have to be tokenized
separator - Separator based on which the sentences have to be tokenized
"""
def tokenize_string_list(string_list,separator):
tokenized_sentence_list = []
for sentence in string_list:
#sentence = sentence.encode("ascii","ignore")
tokenized_sentence_list.append(tokenize_string(sentence,separator))
return tokenized_sentence_list
"""
Given a string containing stopwords removes all the stopwords
Params:
--------
s - String containing the stopwords which are to be removed
Returns:
---------
String sans the stopwords
"""
def remove_stopwords(s):
s = s.lower()
removed_string = ''
words = s.split()
for word in words:
if word not in stopword_list:
removed_string = removed_string + word.strip() + " "
return removed_string.strip()
"""
Given a list of sentences and a filename, writes the sentences to the file
Params:
--------
sentence_list - List of sentences which have to be written to the file
filename - File to which the sentences have to be written
Returns:
---------
Nothing; it just writes the sentences to the file
"""
def write_sentences_to_file(sentence_list,filename):
write_file = open(filename,'w')
for sentence in sentence_list:
write_file.write(encode_ascii(sentence) + '\n')
write_file.flush()
write_file.close()
"""
Removes all the punctuations from a given string
Params:
--------
s - String containing the possible punctuations
Returns:
--------
String without the punctuations (including new lines and tabs)
"""
def remove_punctuations(s):
s = s.lower()
s = s.strip()
for punctuation in punctuation_list:
s = s.replace(punctuation,' ')
return s.strip()
"""
Strips a given string of HTML tags
Params:
--------
s - String from which the HTML tags have to be removed
Returns:
---------
String sans the HTML tags
"""
def remove_html_tags(s):
for tag in html_tag_list:
s = s.replace(tag,' ')
return s
"""
Given a string removes all the digits from them
Params:
-------
s - String from which the digits need to be removed
Returns:
---------
String without the occurrence of the digits
"""
def remove_digits(s):
for digit in digit_list:
s = s.replace(digit,'')
return s
"""
Given a string removes all occurrences of month names from it
Params:
--------
s - String containing possible month names
Returns:
--------
String without the occurrence of the months
"""
def remove_months(s):
s = s.lower()
words = s.split()
without_month_list = [word for word in words if word not in months_list]
month_clean_string = ""
for word in without_month_list:
month_clean_string = month_clean_string + word + " "
return month_clean_string.strip()
"""
Checks if a given string contains all ASCII characters
Params:
-------
s - String which is to be checked for ASCII characters
Returns:
--------
True if the string contains all ASCII characters, False otherwise
"""
def is_ascii(s):
if isinstance(s,basestring):
return all(ord(c) < 128 for c in s)
return False
"""
Given a string encodes it in ascii format
Params:
--------
s - String which is to be encoded
Returns:
--------
String encoded in ascii format
"""
def encode_ascii(s):
return s.encode('ascii','ignore')
"""
Stems each word of a given sentence to it's root word using Porters Stemmer
Params:
--------
sentence - String containing the sentence which is to be stemmed
Returns:
---------
Sentence where each word has been stemmed to it's root word
"""
def stem_sentence(sentence):
words = sentence.split()
stemmed_sentence = ""
for word in words:
try:
if is_ascii(word):
stemmed_sentence = stemmed_sentence + ps.stem_word(word) + " "
except:
pass
return stemmed_sentence.strip()
"""
Given a string removes urls from the string
Params:
--------
s - String containing urls which have to be removed
Returns:
--------
String without the occurrence of the URLs
"""
def remove_url(s):
s = s.lower()
words = s.split()
without_url = ""
for word in words:
if word.count('http:') == 0 and word.count('https:') == 0 and word.count('ftp:') == 0 and word.count('www.') == 0 and word.count('.com') == 0 and word.count('.ly') == 0 and word.count('.st') == 0:
without_url = without_url + word + " "
return without_url.strip()
"""
Given a string removes all the words whose length is less than 3
Params:
--------
s - String from which small words have to be removed.
Returns:
---------
Returns a string without the occurrence of small words
"""
def remove_small_words(s):
words = s.split()
clean_string = ""
for word in words:
if len(word) >= 3:
clean_string = clean_string + word + " "
return clean_string.strip()
"""
Defines the pipeline for cleaning and preprocessing of text
Params:
--------
s - String containing the text which has to be preprocessed
Returns:
---------
String which has been passed through the preprocessing pipeline
"""
def text_clean_pipeline(s):
s = remove_url(s)
s = remove_punctuations(s)
s = remove_html_tags(s)
s = remove_stopwords(s)
s = remove_months(s)
s = remove_digits(s)
#s = stem_sentence(s)
s = remove_small_words(s)
return s
"""
Given a list of sentences processes the list through the pre-preprocessing pipeline and returns the list
Params:
--------
sentence_list - List of sentences which are to be cleaned
Returns:
---------
The cleaned and pre-processed sentence list
"""
def text_clean_pipeline_list(sentence_list):
clean_sentence_list = list([])
for s in sentence_list:
s = remove_digits(s)
s = remove_punctuations(s)
s = remove_html_tags(s)
s = remove_stopwords(s)
s = remove_months(s)
s = remove_small_words(s)
#s = encode_ascii(s)
#s = remove_url(s)
#s = stem_sentence(s)
clean_sentence_list.append(s)
return clean_sentence_list
"""
Given a excel filepath and a corresponding sheetname reads it and converts it into a dataframe
Params:
--------
filename - Filepath containing the location and name of the file
sheetname - Name of the sheet containing the data
Returns:
---------
pandas dataframe containing the data from the excel file
"""
def get_dataframe_from_excel(filename,sheetname):
xl_file = pd.ExcelFile(filename)
data_frame = xl_file.parse(sheetname)
return data_frame
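# Illustrative usage sketch appended for clarity; the sample sentences below
# are made up and not part of the original module.
if __name__ == "__main__":
    sample = "Check out http://example.com <b>NOW</b> - it's 100% better in January!!!"
    print(text_clean_pipeline(sample))
    print(text_clean_pipeline_list([sample, "Another <em>noisy</em> sentence, with digits 123."]))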
| mit |
q1ang/scikit-learn | sklearn/kernel_ridge.py | 155 | 6545 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
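# Illustrative sketch (not part of the upstream module): the closed-form dual
# solution that _solve_cholesky_kernel computes, written out directly for a
# small made-up RBF problem and checked against the estimator.
if __name__ == "__main__":
    from sklearn.metrics.pairwise import rbf_kernel
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = rng.randn(20)
    alpha, gamma = 1.0, 0.1
    # Fit: dual_coef = (K + alpha * I)^-1 y, with K the train/train kernel.
    K = rbf_kernel(X, gamma=gamma)
    dual_coef = np.linalg.solve(K + alpha * np.eye(len(X)), y)
    # Predict: K(X_new, X_train) . dual_coef
    X_new = rng.randn(5, 3)
    manual_pred = rbf_kernel(X_new, X, gamma=gamma).dot(dual_coef)
    est_pred = KernelRidge(alpha=alpha, kernel="rbf", gamma=gamma).fit(X, y).predict(X_new)
    print(np.allclose(manual_pred, est_pred))  # expected: True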
| bsd-3-clause |
JackKelly/neuralnilm_prototype | scripts/e484.py | 2 | 5509 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation)
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer, PolygonOutputLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.disaggregate import disaggregate
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer,
DimshuffleLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 1000
N_SEQ_PER_BATCH = 2
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
window=("2013-03-18", "2013-04-18"),
train_buildings=[1],
validation_buildings=[1],
n_seq_per_batch=N_SEQ_PER_BATCH,
standardise_input=True,
standardise_targets=True,
independently_center_inputs=True
# ignore_incomplete=True
# offset_probability=0.5,
# ignore_offset_activations=True
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-9,
learning_rate_changes_by_iteration={
# 1000: 1e-4,
# 5000: 1e-5
},
do_save_activations=True,
auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=32)
)
def exp_a(name, target_appliance, seq_length):
global source
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
target_appliance=target_appliance,
logger=logging.getLogger(name),
seq_length=seq_length
))
source = RandomSegmentsInMemory(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DenseLayer,
'num_units': seq_length,
'nonlinearity': rectify
},
{
'type': PolygonOutputLayer,
'num_units': 2,
'seq_length': seq_length
},
{
'type': ReshapeLayer,
'shape': source.output_shape()
}
]
net = Net(**net_dict_copy)
return net
def main():
APPLIANCES = [
('a', ['fridge freezer', 'fridge', 'freezer'], 512),
('b', "'coffee maker'", 512),
('c', "'dish washer'", 2000),
('d', "'hair dryer'", 256),
('e', "'kettle'", 256),
('f', "'oven'", 2000),
('g', "'toaster'", 256),
('h', "'light'", 2000),
('i', ['washer dryer', 'washing machine'], 1500)
]
for experiment, appliance, seq_length in APPLIANCES[:1]:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, 'a', full_exp_name)
func_call = func_call[:-1] + ", {}, {})".format(appliance, seq_length)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
import ipdb; ipdb.set_trace()
# raise
else:
del net.source
del net
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e484.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| mit |
ilyes14/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |