repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
nhenezi/kuma | vendor/packages/ipython/IPython/Shell.py | 6 | 45771 | # -*- coding: utf-8 -*-
"""IPython Shell classes.
All the matplotlib support code was co-developed with John Hunter,
matplotlib's author.
"""
#*****************************************************************************
# Copyright (C) 2001-2006 Fernando Perez <[email protected]>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
# Code begins
# Stdlib imports
import __builtin__
import __main__
import Queue
import inspect
import os
import sys
import thread
import threading
import time
from signal import signal, SIGINT
try:
import ctypes
HAS_CTYPES = True
except ImportError:
HAS_CTYPES = False
# IPython imports
import IPython
from IPython import ultraTB, ipapi
from IPython.Magic import Magic
from IPython.genutils import Term,warn,error,flag_calls, ask_yes_no
from IPython.iplib import InteractiveShell
from IPython.ipmaker import make_IPython
from IPython.ipstruct import Struct
from IPython.testing import decorators as testdec
# Globals
# global flag to pass around information about Ctrl-C without exceptions
KBINT = False
# global flag to turn on/off Tk support.
USE_TK = False
# ID for the main thread, used for cross-thread exceptions
MAIN_THREAD_ID = thread.get_ident()
# Tag when runcode() is active, for exception handling
CODE_RUN = None
# Default timeout for waiting for multithreaded shells (in seconds)
GUI_TIMEOUT = 10
#-----------------------------------------------------------------------------
# This class is trivial now, but I want to have it in to publish a clean
# interface. Later when the internals are reorganized, code that uses this
# shouldn't have to change.
class IPShell:
"""Create an IPython instance."""
def __init__(self,argv=None,user_ns=None,user_global_ns=None,
debug=1,shell_class=InteractiveShell):
self.IP = make_IPython(argv,user_ns=user_ns,
user_global_ns=user_global_ns,
debug=debug,shell_class=shell_class)
def mainloop(self,sys_exit=0,banner=None):
self.IP.mainloop(banner)
if sys_exit:
sys.exit()
#-----------------------------------------------------------------------------
def kill_embedded(self,parameter_s=''):
"""%kill_embedded : deactivate for good the current embedded IPython.
This function (after asking for confirmation) sets an internal flag so that
an embedded IPython will never activate again. This is useful to
permanently disable a shell that is being called inside a loop: once you've
figured out what you needed from it, you may then kill it and the program
will then continue to run without the interactive shell interfering again.
"""
kill = ask_yes_no("Are you sure you want to kill this embedded instance "
"(y/n)? [y/N] ",'n')
if kill:
self.shell.embedded_active = False
print "This embedded IPython will not reactivate anymore once you exit."
class IPShellEmbed:
"""Allow embedding an IPython shell into a running program.
Instances of this class are callable, with the __call__ method being an
alias to the embed() method of an InteractiveShell instance.
Usage (see also the example-embed.py file for a running example):
ipshell = IPShellEmbed([argv,banner,exit_msg,rc_override])
- argv: list containing valid command-line options for IPython, as they
would appear in sys.argv[1:].
For example, the following command-line options:
$ ipython -prompt_in1 'Input <\\#>' -colors LightBG
would be passed in the argv list as:
['-prompt_in1','Input <\\#>','-colors','LightBG']
- banner: string which gets printed every time the interpreter starts.
- exit_msg: string which gets printed every time the interpreter exits.
- rc_override: a dict or Struct of configuration options such as those
used by IPython. These options are read from your ~/.ipython/ipythonrc
file when the Shell object is created. Passing an explicit rc_override
dict with any options you want allows you to override those values at
creation time without having to modify the file. This way you can create
embeddable instances configured in any way you want without editing any
global files (thus keeping your interactive IPython configuration
unchanged).
Then the ipshell instance can be called anywhere inside your code:
ipshell(header='') -> Opens up an IPython shell.
- header: string printed by the IPython shell upon startup. This can let
you know where in your code you are when dropping into the shell. Note
that 'banner' gets prepended to all calls, so header is used for
location-specific information.
For more details, see the __call__ method below.
When the IPython shell is exited with Ctrl-D, normal program execution
resumes.
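A minimal usage sketch (illustrative only; the surrounding function, variable
names and banner text are hypothetical, not part of this module):
    from IPython.Shell import IPShellEmbed
    ipshell = IPShellEmbed(banner='Dropping into IPython', exit_msg='Leaving IPython')
    def process(data):
        # ... application work ...
        ipshell('Inspecting data before returning')  # shell opens here, Ctrl-D resumes
        return data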
This functionality was inspired by a posting on comp.lang.python by cmkl
<[email protected]> on Dec. 06/01 concerning similar uses of pyrepl, and
by the IDL stop/continue commands."""
def __init__(self,argv=None,banner='',exit_msg=None,rc_override=None,
user_ns=None):
"""Note that argv here is a string, NOT a list."""
self.set_banner(banner)
self.set_exit_msg(exit_msg)
self.set_dummy_mode(0)
# sys.displayhook is a global, we need to save the user's original
# Don't rely on __displayhook__, as the user may have changed that.
self.sys_displayhook_ori = sys.displayhook
# save readline completer status
try:
#print 'Save completer',sys.ipcompleter # dbg
self.sys_ipcompleter_ori = sys.ipcompleter
except:
pass # not nested with IPython
self.IP = make_IPython(argv,rc_override=rc_override,
embedded=True,
user_ns=user_ns)
ip = ipapi.IPApi(self.IP)
ip.expose_magic("kill_embedded",kill_embedded)
# copy our own displayhook also
self.sys_displayhook_embed = sys.displayhook
# and leave the system's display hook clean
sys.displayhook = self.sys_displayhook_ori
# don't use the ipython crash handler so that user exceptions aren't
# trapped
sys.excepthook = ultraTB.FormattedTB(color_scheme = self.IP.rc.colors,
mode = self.IP.rc.xmode,
call_pdb = self.IP.rc.pdb)
self.restore_system_completer()
def restore_system_completer(self):
"""Restores the readline completer which was in place.
This allows embedded IPython within IPython not to disrupt the
parent's completion.
"""
try:
self.IP.readline.set_completer(self.sys_ipcompleter_ori)
sys.ipcompleter = self.sys_ipcompleter_ori
except:
pass
def __call__(self,header='',local_ns=None,global_ns=None,dummy=None):
"""Activate the interactive interpreter.
__call__(self,header='',local_ns=None,global_ns=None,dummy=None) -> Start
the interpreter shell with the given local and global namespaces, and
optionally print a header string at startup.
The shell can be globally activated/deactivated using the
set/get_dummy_mode methods. This allows you to turn off a shell used
for debugging globally.
However, *each* time you call the shell you can override the current
state of dummy_mode with the optional keyword parameter 'dummy'. For
example, if you set dummy mode on with IPShell.set_dummy_mode(1), you
can still have a specific call work by making it as IPShell(dummy=0).
The optional keyword parameter dummy controls whether the call
actually does anything. """
# If the user has turned it off, go away
if not self.IP.embedded_active:
return
# Normal exits from interactive mode set this flag, so the shell can't
# re-enter (it checks this variable at the start of interactive mode).
self.IP.exit_now = False
# Allow the dummy parameter to override the global __dummy_mode
if dummy or (dummy != 0 and self.__dummy_mode):
return
# Set global subsystems (display,completions) to our values
sys.displayhook = self.sys_displayhook_embed
if self.IP.has_readline:
self.IP.set_completer()
if self.banner and header:
format = '%s\n%s\n'
else:
format = '%s%s\n'
banner = format % (self.banner,header)
# Call the embedding code with a stack depth of 1 so it can skip over
# our call and get the original caller's namespaces.
self.IP.embed_mainloop(banner,local_ns,global_ns,stack_depth=1)
if self.exit_msg:
print self.exit_msg
# Restore global systems (display, completion)
sys.displayhook = self.sys_displayhook_ori
self.restore_system_completer()
def set_dummy_mode(self,dummy):
"""Sets the embeddable shell's dummy mode parameter.
set_dummy_mode(dummy): dummy = 0 or 1.
This parameter is persistent and makes calls to the embeddable shell
silently return without performing any action. This allows you to
globally activate or deactivate a shell you're using with a single call.
If you need to override the setting for a single call, use the 'dummy' keyword parameter of __call__ instead."""
if dummy not in [0,1,False,True]:
raise ValueError,'dummy parameter must be boolean'
self.__dummy_mode = dummy
def get_dummy_mode(self):
"""Return the current value of the dummy mode parameter.
"""
return self.__dummy_mode
def set_banner(self,banner):
"""Sets the global banner.
This banner gets prepended to every header printed when the shell
instance is called."""
self.banner = banner
def set_exit_msg(self,exit_msg):
"""Sets the global exit_msg.
This exit message gets printed upon exiting every time the embedded
shell is called. It is None by default. """
self.exit_msg = exit_msg
#-----------------------------------------------------------------------------
if HAS_CTYPES:
# Add async exception support. Trick taken from:
# http://sebulba.wikispaces.com/recipe+thread2
def _async_raise(tid, exctype):
"""raises the exception, performs cleanup if needed"""
if not inspect.isclass(exctype):
raise TypeError("Only types can be raised (not instances)")
# Explicit cast to c_long is necessary for 64-bit support:
# See https://bugs.launchpad.net/ipython/+bug/237073
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid),
ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# If it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)
raise SystemError("PyThreadState_SetAsyncExc failed")
def sigint_handler(signum,stack_frame):
"""Sigint handler for threaded apps.
This is a horrible hack to pass information about SIGINT _without_
using exceptions, since I haven't been able to properly manage
cross-thread exceptions in GTK/WX. In fact, I don't think it can be
done (or at least that's my understanding from a c.l.py thread where
this was discussed)."""
global KBINT
if CODE_RUN:
_async_raise(MAIN_THREAD_ID,KeyboardInterrupt)
else:
KBINT = True
print '\nKeyboardInterrupt - Press <Enter> to continue.',
Term.cout.flush()
else:
def sigint_handler(signum,stack_frame):
"""Sigint handler for threaded apps.
This is a horrible hack to pass information about SIGINT _without_
using exceptions, since I haven't been able to properly manage
cross-thread exceptions in GTK/WX. In fact, I don't think it can be
done (or at least that's my understanding from a c.l.py thread where
this was discussed)."""
global KBINT
print '\nKeyboardInterrupt - Press <Enter> to continue.',
Term.cout.flush()
# Set global flag so that runsource can know that Ctrl-C was hit
KBINT = True
class MTInteractiveShell(InteractiveShell):
"""Simple multi-threaded shell."""
# Threading strategy taken from:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65109, by Brian
# McErlean and John Finlay. Modified with corrections by Antoon Pardon,
# from the pygtk mailing list, to avoid lockups with system calls.
# class attribute to indicate whether the class supports threads or not.
# Subclasses with thread support should override this as needed.
isthreaded = True
def __init__(self,name,usage=None,rc=Struct(opts=None,args=None),
user_ns=None,user_global_ns=None,banner2='',
gui_timeout=GUI_TIMEOUT,**kw):
"""Similar to the normal InteractiveShell, but with threading control"""
InteractiveShell.__init__(self,name,usage,rc,user_ns,
user_global_ns,banner2)
# Timeout we wait for GUI thread
self.gui_timeout = gui_timeout
# A queue to hold the code to be executed.
self.code_queue = Queue.Queue()
# Stuff to do at closing time
self._kill = None
on_kill = kw.get('on_kill', [])
# Check that all things to kill are callable:
for t in on_kill:
if not callable(t):
raise TypeError,'on_kill must be a list of callables'
self.on_kill = on_kill
# thread identity of the "worker thread" (that may execute code directly)
self.worker_ident = None
def runsource(self, source, filename="<input>", symbol="single"):
"""Compile and run some source in the interpreter.
Modified version of code.py's runsource(), to handle threading issues.
See the original for full docstring details."""
global KBINT
# If Ctrl-C was typed, we reset the flag and return right away
if KBINT:
KBINT = False
return False
if self._kill:
# can't queue new code if we are being killed
return True
try:
code = self.compile(source, filename, symbol)
except (OverflowError, SyntaxError, ValueError):
# Case 1
self.showsyntaxerror(filename)
return False
if code is None:
# Case 2
return True
# shortcut - if we are in worker thread, or the worker thread is not
# running, execute directly (to allow recursion and prevent deadlock if
# code is run early in IPython construction)
if (self.worker_ident is None
or self.worker_ident == thread.get_ident() ):
InteractiveShell.runcode(self,code)
return False
# Case 3
# Store code in queue, so the execution thread can handle it.
completed_ev, received_ev = threading.Event(), threading.Event()
self.code_queue.put((code,completed_ev, received_ev))
# first make sure the message was received, with timeout
received_ev.wait(self.gui_timeout)
if not received_ev.isSet():
# the mainloop is dead, start executing code directly
print "Warning: Timeout for mainloop thread exceeded"
print "switching to nonthreaded mode (until mainloop wakes up again)"
self.worker_ident = None
else:
completed_ev.wait()
return False
def runcode(self):
"""Execute a code object.
Multithreaded wrapper around IPython's runcode()."""
global CODE_RUN
# we are in worker thread, stash out the id for runsource()
self.worker_ident = thread.get_ident()
if self._kill:
print >>Term.cout, 'Closing threads...',
Term.cout.flush()
for tokill in self.on_kill:
tokill()
print >>Term.cout, 'Done.'
# allow kill() to return
self._kill.set()
return True
# Install sigint handler. We do it every time to ensure that if user
# code modifies it, we restore our own handling.
try:
signal(SIGINT,sigint_handler)
except SystemError:
# This happens under Windows, which seems to have all sorts
# of problems with signal handling. Oh well...
pass
# Flush queue of pending code by calling the run method of the parent
# class with all items which may be in the queue.
code_to_run = None
while 1:
try:
code_to_run, completed_ev, received_ev = self.code_queue.get_nowait()
except Queue.Empty:
break
received_ev.set()
# Exceptions need to be raised differently depending on which
# thread is active. This convoluted try/except is only there to
# protect against asynchronous exceptions, to ensure that a KBINT
# at the wrong time doesn't deadlock everything. The global
# CODE_RUN is set to true/false as close as possible to the
# runcode() call, so that the KBINT handler is correctly informed.
try:
try:
CODE_RUN = True
InteractiveShell.runcode(self,code_to_run)
except KeyboardInterrupt:
print "Keyboard interrupted in mainloop"
while not self.code_queue.empty():
code, ev1,ev2 = self.code_queue.get_nowait()
ev1.set()
ev2.set()
break
finally:
CODE_RUN = False
# allow runsource() to return from wait
completed_ev.set()
# This MUST return true for gtk threading to work
return True
def kill(self):
"""Kill the thread, returning when it has been shut down."""
self._kill = threading.Event()
self._kill.wait()
class MatplotlibShellBase:
"""Mixin class to provide the necessary modifications to regular IPython
shell classes for matplotlib support.
Given Python's MRO, this should be used as the FIRST class in the
inheritance hierarchy, so that it overrides the relevant methods."""
def _matplotlib_config(self,name,user_ns,user_global_ns=None):
"""Return items needed to setup the user's shell with matplotlib"""
# Initialize matplotlib to interactive mode always
import matplotlib
from matplotlib import backends
matplotlib.interactive(True)
def use(arg):
"""IPython wrapper for matplotlib's backend switcher.
In interactive use, we can not allow switching to a different
interactive backend, since thread conflicts will most likely crash
the python interpreter. This routine does a safety check first,
and refuses to perform a dangerous switch. It still allows
switching to non-interactive backends."""
if arg in backends.interactive_bk and arg != self.mpl_backend:
m=('invalid matplotlib backend switch.\n'
'This script attempted to switch to the interactive '
'backend: `%s`\n'
'Your current choice of interactive backend is: `%s`\n\n'
'Switching interactive matplotlib backends at runtime\n'
'would crash the python interpreter, '
'and IPython has blocked it.\n\n'
'You need to either change your choice of matplotlib backend\n'
'by editing your .matplotlibrc file, or run this script as a \n'
'standalone file from the command line, not using IPython.\n' %
(arg,self.mpl_backend) )
raise RuntimeError, m
else:
self.mpl_use(arg)
self.mpl_use._called = True
self.matplotlib = matplotlib
self.mpl_backend = matplotlib.rcParams['backend']
# we also need to block switching of interactive backends by use()
self.mpl_use = matplotlib.use
self.mpl_use._called = False
# overwrite the original matplotlib.use with our wrapper
matplotlib.use = use
# This must be imported last in the matplotlib series, after
# backend/interactivity choices have been made
import matplotlib.pylab as pylab
self.pylab = pylab
self.pylab.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
self.pylab.draw_if_interactive = flag_calls(self.pylab.draw_if_interactive)
# Build a user namespace initialized with matplotlib/matlab features.
user_ns, user_global_ns = IPython.ipapi.make_user_namespaces(user_ns,
user_global_ns)
# Importing numpy as np and pyplot as plt are conventions we're trying to
# somewhat standardize on. Making them available to users by default
# will greatly help this.
exec ("import numpy\n"
"import numpy as np\n"
"import matplotlib\n"
"import matplotlib.pylab as pylab\n"
"try:\n"
" import matplotlib.pyplot as plt\n"
"except ImportError:\n"
" pass\n"
) in user_ns
# Build matplotlib info banner
b="""
Welcome to pylab, a matplotlib-based Python environment.
For more information, type 'help(pylab)'.
"""
return user_ns,user_global_ns,b
def mplot_exec(self,fname,*where,**kw):
"""Execute a matplotlib script.
This is a call to execfile(), but wrapped in safeties to properly
handle interactive rendering and backend switching."""
#print '*** Matplotlib runner ***' # dbg
# turn off rendering until end of script
isInteractive = self.matplotlib.rcParams['interactive']
self.matplotlib.interactive(False)
self.safe_execfile(fname,*where,**kw)
self.matplotlib.interactive(isInteractive)
# make rendering call now, if the user tried to do it
if self.pylab.draw_if_interactive.called:
self.pylab.draw()
self.pylab.draw_if_interactive.called = False
# if a backend switch was performed, reverse it now
if self.mpl_use._called:
self.matplotlib.rcParams['backend'] = self.mpl_backend
@testdec.skip_doctest
def magic_run(self,parameter_s=''):
Magic.magic_run(self,parameter_s,runner=self.mplot_exec)
# Fix the docstring so users see the original as well
magic_run.__doc__ = "%s\n%s" % (Magic.magic_run.__doc__,
"\n *** Modified %run for Matplotlib,"
" with proper interactive handling ***")
# Now we provide two versions of the matplotlib-aware IPython base shell, single
# and multithreaded. Note that these are meant for internal use, the IPShell*
# classes below are the ones meant for public consumption.
class MatplotlibShell(MatplotlibShellBase,InteractiveShell):
"""Single-threaded shell with matplotlib support."""
def __init__(self,name,usage=None,rc=Struct(opts=None,args=None),
user_ns=None,user_global_ns=None,**kw):
user_ns,user_global_ns,b2 = self._matplotlib_config(name,user_ns,user_global_ns)
InteractiveShell.__init__(self,name,usage,rc,user_ns,user_global_ns,
banner2=b2,**kw)
class MatplotlibMTShell(MatplotlibShellBase,MTInteractiveShell):
"""Multi-threaded shell with matplotlib support."""
def __init__(self,name,usage=None,rc=Struct(opts=None,args=None),
user_ns=None,user_global_ns=None, **kw):
user_ns,user_global_ns,b2 = self._matplotlib_config(name,user_ns,user_global_ns)
MTInteractiveShell.__init__(self,name,usage,rc,user_ns,user_global_ns,
banner2=b2,**kw)
#-----------------------------------------------------------------------------
# Utility functions for the different GUI enabled IPShell* classes.
def get_tk():
"""Tries to import Tkinter and returns a withdrawn Tkinter root
window. If Tkinter is already imported or not available, this
returns None. This function calls `hijack_tk` underneath.
"""
if not USE_TK or sys.modules.has_key('Tkinter'):
return None
else:
try:
import Tkinter
except ImportError:
return None
else:
hijack_tk()
r = Tkinter.Tk()
r.withdraw()
return r
def hijack_tk():
"""Modifies Tkinter's mainloop with a dummy so when a module calls
mainloop, it does not block.
"""
def misc_mainloop(self, n=0):
pass
def tkinter_mainloop(n=0):
pass
import Tkinter
Tkinter.Misc.mainloop = misc_mainloop
Tkinter.mainloop = tkinter_mainloop
def update_tk(tk):
"""Updates the Tkinter event loop. This is typically called from
the respective WX or GTK mainloops.
"""
if tk:
tk.update()
def hijack_wx():
"""Modifies wxPython's MainLoop with a dummy so user code does not
block IPython. The hijacked mainloop function is returned.
"""
def dummy_mainloop(*args, **kw):
pass
try:
import wx
except ImportError:
# For very old versions of WX
import wxPython as wx
ver = wx.__version__
orig_mainloop = None
if ver[:3] >= '2.5':
import wx
if hasattr(wx, '_core_'): core = getattr(wx, '_core_')
elif hasattr(wx, '_core'): core = getattr(wx, '_core')
else: raise AttributeError('Could not find wx core module')
orig_mainloop = core.PyApp_MainLoop
core.PyApp_MainLoop = dummy_mainloop
elif ver[:3] == '2.4':
orig_mainloop = wx.wxc.wxPyApp_MainLoop
wx.wxc.wxPyApp_MainLoop = dummy_mainloop
else:
warn("Unable to find either wxPython version 2.4 or >= 2.5.")
return orig_mainloop
def hijack_gtk():
"""Modifies pyGTK's mainloop with a dummy so user code does not
block IPython. This function returns the original `gtk.mainloop`
function that has been hijacked.
"""
def dummy_mainloop(*args, **kw):
pass
import gtk
if gtk.pygtk_version >= (2,4,0): orig_mainloop = gtk.main
else: orig_mainloop = gtk.mainloop
gtk.mainloop = dummy_mainloop
gtk.main = dummy_mainloop
return orig_mainloop
def hijack_qt():
"""Modifies PyQt's mainloop with a dummy so user code does not
block IPython. This function returns the original
`qt.qApp.exec_loop` function that has been hijacked.
"""
def dummy_mainloop(*args, **kw):
pass
import qt
orig_mainloop = qt.qApp.exec_loop
qt.qApp.exec_loop = dummy_mainloop
qt.QApplication.exec_loop = dummy_mainloop
return orig_mainloop
def hijack_qt4():
"""Modifies PyQt4's mainloop with a dummy so user code does not
block IPython. This function returns the original
`QtGui.qApp.exec_` function that has been hijacked.
"""
def dummy_mainloop(*args, **kw):
pass
from PyQt4 import QtGui, QtCore
orig_mainloop = QtGui.qApp.exec_
QtGui.qApp.exec_ = dummy_mainloop
QtGui.QApplication.exec_ = dummy_mainloop
QtCore.QCoreApplication.exec_ = dummy_mainloop
return orig_mainloop
#-----------------------------------------------------------------------------
# The IPShell* classes below are the ones meant to be run by external code as
# IPython instances. Note that unless a specific threading strategy is
# desired, the factory function start() below should be used instead (it
# selects the proper threaded class).
class IPThread(threading.Thread):
def run(self):
self.IP.mainloop(self._banner)
self.IP.kill()
class IPShellGTK(IPThread):
"""Run a gtk mainloop() in a separate thread.
Python commands can be passed to the thread where they will be executed.
This is implemented by periodically checking for passed code using a
GTK timeout callback."""
TIMEOUT = 100 # Millisecond interval between timeouts.
def __init__(self,argv=None,user_ns=None,user_global_ns=None,
debug=1,shell_class=MTInteractiveShell):
import gtk
# Check for set_interactive, coming up in new pygtk.
# Disable it so that this code works, but notify
# the user that a better option is available.
# XXX TODO better support when set_interactive is released
try:
gtk.set_interactive(False)
print "Your PyGtk has set_interactive(), so you can use the"
print "more stable single-threaded Gtk mode."
print "See https://bugs.launchpad.net/ipython/+bug/270856"
except AttributeError:
pass
self.gtk = gtk
self.gtk_mainloop = hijack_gtk()
# Allows us to use both Tk and GTK.
self.tk = get_tk()
if gtk.pygtk_version >= (2,4,0): mainquit = self.gtk.main_quit
else: mainquit = self.gtk.mainquit
self.IP = make_IPython(argv,user_ns=user_ns,
user_global_ns=user_global_ns,
debug=debug,
shell_class=shell_class,
on_kill=[mainquit])
# HACK: slot for banner in self; it will be passed to the mainloop
# method only and .run() needs it. The actual value will be set by
# .mainloop().
self._banner = None
threading.Thread.__init__(self)
def mainloop(self,sys_exit=0,banner=None):
self._banner = banner
if self.gtk.pygtk_version >= (2,4,0):
import gobject
gobject.idle_add(self.on_timer)
else:
self.gtk.idle_add(self.on_timer)
if sys.platform != 'win32':
try:
if self.gtk.gtk_version[0] >= 2:
self.gtk.gdk.threads_init()
except AttributeError:
pass
except RuntimeError:
error('Your pyGTK likely has not been compiled with '
'threading support.\n'
'The exception printout is below.\n'
'You can either rebuild pyGTK with threads, or '
'try using \n'
'matplotlib with a different backend (like Tk or WX).\n'
'Note that matplotlib will most likely not work in its '
'current state!')
self.IP.InteractiveTB()
self.start()
self.gtk.gdk.threads_enter()
self.gtk_mainloop()
self.gtk.gdk.threads_leave()
self.join()
def on_timer(self):
"""Called when GTK is idle.
Must return True always, otherwise GTK stops calling it"""
update_tk(self.tk)
self.IP.runcode()
time.sleep(0.01)
return True
class IPShellWX(IPThread):
"""Run a wx mainloop() in a separate thread.
Python commands can be passed to the thread where they will be executed.
This is implemented by periodically checking for passed code using a
wx timer callback."""
TIMEOUT = 100 # Millisecond interval between timeouts.
def __init__(self,argv=None,user_ns=None,user_global_ns=None,
debug=1,shell_class=MTInteractiveShell):
self.IP = make_IPython(argv,user_ns=user_ns,
user_global_ns=user_global_ns,
debug=debug,
shell_class=shell_class,
on_kill=[self.wxexit])
wantedwxversion=self.IP.rc.wxversion
if wantedwxversion!="0":
try:
import wxversion
except ImportError:
error('The wxversion module is needed for WX version selection')
else:
try:
wxversion.select(wantedwxversion)
except:
self.IP.InteractiveTB()
error('Requested wxPython version %s could not be loaded' %
wantedwxversion)
import wx
threading.Thread.__init__(self)
self.wx = wx
self.wx_mainloop = hijack_wx()
# Allows us to use both Tk and GTK.
self.tk = get_tk()
# HACK: slot for banner in self; it will be passed to the mainloop
# method only and .run() needs it. The actual value will be set by
# .mainloop().
self._banner = None
self.app = None
def wxexit(self, *args):
if self.app is not None:
self.app.agent.timer.Stop()
self.app.ExitMainLoop()
def mainloop(self,sys_exit=0,banner=None):
self._banner = banner
self.start()
class TimerAgent(self.wx.MiniFrame):
wx = self.wx
IP = self.IP
tk = self.tk
def __init__(self, parent, interval):
style = self.wx.DEFAULT_FRAME_STYLE | self.wx.TINY_CAPTION_HORIZ
self.wx.MiniFrame.__init__(self, parent, -1, ' ', pos=(200, 200),
size=(100, 100),style=style)
self.Show(False)
self.interval = interval
self.timerId = self.wx.NewId()
def StartWork(self):
self.timer = self.wx.Timer(self, self.timerId)
self.wx.EVT_TIMER(self, self.timerId, self.OnTimer)
self.timer.Start(self.interval)
def OnTimer(self, event):
update_tk(self.tk)
self.IP.runcode()
class App(self.wx.App):
wx = self.wx
TIMEOUT = self.TIMEOUT
def OnInit(self):
'Create the main window and insert the custom frame'
self.agent = TimerAgent(None, self.TIMEOUT)
self.agent.Show(False)
self.agent.StartWork()
return True
self.app = App(redirect=False)
self.wx_mainloop(self.app)
self.join()
class IPShellQt(IPThread):
"""Run a Qt event loop in a separate thread.
Python commands can be passed to the thread where they will be executed.
This is implemented by periodically checking for passed code using a
Qt timer / slot."""
TIMEOUT = 100 # Millisecond interval between timeouts.
def __init__(self, argv=None, user_ns=None, user_global_ns=None,
debug=0, shell_class=MTInteractiveShell):
import qt
self.exec_loop = hijack_qt()
# Allows us to use both Tk and QT.
self.tk = get_tk()
self.IP = make_IPython(argv,
user_ns=user_ns,
user_global_ns=user_global_ns,
debug=debug,
shell_class=shell_class,
on_kill=[qt.qApp.exit])
# HACK: slot for banner in self; it will be passed to the mainloop
# method only and .run() needs it. The actual value will be set by
# .mainloop().
self._banner = None
threading.Thread.__init__(self)
def mainloop(self, sys_exit=0, banner=None):
import qt
self._banner = banner
if qt.QApplication.startingUp():
a = qt.QApplication(sys.argv)
self.timer = qt.QTimer()
qt.QObject.connect(self.timer,
qt.SIGNAL('timeout()'),
self.on_timer)
self.start()
self.timer.start(self.TIMEOUT, True)
while True:
if self.IP._kill: break
self.exec_loop()
self.join()
def on_timer(self):
update_tk(self.tk)
result = self.IP.runcode()
self.timer.start(self.TIMEOUT, True)
return result
class IPShellQt4(IPThread):
"""Run a Qt event loop in a separate thread.
Python commands can be passed to the thread where they will be executed.
This is implemented by periodically checking for passed code using a
Qt timer / slot."""
TIMEOUT = 100 # Millisecond interval between timeouts.
def __init__(self, argv=None, user_ns=None, user_global_ns=None,
debug=0, shell_class=MTInteractiveShell):
from PyQt4 import QtCore, QtGui
try:
# present in PyQt4-4.2.1 or later
QtCore.pyqtRemoveInputHook()
except AttributeError:
pass
if QtCore.PYQT_VERSION_STR == '4.3':
warn('''PyQt4 version 4.3 detected.
If you experience repeated threading warnings, please update PyQt4.
''')
self.exec_ = hijack_qt4()
# Allows us to use both Tk and QT.
self.tk = get_tk()
self.IP = make_IPython(argv,
user_ns=user_ns,
user_global_ns=user_global_ns,
debug=debug,
shell_class=shell_class,
on_kill=[QtGui.qApp.exit])
# HACK: slot for banner in self; it will be passed to the mainloop
# method only and .run() needs it. The actual value will be set by
# .mainloop().
self._banner = None
threading.Thread.__init__(self)
def mainloop(self, sys_exit=0, banner=None):
from PyQt4 import QtCore, QtGui
self._banner = banner
if QtGui.QApplication.startingUp():
a = QtGui.QApplication(sys.argv)
self.timer = QtCore.QTimer()
QtCore.QObject.connect(self.timer,
QtCore.SIGNAL('timeout()'),
self.on_timer)
self.start()
self.timer.start(self.TIMEOUT)
while True:
if self.IP._kill: break
self.exec_()
self.join()
def on_timer(self):
update_tk(self.tk)
result = self.IP.runcode()
self.timer.start(self.TIMEOUT)
return result
# A set of matplotlib public IPython shell classes, for single-threaded (Tk*
# and FLTK*) and multithreaded (GTK*, WX* and Qt*) backends to use.
def _load_pylab(user_ns):
"""Allow users to disable pulling all of pylab into the top-level
namespace.
This little utility must be called AFTER the actual ipython instance is
running, since only then will the options file have been fully parsed."""
ip = IPython.ipapi.get()
if ip.options.pylab_import_all:
ip.ex("from matplotlib.pylab import *")
ip.IP.user_config_ns.update(ip.user_ns)
class IPShellMatplotlib(IPShell):
"""Subclass IPShell with MatplotlibShell as the internal shell.
Single-threaded class, meant for the Tk* and FLTK* backends.
Having this on a separate class simplifies the external driver code."""
def __init__(self,argv=None,user_ns=None,user_global_ns=None,debug=1):
IPShell.__init__(self,argv,user_ns,user_global_ns,debug,
shell_class=MatplotlibShell)
_load_pylab(self.IP.user_ns)
class IPShellMatplotlibGTK(IPShellGTK):
"""Subclass IPShellGTK with MatplotlibMTShell as the internal shell.
Multi-threaded class, meant for the GTK* backends."""
def __init__(self,argv=None,user_ns=None,user_global_ns=None,debug=1):
IPShellGTK.__init__(self,argv,user_ns,user_global_ns,debug,
shell_class=MatplotlibMTShell)
_load_pylab(self.IP.user_ns)
class IPShellMatplotlibWX(IPShellWX):
"""Subclass IPShellWX with MatplotlibMTShell as the internal shell.
Multi-threaded class, meant for the WX* backends."""
def __init__(self,argv=None,user_ns=None,user_global_ns=None,debug=1):
IPShellWX.__init__(self,argv,user_ns,user_global_ns,debug,
shell_class=MatplotlibMTShell)
_load_pylab(self.IP.user_ns)
class IPShellMatplotlibQt(IPShellQt):
"""Subclass IPShellQt with MatplotlibMTShell as the internal shell.
Multi-threaded class, meant for the Qt* backends."""
def __init__(self,argv=None,user_ns=None,user_global_ns=None,debug=1):
IPShellQt.__init__(self,argv,user_ns,user_global_ns,debug,
shell_class=MatplotlibMTShell)
_load_pylab(self.IP.user_ns)
class IPShellMatplotlibQt4(IPShellQt4):
"""Subclass IPShellQt4 with MatplotlibMTShell as the internal shell.
Multi-threaded class, meant for the Qt4* backends."""
def __init__(self,argv=None,user_ns=None,user_global_ns=None,debug=1):
IPShellQt4.__init__(self,argv,user_ns,user_global_ns,debug,
shell_class=MatplotlibMTShell)
_load_pylab(self.IP.user_ns)
#-----------------------------------------------------------------------------
# Factory functions to actually start the proper thread-aware shell
def _select_shell(argv):
"""Select a shell from the given argv vector.
This function implements the threading selection policy, allowing runtime
control of the threading mode, both for general users and for matplotlib.
Return:
Shell class to be instantiated for runtime operation.
"""
global USE_TK
mpl_shell = {'gthread' : IPShellMatplotlibGTK,
'wthread' : IPShellMatplotlibWX,
'qthread' : IPShellMatplotlibQt,
'q4thread' : IPShellMatplotlibQt4,
'tkthread' : IPShellMatplotlib, # Tk is built-in
}
th_shell = {'gthread' : IPShellGTK,
'wthread' : IPShellWX,
'qthread' : IPShellQt,
'q4thread' : IPShellQt4,
'tkthread' : IPShell, # Tk is built-in
}
backends = {'gthread' : 'GTKAgg',
'wthread' : 'WXAgg',
'qthread' : 'QtAgg',
'q4thread' :'Qt4Agg',
'tkthread' :'TkAgg',
}
all_opts = set(['tk','pylab','gthread','qthread','q4thread','wthread',
'tkthread'])
user_opts = set([s.replace('-','') for s in argv[:3]])
special_opts = user_opts & all_opts
if 'tk' in special_opts:
USE_TK = True
special_opts.remove('tk')
if 'pylab' in special_opts:
try:
import matplotlib
except ImportError:
error('matplotlib could NOT be imported! Starting normal IPython.')
return IPShell
special_opts.remove('pylab')
# If there's any option left, it means the user wants to force the
# threading backend, else it's auto-selected from the rc file
if special_opts:
th_mode = special_opts.pop()
matplotlib.rcParams['backend'] = backends[th_mode]
else:
backend = matplotlib.rcParams['backend']
if backend.startswith('GTK'):
th_mode = 'gthread'
elif backend.startswith('WX'):
th_mode = 'wthread'
elif backend.startswith('Qt4'):
th_mode = 'q4thread'
elif backend.startswith('Qt'):
th_mode = 'qthread'
else:
# Any other backend, use plain Tk
th_mode = 'tkthread'
return mpl_shell[th_mode]
else:
# No pylab requested, just plain threads
try:
th_mode = special_opts.pop()
except KeyError:
th_mode = 'tkthread'
return th_shell[th_mode]
# This is the one which should be called by external code.
def start(user_ns = None):
"""Return a running shell instance, dealing with threading options.
This is a factory function which will instantiate the proper IPython shell
based on the user's threading choice. Such a selector is needed because
different GUI toolkits require different thread handling details."""
shell = _select_shell(sys.argv)
return shell(user_ns = user_ns)
# Some aliases for backwards compatibility
IPythonShell = IPShell
IPythonShellEmbed = IPShellEmbed
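# Illustrative sketch (not part of the original module): external code usually
# launches a shell through the start() factory defined above.
if __name__ == '__main__':
    _demo_shell = start()     # selects a plain or GUI-threaded shell from sys.argv
    _demo_shell.mainloop()    # blocks until the user exits the interactive shell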
#************************ End of file <Shell.py> ***************************
| mpl-2.0 |
TimeStone/tushare | tushare/stock/billboard.py | 19 | 12058 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Daily billboard (top list) data
Created on 2015-06-10
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
import pandas as pd
from pandas.compat import StringIO
from tushare.stock import cons as ct
import numpy as np
import time
import re
import lxml.html
from lxml import etree
from tushare.util import dateu as du
from tushare.stock import ref_vars as rv
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def top_list(date = None, retry_count=3, pause=0.001):
"""
Fetch the daily billboard (top list) of stocks.
Parameters
--------
date:string
            date of the detail data, format: YYYY-MM-DD; if None, data for the most recent trading day is returned
retry_count : int, default 3
             number of retries when network or similar problems occur
pause : int, default 0
            seconds to pause between repeated requests, to avoid problems caused by requests sent too close together
Return
------
DataFrame
    code: stock code
    name: stock name
    pchange: price change (%)
    amount: billboard turnover (10k CNY)
    buy: buy amount (10k CNY)
    bratio: share of total turnover
    sell: sell amount (10k CNY)
    sratio: share of total turnover
    reason: reason for appearing on the billboard
    date: date
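Example (illustrative sketch; assumes top_list is exported at package level,
as in classic tushare, and that the date is a past trading day):
    import tushare as ts
    df = ts.top_list('2015-06-17')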
"""
if date is None:
if du.get_hour() < 18:
date = du.last_tddate()
else:
date = du.today()
else:
if(du.is_holiday(date)):
return None
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_URL%(ct.P_TYPE['http'], ct.DOMAINS['em'], date))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dt_1\"]")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
df = pd.read_html(sarr)[0]
df.columns = [i for i in range(1,12)]
df = df.apply(_f_rows, axis=1)
df = df.fillna(method='ffill')
df = df.drop([1, 4], axis=1)
df.columns = rv.LHB_COLS
df = df.drop_duplicates()
df['code'] = df['code'].astype(int)
df['code'] = df['code'].map(lambda x: str(x).zfill(6))
df['date'] = date
except:
pass
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def cap_tops(days= 5, retry_count= 3, pause= 0.001):
"""
Fetch per-stock billboard statistics.
Parameters
--------
days:int
            number of days; counts billboard appearances over the last n days. Default is 5; other valid values are 10, 30 and 60
retry_count : int, default 3
             number of retries when network or similar problems occur
pause : int, default 0
            seconds to pause between repeated requests, to avoid problems caused by requests sent too close together
Return
------
DataFrame
    code: stock code
    name: stock name
    count: number of billboard appearances
    bamount: cumulative buy amount (10k CNY)
    samount: cumulative sell amount (10k CNY)
    net: net amount (10k CNY)
    bcount: number of buy seats
    scount: number of sell seats
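Example (illustrative sketch; same package-level export assumption as top_list):
    import tushare as ts
    df = ts.cap_tops(days=10)   # billboard appearances over the last 10 days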
"""
if ct._check_lhb_input(days) is True:
ct._write_head()
df = _cap_tops(days, pageNo=1, retry_count=retry_count,
pause=pause)
df['code'] = df['code'].map(lambda x: str(x).zfill(6))
if df is not None:
df = df.drop_duplicates('code')
return df
def _cap_tops(last=5, pageNo=1, retry_count=3, pause=0.001, dataArr=pd.DataFrame()):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_SINA_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], rv.LHB_KINDS[0],
ct.PAGES['fd'], last, pageNo))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dataTable\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns = rv.LHB_GGTJ_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _cap_tops(last, pageNo, retry_count, pause, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def broker_tops(days= 5, retry_count= 3, pause= 0.001):
"""
Fetch billboard statistics aggregated by brokerage branch.
Parameters
--------
days:int
            number of days; counts billboard appearances over the last n days. Default is 5; other valid values are 10, 30 and 60
retry_count : int, default 3
             number of retries when network or similar problems occur
pause : int, default 0
            seconds to pause between repeated requests, to avoid problems caused by requests sent too close together
Return
---------
broker: brokerage branch name
count: number of billboard appearances
bamount: cumulative buy amount (10k CNY)
bcount: number of buy seats
samount: cumulative sell amount (10k CNY)
scount: number of sell seats
top3: top three stocks bought
"""
if ct._check_lhb_input(days) is True:
ct._write_head()
df = _broker_tops(days, pageNo=1, retry_count=retry_count,
pause=pause)
return df
def _broker_tops(last=5, pageNo=1, retry_count=3, pause=0.001, dataArr=pd.DataFrame()):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_SINA_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], rv.LHB_KINDS[1],
ct.PAGES['fd'], last, pageNo))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dataTable\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns = rv.LHB_YYTJ_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _broker_tops(last, pageNo, retry_count, pause, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def inst_tops(days= 5, retry_count= 3, pause= 0.001):
"""
Fetch institutional-seat tracking statistics.
Parameters
--------
days:int
            number of days; counts billboard appearances over the last n days. Default is 5; other valid values are 10, 30 and 60
retry_count : int, default 3
             number of retries when network or similar problems occur
pause : int, default 0
            seconds to pause between repeated requests, to avoid problems caused by requests sent too close together
Return
--------
code: stock code
name: stock name
bamount: cumulative buy amount (10k CNY)
bcount: number of buys
samount: cumulative sell amount (10k CNY)
scount: number of sells
net: net amount (10k CNY)
"""
if ct._check_lhb_input(days) is True:
ct._write_head()
df = _inst_tops(days, pageNo=1, retry_count=retry_count,
pause=pause)
df['code'] = df['code'].map(lambda x: str(x).zfill(6))
return df
def _inst_tops(last=5, pageNo=1, retry_count=3, pause=0.001, dataArr=pd.DataFrame()):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_SINA_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], rv.LHB_KINDS[2],
ct.PAGES['fd'], last, pageNo))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dataTable\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df = df.drop([2,3], axis=1)
df.columns = rv.LHB_JGZZ_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _inst_tops(last, pageNo, retry_count, pause, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def inst_detail(retry_count= 3, pause= 0.001):
"""
Fetch institutional-seat transaction details for the most recent trading day.
Parameters
--------
retry_count : int, default 3
             number of retries when network or similar problems occur
pause : int, default 0
            seconds to pause between repeated requests, to avoid problems caused by requests sent too close together
Return
----------
code: stock code
name: stock name
date: trade date
bamount: institutional-seat buy amount (10k CNY)
samount: institutional-seat sell amount (10k CNY)
type: category
"""
ct._write_head()
df = _inst_detail(pageNo=1, retry_count=retry_count,
pause=pause)
if len(df)>0:
df['code'] = df['code'].map(lambda x: str(x).zfill(6))
return df
def _inst_detail(pageNo=1, retry_count=3, pause=0.001, dataArr=pd.DataFrame()):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_SINA_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], rv.LHB_KINDS[3],
ct.PAGES['fd'], '', pageNo))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dataTable\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns = rv.LHB_JGMX_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _inst_detail(pageNo, retry_count, pause, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def _f_rows(x):
if '%' in x[3]:
x[11] = x[6]
for i in range(6, 11):
x[i] = x[i-5]
for i in range(1, 6):
x[i] = np.NaN
return x
if __name__ == "__main__":
print(top_list('2015-06-17'))
# print(inst_detail())
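    # Other entry points follow the same call pattern (illustrative, left
    # commented out because each call hits the network):
    # print(cap_tops(10))      # per-stock statistics over the last 10 days
    # print(broker_tops(5))    # brokerage-branch statistics
    # print(inst_tops(30))     # institutional-seat statistics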
| bsd-3-clause |
mlyundin/Machine-Learning | ex3/ex3.py | 1 | 1834 | import scipy.io as sio
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import minimize
from common_functions import add_zero_feature, cf_lr as cost_function, gf_lr as grad_function, \
cf_lr_reg as cost_function_reg, gf_lr_reg as grad_function_reg
if __name__ == '__main__':
data = sio.loadmat('ex3data1.mat')
y = data['y']
X = data['X']
# replace 10 by 0
y = y % 10
n_sampels = 100
sampels = np.random.choice(len(X), n_sampels)
fig = plt.figure(figsize=(8, 8)) # figure size in inches
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
for i, j in enumerate(sampels):
ax = fig.add_subplot(10, 10, i + 1, xticks=[], yticks=[])
ax.imshow(X[j, :].reshape(20, 20).T, cmap=plt.cm.binary, interpolation='nearest')
ax.text(0, 7, str(y[j, 0]))
plt.show()
num_labels = 10
X = add_zero_feature(X)
m, n = X.shape
initial_theta = np.ones((n, 1))
all_theta = np.vstack([minimize(cost_function, initial_theta, method='BFGS', jac=grad_function, options={'disp': True, 'maxiter':100},
args=(X, (y == i).astype(int))).x for i in range(num_labels)])
y_pred = np.argmax(np.dot(X, all_theta.T), axis=1)
print 'Training Set Accuracy: {}'.format(np.mean(y_pred == y.ravel()) * 100)
# Use regularization
lambda_coef = 0.1
all_theta = np.vstack([minimize(cost_function_reg, initial_theta, method='BFGS', jac=grad_function_reg, options={'disp': True, 'maxiter':100},
args=(X, (y == i).astype(int), lambda_coef)).x for i in range(num_labels)])
y_pred = np.argmax(np.dot(X, all_theta.T), axis=1)
print 'Training Set Accuracy: {}'.format(np.mean(y_pred == y.ravel()) * 100)
| mit |
schets/scikit-learn | sklearn/tree/export.py | 6 | 15622 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
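Example (illustrative; the exact RGB triples depend on the fixed
saturation/value constants used below):
    >>> len(_color_brew(3))   # three equally spaced hues  # doctest: +SKIP
    3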
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360./n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
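The written dot file can also be rendered from Python (sketch; assumes the
third-party ``graphviz`` package is installed):
>>> import graphviz # doctest: +SKIP
>>> graphviz.Source(open('tree.dot').read()).render('tree') # doctest: +SKIP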
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] - colors['bounds'][0])))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
out_file.write('] ;\n' % node_id)
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
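# A hedged usage sketch: this exporter appears to be (a variant of) sklearn.tree.export_graphviz,
# so a typical call against the public sklearn API would look like the following; render the
# resulting .dot file with Graphviz afterwards (e.g. `dot -Tpng tree.dot -o tree.png`):
#
#   from sklearn.datasets import load_iris
#   from sklearn.tree import DecisionTreeClassifier, export_graphviz
#   iris = load_iris()
#   clf = DecisionTreeClassifier(max_depth=3).fit(iris.data, iris.target)
#   export_graphviz(clf, out_file="tree.dot", feature_names=iris.feature_names,
#                   class_names=iris.target_names, filled=True, rounded=True)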
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/mpl_toolkits/axes_grid1/axes_rgb.py | 6 | 6973 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from .axes_divider import make_axes_locatable, Size, locatable_axes_factory
import sys
from .mpl_axes import Axes
def make_rgb_axes(ax, pad=0.01, axes_class=None, add_all=True):
"""
pad : fraction of the axes height.
"""
divider = make_axes_locatable(ax)
pad_size = Size.Fraction(pad, Size.AxesY(ax))
xsize = Size.Fraction((1.-2.*pad)/3., Size.AxesX(ax))
ysize = Size.Fraction((1.-2.*pad)/3., Size.AxesY(ax))
divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))
ax_rgb = []
if axes_class is None:
try:
axes_class = locatable_axes_factory(ax._axes_class)
except AttributeError:
axes_class = locatable_axes_factory(type(ax))
for ny in [4, 2, 0]:
ax1 = axes_class(ax.get_figure(),
ax.get_position(original=True),
sharex=ax, sharey=ax)
locator = divider.new_locator(nx=2, ny=ny)
ax1.set_axes_locator(locator)
for t in ax1.yaxis.get_ticklabels() + ax1.xaxis.get_ticklabels():
t.set_visible(False)
try:
for axis in ax1.axis.values():
axis.major_ticklabels.set_visible(False)
except AttributeError:
pass
ax_rgb.append(ax1)
if add_all:
fig = ax.get_figure()
for ax1 in ax_rgb:
fig.add_axes(ax1)
return ax_rgb
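# A minimal usage sketch (hedged): `_demo_make_rgb_axes` and its random channel data are
# illustrative only and not part of the toolkit API; it attaches the three R/G/B side
# panels to an ordinary Axes and fills them with synthetic data.
def _demo_make_rgb_axes():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    # three small panels (top to bottom: R, G, B) created next to the main axes
    ax_r, ax_g, ax_b = make_rgb_axes(ax, pad=0.02)
    r = g = b = np.random.rand(16, 16)          # placeholder channel data
    imshow_rgb(ax, r, g, b, interpolation='nearest')  # composite image in the main axes
    ax_r.imshow(r, cmap='Reds', interpolation='nearest')
    ax_g.imshow(g, cmap='Greens', interpolation='nearest')
    ax_b.imshow(b, cmap='Blues', interpolation='nearest')
    return fig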
def imshow_rgb(ax, r, g, b, **kwargs):
ny, nx = r.shape
R = np.zeros([ny, nx, 3], dtype="d")
R[:,:,0] = r
G = np.zeros_like(R)
G[:,:,1] = g
B = np.zeros_like(R)
B[:,:,2] = b
RGB = R + G + B
im_rgb = ax.imshow(RGB, **kwargs)
return im_rgb
class RGBAxesBase(object):
"""base class for a 4-panel imshow (RGB, R, G, B)
Layout:
+---------------+-----+
| | R |
+ +-----+
| RGB | G |
+ +-----+
| | B |
+---------------+-----+
Attributes
----------
_defaultAxesClass : matplotlib.axes.Axes
defaults to 'Axes' in RGBAxes child class.
No default in abstract base class
RGB : _defaultAxesClass
The axes object for the three-channel imshow
R : _defaultAxesClass
The axes object for the red channel imshow
G : _defaultAxesClass
The axes object for the green channel imshow
B : _defaultAxesClass
The axes object for the blue channel imshow
"""
def __init__(self, *kl, **kwargs):
"""
Parameters
----------
pad : float
fraction of the axes height to put as padding.
defaults to 0.0
add_all : bool
True: Add the {rgb, r, g, b} axes to the figure
defaults to True.
axes_class : matplotlib.axes.Axes
kl :
Unpacked into axes_class() init for RGB
kwargs :
Unpacked into axes_class() init for RGB, R, G, B axes
"""
pad = kwargs.pop("pad", 0.0)
add_all = kwargs.pop("add_all", True)
try:
axes_class = kwargs.pop("axes_class", self._defaultAxesClass)
except AttributeError:
new_msg = ("A subclass of RGBAxesBase must have a "
"_defaultAxesClass attribute. If you are not sure which "
"axes class to use, consider using "
"mpl_toolkits.axes_grid1.mpl_axes.Axes.")
six.reraise(AttributeError, AttributeError(new_msg),
sys.exc_info()[2])
ax = axes_class(*kl, **kwargs)
divider = make_axes_locatable(ax)
pad_size = Size.Fraction(pad, Size.AxesY(ax))
xsize = Size.Fraction((1.-2.*pad)/3., Size.AxesX(ax))
ysize = Size.Fraction((1.-2.*pad)/3., Size.AxesY(ax))
divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))
ax_rgb = []
for ny in [4, 2, 0]:
ax1 = axes_class(ax.get_figure(),
ax.get_position(original=True),
sharex=ax, sharey=ax, **kwargs)
locator = divider.new_locator(nx=2, ny=ny)
ax1.set_axes_locator(locator)
ax1.axis[:].toggle(ticklabels=False)
ax_rgb.append(ax1)
self.RGB = ax
self.R, self.G, self.B = ax_rgb
if add_all:
fig = ax.get_figure()
fig.add_axes(ax)
self.add_RGB_to_figure()
self._config_axes()
def _config_axes(self, line_color='w', marker_edge_color='w'):
"""Set the line color and ticks for the axes
Parameters
----------
line_color : any matplotlib color
marker_edge_color : any matplotlib color
"""
for ax1 in [self.RGB, self.R, self.G, self.B]:
ax1.axis[:].line.set_color(line_color)
ax1.axis[:].major_ticks.set_markeredgecolor(marker_edge_color)
def add_RGB_to_figure(self):
"""Add the red, green and blue axes to the RGB composite's axes figure
"""
self.RGB.get_figure().add_axes(self.R)
self.RGB.get_figure().add_axes(self.G)
self.RGB.get_figure().add_axes(self.B)
def imshow_rgb(self, r, g, b, **kwargs):
"""Create the four images {rgb, r, g, b}
Parameters
----------
r : array-like
The red array
g : array-like
The green array
b : array-like
The blue array
kwargs : imshow kwargs
kwargs get unpacked into the imshow calls for the four images
Returns
-------
rgb : matplotlib.image.AxesImage
r : matplotlib.image.AxesImage
g : matplotlib.image.AxesImage
b : matplotlib.image.AxesImage
"""
if not (r.shape == g.shape == b.shape):
raise ValueError('Input shapes do not match.'
'\nr.shape = {}'
'\ng.shape = {}'
'\nb.shape = {}'
.format(r.shape, g.shape, b.shape))
RGB = np.dstack([r, g, b])
R = np.zeros_like(RGB)
R[:,:,0] = r
G = np.zeros_like(RGB)
G[:,:,1] = g
B = np.zeros_like(RGB)
B[:,:,2] = b
im_rgb = self.RGB.imshow(RGB, **kwargs)
im_r = self.R.imshow(R, **kwargs)
im_g = self.G.imshow(G, **kwargs)
im_b = self.B.imshow(B, **kwargs)
return im_rgb, im_r, im_g, im_b
class RGBAxes(RGBAxesBase):
_defaultAxesClass = Axes
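# A minimal usage sketch (hedged): `_demo_rgb_axes` and its random channel data are
# illustrative only; RGBAxes is constructed from a figure and a rect, as in the
# axes_grid1 examples, and imshow_rgb fills the four panels.
def _demo_rgb_axes():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = RGBAxes(fig, [0.1, 0.1, 0.8, 0.8])
    r = np.random.rand(32, 32)
    g = np.random.rand(32, 32)
    b = np.random.rand(32, 32)
    ax.imshow_rgb(r, g, b, interpolation='nearest')
    return fig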
| mit |
geekboxzone/lollipop_external_chromium_org | chrome/test/nacl_test_injection/buildbot_chrome_nacl_stage.py | 35 | 11261 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Do all the steps required to build and test against nacl."""
import optparse
import os.path
import re
import shutil
import subprocess
import sys
import find_chrome
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
CHROMIUM_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '..', '..'))
sys.path.append(os.path.join(CHROMIUM_DIR, 'build'))
import detect_host_arch
# Copied from buildbot/buildbot_lib.py
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
# Copied from buildbot/buildbot_lib.py
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
try:
os.remove(path)
except Exception:
pass
else:
print 'Skipping %s' % path
# TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem.
def CleanTempDir():
# Only delete files and directories like:
# a) C:\temp\83C4.tmp
# b) /tmp/.org.chromium.Chromium.EQrEzl
file_name_re = re.compile(
r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
file_name_filter = lambda fn: file_name_re.search(fn) is not None
path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp'))
if len(path) >= 4 and os.path.isdir(path):
print
print "Cleaning out the temp directory."
print
TryToCleanContents(path, file_name_filter)
else:
print
print "Cannot find temp directory, not cleaning it."
print
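# For reference (hedged, doctest-style illustration): the local file_name_re defined in
# CleanTempDir above keeps only temp-file names like the two examples in its comment:
#
#   >>> bool(file_name_re.search(r'C:\temp\83C4.tmp'))
#   True
#   >>> bool(file_name_re.search('/tmp/.org.chromium.Chromium.EQrEzl'))
#   True
#   >>> bool(file_name_re.search('/tmp/some_other_file.txt'))
#   False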
def RunCommand(cmd, cwd, env):
sys.stdout.write('\nRunning %s\n\n' % ' '.join(cmd))
sys.stdout.flush()
retcode = subprocess.call(cmd, cwd=cwd, env=env)
if retcode != 0:
sys.stdout.write('\nFailed: %s\n\n' % ' '.join(cmd))
sys.exit(retcode)
def RunTests(name, cmd, nacl_dir, env):
sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name)
RunCommand(cmd + ['do_not_run_tests=1', '-j8'], nacl_dir, env)
sys.stdout.write('\n\nRunning %s tests...\n\n' % name)
RunCommand(cmd, nacl_dir, env)
def BuildAndTest(options):
# Refuse to run under cygwin.
if sys.platform == 'cygwin':
raise Exception('I do not work under cygwin, sorry.')
# By default, use the version of Python that is being used to run this script.
python = sys.executable
if sys.platform == 'darwin':
# Mac 10.5 bots tend to use a particularly old version of Python, look for
# a newer version.
macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python'
if os.path.exists(macpython27):
python = macpython27
script_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(os.path.dirname(os.path.dirname(script_dir)))
nacl_dir = os.path.join(src_dir, 'native_client')
# Decide platform specifics.
if options.browser_path:
chrome_filename = options.browser_path
else:
chrome_filename = find_chrome.FindChrome(src_dir, [options.mode])
if chrome_filename is None:
raise Exception('Cannot find a chrome binary - specify one with '
'--browser_path?')
env = dict(os.environ)
if sys.platform in ['win32', 'cygwin']:
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
bits = 64
else:
bits = 32
msvs_path = ';'.join([
r'c:\Program Files\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files\Microsoft Visual Studio 8\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC',
r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools',
])
env['PATH'] += ';' + msvs_path
scons = [python, 'scons.py']
elif sys.platform == 'darwin':
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
else:
p = subprocess.Popen(['file', chrome_filename], stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if p_stdout.find('executable x86_64') >= 0:
bits = 64
else:
bits = 32
scons = [python, 'scons.py']
else:
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif '64' in detect_host_arch.HostArch():
bits = 64
else:
bits = 32
# xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap
# the entire build step rather than each test (browser_headless=1).
# We also need to make sure that there are at least 24 bits per pixel.
# https://code.google.com/p/chromium/issues/detail?id=316687
scons = [
'xvfb-run',
'--auto-servernum',
'--server-args', '-screen 0 1024x768x24',
python, 'scons.py',
]
if options.jobs > 1:
scons.append('-j%d' % options.jobs)
scons.append('disable_tests=%s' % options.disable_tests)
if options.buildbot is not None:
scons.append('buildbot=%s' % (options.buildbot,))
# Clean the output of the previous build.
# Incremental builds can get wedged in weird ways, so we're trading speed
# for reliability.
shutil.rmtree(os.path.join(nacl_dir, 'scons-out'), True)
# check that the HOST (not target) is 64bit
# this is emulating what msvs_env.bat is doing
if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
# 64bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 9.0\\Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 8.0\\Common7\\Tools\\')
else:
# 32bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\'
'Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\'
'Common7\\Tools\\')
# Run nacl/chrome integration tests.
# Note that we have to add nacl_irt_test to --mode in order to get
# inbrowser_test_runner to run.
# TODO(mseaborn): Change it so that inbrowser_test_runner is not a
# special case.
cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits,
'--mode=opt-host,nacl,nacl_irt_test',
'chrome_browser_path=%s' % chrome_filename,
]
if not options.integration_bot and not options.morenacl_bot:
cmd.append('disable_flaky_tests=1')
cmd.append('chrome_browser_tests')
# Propagate path to JSON output if present.
# Note that RunCommand calls sys.exit on errors, so potential errors
# from one command won't be overwritten by another one. Overwriting
# a successful results file with either success or failure is fine.
if options.json_build_results_output_file:
cmd.append('json_build_results_output_file=%s' %
options.json_build_results_output_file)
# Download the toolchain(s).
pkg_ver_dir = os.path.join(nacl_dir, 'build', 'package_version')
RunCommand([python, os.path.join(pkg_ver_dir, 'package_version.py'),
'--exclude', 'arm_trusted',
'--exclude', 'pnacl_newlib',
'--exclude', 'nacl_arm_newlib',
'sync', '--extract'],
nacl_dir, os.environ)
CleanTempDir()
if options.enable_newlib:
RunTests('nacl-newlib', cmd, nacl_dir, env)
if options.enable_glibc:
RunTests('nacl-glibc', cmd + ['--nacl_glibc'], nacl_dir, env)
def MakeCommandLineParser():
parser = optparse.OptionParser()
parser.add_option('-m', '--mode', dest='mode', default='Debug',
help='Debug/Release mode')
parser.add_option('-j', dest='jobs', default=1, type='int',
help='Number of parallel jobs')
parser.add_option('--enable_newlib', dest='enable_newlib', default=-1,
type='int', help='Run newlib tests?')
parser.add_option('--enable_glibc', dest='enable_glibc', default=-1,
type='int', help='Run glibc tests?')
parser.add_option('--json_build_results_output_file',
help='Path to a JSON file for machine-readable output.')
# Deprecated, but passed to us by a script in the Chrome repo.
# Replaced by --enable_glibc=0
parser.add_option('--disable_glibc', dest='disable_glibc',
action='store_true', default=False,
help='Do not test using glibc.')
parser.add_option('--disable_tests', dest='disable_tests',
type='string', default='',
help='Comma-separated list of tests to omit')
builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '')
is_integration_bot = 'nacl-chrome' in builder_name
parser.add_option('--integration_bot', dest='integration_bot',
type='int', default=int(is_integration_bot),
help='Is this an integration bot?')
is_morenacl_bot = (
'More NaCl' in builder_name or
'naclmore' in builder_name)
parser.add_option('--morenacl_bot', dest='morenacl_bot',
type='int', default=int(is_morenacl_bot),
help='Is this a morenacl bot?')
# Not used on the bots, but handy for running the script manually.
parser.add_option('--bits', dest='bits', action='store',
type='int', default=None,
help='32/64')
parser.add_option('--browser_path', dest='browser_path', action='store',
type='string', default=None,
help='Path to the chrome browser.')
parser.add_option('--buildbot', dest='buildbot', action='store',
type='string', default=None,
help='Value passed to scons as buildbot= option.')
return parser
def Main():
parser = MakeCommandLineParser()
options, args = parser.parse_args()
if options.integration_bot and options.morenacl_bot:
parser.error('ERROR: cannot be both an integration bot and a morenacl bot')
# Set defaults for enabling newlib.
if options.enable_newlib == -1:
options.enable_newlib = 1
# Set defaults for enabling glibc.
if options.enable_glibc == -1:
if options.integration_bot or options.morenacl_bot:
options.enable_glibc = 1
else:
options.enable_glibc = 0
if args:
parser.error('ERROR: invalid argument')
BuildAndTest(options)
if __name__ == '__main__':
Main()
| bsd-3-clause |
sandeep-n/incubator-systemml | src/main/python/tests/test_mllearn_numpy.py | 4 | 8902 | #!/usr/bin/python
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# To run:
# - Python 2: `PYSPARK_PYTHON=python2 spark-submit --master local[*] --driver-class-path SystemML.jar test_mllearn_numpy.py`
# - Python 3: `PYSPARK_PYTHON=python3 spark-submit --master local[*] --driver-class-path SystemML.jar test_mllearn_numpy.py`
# Make the `systemml` package importable
import os
import sys
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../")
sys.path.insert(0, path)
import unittest
import numpy as np
from pyspark.context import SparkContext
from pyspark.ml import Pipeline
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.sql import SparkSession
from sklearn import datasets, metrics, neighbors
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, r2_score
from systemml.mllearn import LinearRegression, LogisticRegression, NaiveBayes, SVM
from sklearn import linear_model
sc = SparkContext()
sparkSession = SparkSession.builder.getOrCreate()
import os
def writeColVector(X, fileName):
fileName = os.path.join(os.getcwd(), fileName)
X.tofile(fileName, sep='\n')
metaDataFileContent = '{ "data_type": "matrix", "value_type": "double", "rows":' + str(len(X)) + ', "cols": 1, "nnz": -1, "format": "csv", "author": "systemml-tests", "created": "0000-00-00 00:00:00 PST" }'
with open(fileName+'.mtd', 'w') as text_file:
text_file.write(metaDataFileContent)
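# A hedged usage sketch (the file name is illustrative): write a label column vector in
# the CSV + .mtd metadata layout that SystemML reads.
#
#   y = np.array([1.0, 2.0, 1.0])
#   writeColVector(y, 'y_train.csv')   # creates ./y_train.csv and ./y_train.csv.mtd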
def deleteIfExists(fileName):
try:
os.remove(fileName)
except OSError:
pass
# Currently not integrated with JUnit test
# ~/spark-1.6.1-scala-2.11/bin/spark-submit --master local[*] --driver-class-path SystemML.jar test.py
class TestMLLearn(unittest.TestCase):
def test_logistic(self):
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
X_train = X_digits[:int(.9 * n_samples)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
logistic = LogisticRegression(sparkSession)
logistic.fit(X_train, y_train)
mllearn_predicted = logistic.predict(X_test)
sklearn_logistic = linear_model.LogisticRegression()
sklearn_logistic.fit(X_train, y_train)
self.failUnless(accuracy_score(sklearn_logistic.predict(X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_logistic_mlpipeline(self):
training = sparkSession.createDataFrame([
("a b c d e spark", 1.0),
("b d", 2.0),
("spark f g h", 1.0),
("hadoop mapreduce", 2.0),
("b spark who", 1.0),
("g d a y", 2.0),
("spark fly", 1.0),
("was mapreduce", 2.0),
("e spark program", 1.0),
("a e c l", 2.0),
("spark compile", 1.0),
("hadoop software", 2.0)
], ["text", "label"])
tokenizer = Tokenizer(inputCol="text", outputCol="words")
hashingTF = HashingTF(inputCol="words", outputCol="features", numFeatures=20)
lr = LogisticRegression(sparkSession)
pipeline = Pipeline(stages=[tokenizer, hashingTF, lr])
model = pipeline.fit(training)
test = sparkSession.createDataFrame([
("spark i j k", 1.0),
("l m n", 2.0),
("mapreduce spark", 1.0),
("apache hadoop", 2.0)], ["text", "label"])
result = model.transform(test)
predictionAndLabels = result.select("prediction", "label")
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
evaluator = MulticlassClassificationEvaluator()
score = evaluator.evaluate(predictionAndLabels)
self.failUnless(score == 1.0)
def test_linear_regression(self):
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
regr = LinearRegression(sparkSession, solver='direct-solve')
regr.fit(diabetes_X_train, diabetes_y_train)
mllearn_predicted = regr.predict(diabetes_X_test)
sklearn_regr = linear_model.LinearRegression()
sklearn_regr.fit(diabetes_X_train, diabetes_y_train)
self.failUnless(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_linear_regression_cg(self):
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
regr = LinearRegression(sparkSession, solver='newton-cg')
regr.fit(diabetes_X_train, diabetes_y_train)
mllearn_predicted = regr.predict(diabetes_X_test)
sklearn_regr = linear_model.LinearRegression()
sklearn_regr.fit(diabetes_X_train, diabetes_y_train)
self.failUnless(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_svm(self):
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
X_train = X_digits[:int(.9 * n_samples)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
svm = SVM(sparkSession, is_multi_class=True, tol=0.0001)
mllearn_predicted = svm.fit(X_train, y_train).predict(X_test)
from sklearn import linear_model, svm
clf = svm.LinearSVC()
sklearn_predicted = clf.fit(X_train, y_train).predict(X_test)
accuracy = accuracy_score(sklearn_predicted, mllearn_predicted)
evaluation = 'test_svm accuracy_score(sklearn_predicted, mllearn_predicted) was {}'.format(accuracy)
self.failUnless(accuracy > 0.95, evaluation)
def test_naive_bayes(self):
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
X_train = X_digits[:int(.9 * n_samples)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
nb = NaiveBayes(sparkSession)
mllearn_predicted = nb.fit(X_train, y_train).predict(X_test)
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB()
sklearn_predicted = clf.fit(X_train, y_train).predict(X_test)
self.failUnless(accuracy_score(sklearn_predicted, mllearn_predicted) > 0.95 )
def test_naive_bayes1(self):
categories = ['alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space']
newsgroups_train = fetch_20newsgroups(subset='train', categories=categories)
newsgroups_test = fetch_20newsgroups(subset='test', categories=categories)
vectorizer = TfidfVectorizer()
# Both vectors and vectors_test are SciPy CSR matrix
vectors = vectorizer.fit_transform(newsgroups_train.data)
vectors_test = vectorizer.transform(newsgroups_test.data)
nb = NaiveBayes(sparkSession)
mllearn_predicted = nb.fit(vectors, newsgroups_train.target).predict(vectors_test)
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB()
sklearn_predicted = clf.fit(vectors, newsgroups_train.target).predict(vectors_test)
self.failUnless(accuracy_score(sklearn_predicted, mllearn_predicted) > 0.95 )
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
226262/Neural-Network-Digit-Recognition | plotter.py | 1 | 3993 | import os
import numpy
import matplotlib.pyplot as plt
import pygame, random
class Plotter:
width=300
height=width
array=numpy.full((width,height),0)
xMin=width
xMax=0
yMin=height
yMax=0
edge=0
isAnythingDrew = False
def write_rad(self,x,y,promien):
if promien>0:
if (x-promien)>0 and (x+promien)<self.width and (y-promien)>0 and (y+promien)<self.width:
j=0
for x in range(x-promien,x+promien+1):
if j<=promien:
self.array[x][y+j]=1
self.array[x][y-j]=1
j=j+1
if j>promien:
j=j-1
self.array[x][y+j]=1
self.write_rad(x,y,promien-1)
def cut_and_scale_down(self):
if (self.yMax-self.yMin)>=(self.xMax-self.xMin):
edge=self.yMax-self.yMin
else:
edge=self.xMax-self.xMin
frame=56
sideFrame=(frame/2)
tmp_array=numpy.full(((edge+frame),(edge+frame)),0)
tmp_scaled_array=numpy.full((28,28),0)
for j in range(int((((edge+frame)/2)-(self.xMax-self.xMin)/2)),int(((edge+frame)/2)+((self.xMax-self.xMin)/2))):
for i in range(int(sideFrame),int(edge+sideFrame)):
tmp_array[i][j]=self.array[self.yMin+i-int(sideFrame)][self.xMin+j-int(((edge+frame)/2)-((self.xMax-self.xMin)/2))]
for i in range(0,(edge+frame-1)):
for j in range(0,(edge+frame-1)):
if tmp_array[i][j]==1:
tmp_scaled_array[int((i*28)/(edge+frame))][int((j*28)/(edge+frame))]=1
self.array=tmp_scaled_array
# print(self.array)
def input_stuff(self):
screen = pygame.display.set_mode((self.width,self.height))
draw_on = False
last_pos = (0, 0)
color = (255, 255, 255)
radius = 3
def roundline(srf, color, start, end, radius=1):
self.isAnythingDrew = True
dx = end[0]-start[0]
dy = end[1]-start[1]
distance = max(abs(dx), abs(dy))
for i in range(distance):
x = int( start[0]+float(i)/distance*dx)
y = int( start[1]+float(i)/distance*dy)
if x<self.xMin:
self.xMin=x
if x>self.xMax:
self.xMax=x
if y<self.yMin:
self.yMin=y
if y>self.yMax:
self.yMax=y
self.write_rad(y,x,2)
pygame.draw.circle(srf, color, (x, y), radius)
try:
while True:
e = pygame.event.wait()
if e.type == pygame.QUIT:
raise StopIteration
if e.type == pygame.MOUSEBUTTONDOWN:
# color = (255, 255, 255)
# pygame.draw.circle(screen, color, e.pos, radius)
draw_on = True
if e.type == pygame.MOUSEBUTTONUP:
draw_on = False
if e.type == pygame.MOUSEMOTION:
if draw_on:
pygame.draw.circle(screen, color, e.pos, radius)
roundline(screen, color, e.pos, last_pos, radius)
last_pos = e.pos
pygame.display.flip()
except StopIteration:
pass
pygame.quit()
if(self.isAnythingDrew):
self.cut_and_scale_down()
return self.array
else:
print("You haven't drew anything :c")
exit()
def flush(self):
self.array=numpy.full((self.width,self.height),0)
self.xMin=self.width
self.xMax=0
self.yMin=self.height
self.yMax=0
self.edge=0
self.isAnythingDrew = False
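#
# A hedged usage sketch (opens a pygame drawing window; all names come from this class):
#
#   plotter = Plotter()
#   digit = plotter.input_stuff()   # draw, close the window; returns a 28x28 0/1 numpy array
#   plotter.flush()                 # reset state before capturing the next digit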
| gpl-3.0 |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/core/interactiveshell.py | 3 | 121698 | # -*- coding: utf-8 -*-
"""Main IPython class."""
#-----------------------------------------------------------------------------
# Copyright (C) 2001 Janko Hauser <[email protected]>
# Copyright (C) 2001-2007 Fernando Perez. <[email protected]>
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import with_statement
from __future__ import absolute_import
import __builtin__ as builtin_mod
import __future__
import abc
import ast
import atexit
import os
import re
import runpy
import sys
import tempfile
import types
import urllib
from io import open as io_open
from IPython.config.configurable import SingletonConfigurable
from IPython.core import debugger, oinspect
from IPython.core import magic
from IPython.core import page
from IPython.core import prefilter
from IPython.core import shadowns
from IPython.core import ultratb
from IPython.core.alias import AliasManager, AliasError
from IPython.core.autocall import ExitAutocall
from IPython.core.builtin_trap import BuiltinTrap
from IPython.core.compilerop import CachingCompiler
from IPython.core.display_trap import DisplayTrap
from IPython.core.displayhook import DisplayHook
from IPython.core.displaypub import DisplayPublisher
from IPython.core.error import UsageError
from IPython.core.extensions import ExtensionManager
from IPython.core.fakemodule import FakeModule, init_fakemod_dict
from IPython.core.formatters import DisplayFormatter
from IPython.core.history import HistoryManager
from IPython.core.inputsplitter import IPythonInputSplitter, ESC_MAGIC, ESC_MAGIC2
from IPython.core.logger import Logger
from IPython.core.macro import Macro
from IPython.core.payload import PayloadManager
from IPython.core.plugin import PluginManager
from IPython.core.prefilter import PrefilterManager
from IPython.core.profiledir import ProfileDir
from IPython.core.pylabtools import pylab_activate
from IPython.core.prompts import PromptManager
from IPython.utils import PyColorize
from IPython.utils import io
from IPython.utils import py3compat
from IPython.utils import openpy
from IPython.utils.doctestreload import doctest_reload
from IPython.utils.io import ask_yes_no
from IPython.utils.ipstruct import Struct
from IPython.utils.path import get_home_dir, get_ipython_dir, get_py_filename, unquote_filename
from IPython.utils.pickleshare import PickleShareDB
from IPython.utils.process import system, getoutput
from IPython.utils.strdispatch import StrDispatch
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.text import (format_screen, LSString, SList,
DollarFormatter)
from IPython.utils.traitlets import (Integer, CBool, CaselessStrEnum, Enum,
List, Unicode, Instance, Type)
from IPython.utils.warn import warn, error
import IPython.core.hooks
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# compiled regexps for autoindent management
dedent_re = re.compile(r'^\s+raise|^\s+return|^\s+pass')
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def softspace(file, newvalue):
"""Copied from code.py, to remove the dependency"""
oldvalue = 0
try:
oldvalue = file.softspace
except AttributeError:
pass
try:
file.softspace = newvalue
except (AttributeError, TypeError):
# "attribute-less object" or "read-only attributes"
pass
return oldvalue
def no_op(*a, **kw): pass
class NoOpContext(object):
def __enter__(self): pass
def __exit__(self, type, value, traceback): pass
no_op_context = NoOpContext()
class SpaceInInput(Exception): pass
class Bunch: pass
def get_default_colors():
if sys.platform=='darwin':
return "LightBG"
elif os.name=='nt':
return 'Linux'
else:
return 'Linux'
class SeparateUnicode(Unicode):
"""A Unicode subclass to validate separate_in, separate_out, etc.
This is a Unicode based trait that converts '0'->'' and '\\n'->'\n'.
"""
def validate(self, obj, value):
if value == '0': value = ''
value = value.replace('\\n','\n')
return super(SeparateUnicode, self).validate(obj, value)
class ReadlineNoRecord(object):
"""Context manager to execute some code, then reload readline history
so that interactive input to the code doesn't appear when pressing up."""
def __init__(self, shell):
self.shell = shell
self._nested_level = 0
def __enter__(self):
if self._nested_level == 0:
try:
self.orig_length = self.current_length()
self.readline_tail = self.get_readline_tail()
except (AttributeError, IndexError): # Can fail with pyreadline
self.orig_length, self.readline_tail = 999999, []
self._nested_level += 1
def __exit__(self, type, value, traceback):
self._nested_level -= 1
if self._nested_level == 0:
# Try clipping the end if it's got longer
try:
e = self.current_length() - self.orig_length
if e > 0:
for _ in range(e):
self.shell.readline.remove_history_item(self.orig_length)
# If it still doesn't match, just reload readline history.
if self.current_length() != self.orig_length \
or self.get_readline_tail() != self.readline_tail:
self.shell.refill_readline_hist()
except (AttributeError, IndexError):
pass
# Returning False will cause exceptions to propagate
return False
def current_length(self):
return self.shell.readline.get_current_history_length()
def get_readline_tail(self, n=10):
"""Get the last n items in readline history."""
end = self.shell.readline.get_current_history_length() + 1
start = max(end-n, 1)
ghi = self.shell.readline.get_history_item
return [ghi(x) for x in range(start, end)]
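# A hedged usage sketch for ReadlineNoRecord (an instance is used later in this module as
# self.readline_no_record); it runs code that prompts for input without leaving that input
# in the interactive readline history:
#
#   with ReadlineNoRecord(shell):
#       shell.run_cell("raw_input('press enter: ')")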
#-----------------------------------------------------------------------------
# Main IPython class
#-----------------------------------------------------------------------------
class InteractiveShell(SingletonConfigurable):
"""An enhanced, interactive shell for Python."""
_instance = None
autocall = Enum((0,1,2), default_value=0, config=True, help=
"""
Make IPython automatically call any callable object even if you didn't
type explicit parentheses. For example, 'str 43' becomes 'str(43)'
automatically. The value can be '0' to disable the feature, '1' for
'smart' autocall, where it is not applied if there are no more
arguments on the line, and '2' for 'full' autocall, where all callable
objects are automatically called (even if no arguments are present).
"""
)
# TODO: remove all autoindent logic and put into frontends.
# We can't do this yet because even runlines uses the autoindent.
autoindent = CBool(True, config=True, help=
"""
Autoindent IPython code entered interactively.
"""
)
automagic = CBool(True, config=True, help=
"""
Enable magic commands to be called without the leading %.
"""
)
cache_size = Integer(1000, config=True, help=
"""
Set the size of the output cache. The default is 1000, you can
change it permanently in your config file. Setting it to 0 completely
disables the caching system, and the minimum value accepted is 20 (if
you provide a value less than 20, it is reset to 0 and a warning is
issued). This limit is defined because otherwise you'll spend more
time re-flushing a too small cache than working
"""
)
color_info = CBool(True, config=True, help=
"""
Use colors for displaying information about objects. Because this
information is passed through a pager (like 'less'), and some pagers
get confused with color codes, this capability can be turned off.
"""
)
colors = CaselessStrEnum(('NoColor','LightBG','Linux'),
default_value=get_default_colors(), config=True,
help="Set the color scheme (NoColor, Linux, or LightBG)."
)
colors_force = CBool(False, help=
"""
Force use of ANSI color codes, regardless of OS and readline
availability.
"""
# FIXME: This is essentially a hack to allow ZMQShell to show colors
# without readline on Win32. When the ZMQ formatting system is
# refactored, this should be removed.
)
debug = CBool(False, config=True)
deep_reload = CBool(False, config=True, help=
"""
Enable deep (recursive) reloading by default. IPython can use the
deep_reload module which reloads changes in modules recursively (it
replaces the reload() function, so you don't need to change anything to
use it). deep_reload() forces a full reload of modules whose code may
have changed, which the default reload() function does not. When
deep_reload is off, IPython will use the normal reload(), but
deep_reload will still be available as dreload().
"""
)
disable_failing_post_execute = CBool(False, config=True,
help="Don't call post-execute functions that have failed in the past."
)
display_formatter = Instance(DisplayFormatter)
displayhook_class = Type(DisplayHook)
display_pub_class = Type(DisplayPublisher)
exit_now = CBool(False)
exiter = Instance(ExitAutocall)
def _exiter_default(self):
return ExitAutocall(self)
# Monotonically increasing execution counter
execution_count = Integer(1)
filename = Unicode("<ipython console>")
ipython_dir= Unicode('', config=True) # Set to get_ipython_dir() in __init__
# Input splitter, to split entire cells of input into either individual
# interactive statements or whole blocks.
input_splitter = Instance('IPython.core.inputsplitter.IPythonInputSplitter',
(), {})
logstart = CBool(False, config=True, help=
"""
Start logging to the default log file.
"""
)
logfile = Unicode('', config=True, help=
"""
The name of the logfile to use.
"""
)
logappend = Unicode('', config=True, help=
"""
Start logging to the given file in append mode.
"""
)
object_info_string_level = Enum((0,1,2), default_value=0,
config=True)
pdb = CBool(False, config=True, help=
"""
Automatically call the pdb debugger after every exception.
"""
)
multiline_history = CBool(sys.platform != 'win32', config=True,
help="Save multi-line entries as one entry in readline history"
)
# deprecated prompt traits:
prompt_in1 = Unicode('In [\\#]: ', config=True,
help="Deprecated, use PromptManager.in_template")
prompt_in2 = Unicode(' .\\D.: ', config=True,
help="Deprecated, use PromptManager.in2_template")
prompt_out = Unicode('Out[\\#]: ', config=True,
help="Deprecated, use PromptManager.out_template")
prompts_pad_left = CBool(True, config=True,
help="Deprecated, use PromptManager.justify")
def _prompt_trait_changed(self, name, old, new):
table = {
'prompt_in1' : 'in_template',
'prompt_in2' : 'in2_template',
'prompt_out' : 'out_template',
'prompts_pad_left' : 'justify',
}
warn("InteractiveShell.{name} is deprecated, use PromptManager.{newname}\n".format(
name=name, newname=table[name])
)
# protect against weird cases where self.config may not exist:
if self.config is not None:
# propagate to corresponding PromptManager trait
setattr(self.config.PromptManager, table[name], new)
_prompt_in1_changed = _prompt_trait_changed
_prompt_in2_changed = _prompt_trait_changed
_prompt_out_changed = _prompt_trait_changed
_prompt_pad_left_changed = _prompt_trait_changed
show_rewritten_input = CBool(True, config=True,
help="Show rewritten input, e.g. for autocall."
)
quiet = CBool(False, config=True)
history_length = Integer(10000, config=True)
# The readline stuff will eventually be moved to the terminal subclass
# but for now, we can't do that as readline is welded in everywhere.
readline_use = CBool(True, config=True)
readline_remove_delims = Unicode('-/~', config=True)
# don't use \M- bindings by default, because they
# conflict with 8-bit encodings. See gh-58,gh-88
readline_parse_and_bind = List([
'tab: complete',
'"\C-l": clear-screen',
'set show-all-if-ambiguous on',
'"\C-o": tab-insert',
'"\C-r": reverse-search-history',
'"\C-s": forward-search-history',
'"\C-p": history-search-backward',
'"\C-n": history-search-forward',
'"\e[A": history-search-backward',
'"\e[B": history-search-forward',
'"\C-k": kill-line',
'"\C-u": unix-line-discard',
], allow_none=False, config=True)
ast_node_interactivity = Enum(['all', 'last', 'last_expr', 'none'],
default_value='last_expr', config=True,
help="""
'all', 'last', 'last_expr' or 'none', specifying which nodes should be
run interactively (displaying output from expressions).""")
# TODO: this part of prompt management should be moved to the frontends.
# Use custom TraitTypes that convert '0'->'' and '\\n'->'\n'
separate_in = SeparateUnicode('\n', config=True)
separate_out = SeparateUnicode('', config=True)
separate_out2 = SeparateUnicode('', config=True)
wildcards_case_sensitive = CBool(True, config=True)
xmode = CaselessStrEnum(('Context','Plain', 'Verbose'),
default_value='Context', config=True)
# Subcomponents of InteractiveShell
alias_manager = Instance('IPython.core.alias.AliasManager')
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager')
builtin_trap = Instance('IPython.core.builtin_trap.BuiltinTrap')
display_trap = Instance('IPython.core.display_trap.DisplayTrap')
extension_manager = Instance('IPython.core.extensions.ExtensionManager')
plugin_manager = Instance('IPython.core.plugin.PluginManager')
payload_manager = Instance('IPython.core.payload.PayloadManager')
history_manager = Instance('IPython.core.history.HistoryManager')
magics_manager = Instance('IPython.core.magic.MagicsManager')
profile_dir = Instance('IPython.core.application.ProfileDir')
@property
def profile(self):
if self.profile_dir is not None:
name = os.path.basename(self.profile_dir.location)
return name.replace('profile_','')
# Private interface
_post_execute = Instance(dict)
def __init__(self, config=None, ipython_dir=None, profile_dir=None,
user_module=None, user_ns=None,
custom_exceptions=((), None)):
# This is where traits with a config_key argument are updated
# from the values on config.
super(InteractiveShell, self).__init__(config=config)
self.configurables = [self]
# These are relatively independent and stateless
self.init_ipython_dir(ipython_dir)
self.init_profile_dir(profile_dir)
self.init_instance_attrs()
self.init_environment()
# Check if we're in a virtualenv, and set up sys.path.
self.init_virtualenv()
# Create namespaces (user_ns, user_global_ns, etc.)
self.init_create_namespaces(user_module, user_ns)
# This has to be done after init_create_namespaces because it uses
# something in self.user_ns, but before init_sys_modules, which
# is the first thing to modify sys.
# TODO: When we override sys.stdout and sys.stderr before this class
# is created, we are saving the overridden ones here. Not sure if this
# is what we want to do.
self.save_sys_module_state()
self.init_sys_modules()
# While we're trying to have each part of the code directly access what
# it needs without keeping redundant references to objects, we have too
# much legacy code that expects ip.db to exist.
self.db = PickleShareDB(os.path.join(self.profile_dir.location, 'db'))
self.init_history()
self.init_encoding()
self.init_prefilter()
self.init_syntax_highlighting()
self.init_hooks()
self.init_pushd_popd_magic()
# self.init_traceback_handlers used to be here, but we moved it below
# because it and init_io have to come after init_readline.
self.init_user_ns()
self.init_logger()
self.init_alias()
self.init_builtins()
# The following was in post_config_initialization
self.init_inspector()
# init_readline() must come before init_io(), because init_io uses
# readline related things.
self.init_readline()
# We save this here in case user code replaces raw_input, but it needs
# to be after init_readline(), because PyPy's readline works by replacing
# raw_input.
if py3compat.PY3:
self.raw_input_original = input
else:
self.raw_input_original = raw_input
# init_completer must come after init_readline, because it needs to
# know whether readline is present or not system-wide to configure the
# completers, since the completion machinery can now operate
# independently of readline (e.g. over the network)
self.init_completer()
# TODO: init_io() needs to happen before init_traceback handlers
# because the traceback handlers hardcode the stdout/stderr streams.
# This logic in in debugger.Pdb and should eventually be changed.
self.init_io()
self.init_traceback_handlers(custom_exceptions)
self.init_prompts()
self.init_display_formatter()
self.init_display_pub()
self.init_displayhook()
self.init_reload_doctest()
self.init_magics()
self.init_logstart()
self.init_pdb()
self.init_extension_manager()
self.init_plugin_manager()
self.init_payload()
self.hooks.late_startup_hook()
atexit.register(self.atexit_operations)
def get_ipython(self):
"""Return the currently running IPython instance."""
return self
#-------------------------------------------------------------------------
# Trait changed handlers
#-------------------------------------------------------------------------
def _ipython_dir_changed(self, name, new):
if not os.path.isdir(new):
os.makedirs(new, mode = 0777)
def set_autoindent(self,value=None):
"""Set the autoindent flag, checking for readline support.
If called with no arguments, it acts as a toggle."""
if value != 0 and not self.has_readline:
if os.name == 'posix':
warn("The auto-indent feature requires the readline library")
self.autoindent = 0
return
if value is None:
self.autoindent = not self.autoindent
else:
self.autoindent = value
#-------------------------------------------------------------------------
# init_* methods called by __init__
#-------------------------------------------------------------------------
def init_ipython_dir(self, ipython_dir):
if ipython_dir is not None:
self.ipython_dir = ipython_dir
return
self.ipython_dir = get_ipython_dir()
def init_profile_dir(self, profile_dir):
if profile_dir is not None:
self.profile_dir = profile_dir
return
self.profile_dir =\
ProfileDir.create_profile_dir_by_name(self.ipython_dir, 'default')
def init_instance_attrs(self):
self.more = False
# command compiler
self.compile = CachingCompiler()
# Make an empty namespace, which extension writers can rely on both
# existing and NEVER being used by ipython itself. This gives them a
# convenient location for storing additional information and state
# their extensions may require, without fear of collisions with other
# ipython names that may develop later.
self.meta = Struct()
# Temporary files used for various purposes. Deleted at exit.
self.tempfiles = []
# Keep track of readline usage (later set by init_readline)
self.has_readline = False
# keep track of where we started running (mainly for crash post-mortem)
# This is not being used anywhere currently.
self.starting_dir = os.getcwdu()
# Indentation management
self.indent_current_nsp = 0
# Dict to track post-execution functions that have been registered
self._post_execute = {}
def init_environment(self):
"""Any changes we need to make to the user's environment."""
pass
def init_encoding(self):
# Get system encoding at startup time. Certain terminals (like Emacs
# under Win32 have it set to None, and we need to have a known valid
# encoding to use in the raw_input() method
try:
self.stdin_encoding = sys.stdin.encoding or 'ascii'
except AttributeError:
self.stdin_encoding = 'ascii'
def init_syntax_highlighting(self):
# Python source parser/formatter for syntax highlighting
pyformat = PyColorize.Parser().format
self.pycolorize = lambda src: pyformat(src,'str',self.colors)
def init_pushd_popd_magic(self):
# for pushd/popd management
self.home_dir = get_home_dir()
self.dir_stack = []
def init_logger(self):
self.logger = Logger(self.home_dir, logfname='ipython_log.py',
logmode='rotate')
def init_logstart(self):
"""Initialize logging in case it was requested at the command line.
"""
if self.logappend:
self.magic('logstart %s append' % self.logappend)
elif self.logfile:
self.magic('logstart %s' % self.logfile)
elif self.logstart:
self.magic('logstart')
def init_builtins(self):
# A single, static flag that we set to True. Its presence indicates
# that an IPython shell has been created, and we make no attempts at
# removing on exit or representing the existence of more than one
# IPython at a time.
builtin_mod.__dict__['__IPYTHON__'] = True
# In 0.11 we introduced '__IPYTHON__active' as an integer we'd try to
# manage on enter/exit, but with all our shells it's virtually
# impossible to get all the cases right. We're leaving the name in for
# those who adapted their codes to check for this flag, but will
# eventually remove it after a few more releases.
builtin_mod.__dict__['__IPYTHON__active'] = \
'Deprecated, check for __IPYTHON__'
self.builtin_trap = BuiltinTrap(shell=self)
def init_inspector(self):
# Object inspector
self.inspector = oinspect.Inspector(oinspect.InspectColors,
PyColorize.ANSICodeColors,
'NoColor',
self.object_info_string_level)
def init_io(self):
# This will just use sys.stdout and sys.stderr. If you want to
# override sys.stdout and sys.stderr themselves, you need to do that
# *before* instantiating this class, because io holds onto
# references to the underlying streams.
if sys.platform == 'win32' and self.has_readline:
io.stdout = io.stderr = io.IOStream(self.readline._outputfile)
else:
io.stdout = io.IOStream(sys.stdout)
io.stderr = io.IOStream(sys.stderr)
def init_prompts(self):
self.prompt_manager = PromptManager(shell=self, config=self.config)
self.configurables.append(self.prompt_manager)
# Set system prompts, so that scripts can decide if they are running
# interactively.
sys.ps1 = 'In : '
sys.ps2 = '...: '
sys.ps3 = 'Out: '
def init_display_formatter(self):
self.display_formatter = DisplayFormatter(config=self.config)
self.configurables.append(self.display_formatter)
def init_display_pub(self):
self.display_pub = self.display_pub_class(config=self.config)
self.configurables.append(self.display_pub)
def init_displayhook(self):
# Initialize displayhook, set in/out prompts and printing system
self.displayhook = self.displayhook_class(
config=self.config,
shell=self,
cache_size=self.cache_size,
)
self.configurables.append(self.displayhook)
# This is a context manager that installs/removes the displayhook at
# the appropriate time.
self.display_trap = DisplayTrap(hook=self.displayhook)
def init_reload_doctest(self):
# Do a proper resetting of doctest, including the necessary displayhook
# monkeypatching
try:
doctest_reload()
except ImportError:
warn("doctest module does not exist.")
def init_virtualenv(self):
"""Add a virtualenv to sys.path so the user can import modules from it.
This isn't perfect: it doesn't use the Python interpreter with which the
virtualenv was built, and it ignores the --no-site-packages option. A
warning will appear suggesting the user installs IPython in the
virtualenv, but for many cases, it probably works well enough.
Adapted from code snippets online.
http://blog.ufsoft.org/2009/1/29/ipython-and-virtualenv
"""
if 'VIRTUAL_ENV' not in os.environ:
# Not in a virtualenv
return
if sys.executable.startswith(os.environ['VIRTUAL_ENV']):
# Running properly in the virtualenv, don't need to do anything
return
warn("Attempting to work in a virtualenv. If you encounter problems, please "
"install IPython inside the virtualenv.\n")
if sys.platform == "win32":
virtual_env = os.path.join(os.environ['VIRTUAL_ENV'], 'Lib', 'site-packages')
else:
virtual_env = os.path.join(os.environ['VIRTUAL_ENV'], 'lib',
'python%d.%d' % sys.version_info[:2], 'site-packages')
import site
sys.path.insert(0, virtual_env)
site.addsitedir(virtual_env)
#-------------------------------------------------------------------------
# Things related to injections into the sys module
#-------------------------------------------------------------------------
def save_sys_module_state(self):
"""Save the state of hooks in the sys module.
This has to be called after self.user_module is created.
"""
self._orig_sys_module_state = {}
self._orig_sys_module_state['stdin'] = sys.stdin
self._orig_sys_module_state['stdout'] = sys.stdout
self._orig_sys_module_state['stderr'] = sys.stderr
self._orig_sys_module_state['excepthook'] = sys.excepthook
self._orig_sys_modules_main_name = self.user_module.__name__
self._orig_sys_modules_main_mod = sys.modules.get(self.user_module.__name__)
def restore_sys_module_state(self):
"""Restore the state of the sys module."""
try:
for k, v in self._orig_sys_module_state.iteritems():
setattr(sys, k, v)
except AttributeError:
pass
# Reset what was done in self.init_sys_modules
if self._orig_sys_modules_main_mod is not None:
sys.modules[self._orig_sys_modules_main_name] = self._orig_sys_modules_main_mod
#-------------------------------------------------------------------------
# Things related to hooks
#-------------------------------------------------------------------------
def init_hooks(self):
# hooks holds pointers used for user-side customizations
self.hooks = Struct()
self.strdispatchers = {}
# Set all default hooks, defined in the IPython.hooks module.
hooks = IPython.core.hooks
for hook_name in hooks.__all__:
# default hooks have priority 100, i.e. low; user hooks should have
# 0-100 priority
self.set_hook(hook_name,getattr(hooks,hook_name), 100)
def set_hook(self,name,hook, priority = 50, str_key = None, re_key = None):
"""set_hook(name,hook) -> sets an internal IPython hook.
IPython exposes some of its internal API as user-modifiable hooks. By
adding your function to one of these hooks, you can modify IPython's
behavior to call at runtime your own routines."""
# At some point in the future, this should validate the hook before it
# accepts it. Probably at least check that the hook takes the number
# of args it's supposed to.
f = types.MethodType(hook,self)
# check if the hook is for strdispatcher first
if str_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_s(str_key, f, priority )
self.strdispatchers[name] = sdp
return
if re_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_re(re.compile(re_key), f, priority )
self.strdispatchers[name] = sdp
return
dp = getattr(self.hooks, name, None)
if name not in IPython.core.hooks.__all__:
print "Warning! Hook '%s' is not one of %s" % \
(name, IPython.core.hooks.__all__ )
if not dp:
dp = IPython.core.hooks.CommandChainDispatcher()
try:
dp.add(f,priority)
except AttributeError:
# it was not commandchain, plain old func - replace
dp = f
setattr(self.hooks,name, dp)
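    # A hedged usage sketch for set_hook; 'editor' is one of the standard hook names in
    # IPython.core.hooks.__all__, and the callable below mirrors the default editor hook's
    # signature (the nano command is just an example):
    #
    #   def myeditor(self, filename, linenum=None, wait=True):
    #       import subprocess
    #       subprocess.call(['nano', filename])
    #   get_ipython().set_hook('editor', myeditor)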
def register_post_execute(self, func):
"""Register a function for calling after code execution.
"""
if not callable(func):
raise ValueError('argument %s must be callable' % func)
self._post_execute[func] = True
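    # A hedged usage sketch for register_post_execute; the registered callable takes no
    # arguments and is invoked after each executed cell:
    #
    #   def announce():
    #       print "post-execute ran"
    #   get_ipython().register_post_execute(announce)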
#-------------------------------------------------------------------------
# Things related to the "main" module
#-------------------------------------------------------------------------
def new_main_mod(self,ns=None):
"""Return a new 'main' module object for user code execution.
"""
main_mod = self._user_main_module
init_fakemod_dict(main_mod,ns)
return main_mod
def cache_main_mod(self,ns,fname):
"""Cache a main module's namespace.
When scripts are executed via %run, we must keep a reference to the
namespace of their __main__ module (a FakeModule instance) around so
that Python doesn't clear it, rendering objects defined therein
useless.
This method keeps said reference in a private dict, keyed by the
absolute path of the module object (which corresponds to the script
path). This way, for multiple executions of the same script we only
keep one copy of the namespace (the last one), thus preventing memory
leaks from old references while allowing the objects from the last
execution to be accessible.
Note: we can not allow the actual FakeModule instances to be deleted,
because of how Python tears down modules (it hard-sets all their
references to None without regard for reference counts). This method
must therefore make a *copy* of the given namespace, to allow the
original module's __dict__ to be cleared and reused.
Parameters
----------
ns : a namespace (a dict, typically)
fname : str
Filename associated with the namespace.
Examples
--------
In [10]: import IPython
In [11]: _ip.cache_main_mod(IPython.__dict__,IPython.__file__)
In [12]: IPython.__file__ in _ip._main_ns_cache
Out[12]: True
"""
self._main_ns_cache[os.path.abspath(fname)] = ns.copy()
def clear_main_mod_cache(self):
"""Clear the cache of main modules.
Mainly for use by utilities like %reset.
Examples
--------
In [15]: import IPython
In [16]: _ip.cache_main_mod(IPython.__dict__,IPython.__file__)
In [17]: len(_ip._main_ns_cache) > 0
Out[17]: True
In [18]: _ip.clear_main_mod_cache()
In [19]: len(_ip._main_ns_cache) == 0
Out[19]: True
"""
self._main_ns_cache.clear()
#-------------------------------------------------------------------------
# Things related to debugging
#-------------------------------------------------------------------------
def init_pdb(self):
# Set calling of pdb on exceptions
# self.call_pdb is a property
self.call_pdb = self.pdb
def _get_call_pdb(self):
return self._call_pdb
def _set_call_pdb(self,val):
if val not in (0,1,False,True):
raise ValueError,'new call_pdb value must be boolean'
# store value in instance
self._call_pdb = val
# notify the actual exception handlers
self.InteractiveTB.call_pdb = val
call_pdb = property(_get_call_pdb,_set_call_pdb,None,
'Control auto-activation of pdb at exceptions')
def debugger(self,force=False):
"""Call the pydb/pdb debugger.
Keywords:
- force(False): by default, this routine checks the instance call_pdb
flag and does not actually invoke the debugger if the flag is false.
The 'force' option forces the debugger to activate even if the flag
is false.
"""
if not (force or self.call_pdb):
return
if not hasattr(sys,'last_traceback'):
error('No traceback has been produced, nothing to debug.')
return
# use pydb if available
if debugger.has_pydb:
from pydb import pm
else:
# fallback to our internal debugger
pm = lambda : self.InteractiveTB.debugger(force=True)
with self.readline_no_record:
pm()
#-------------------------------------------------------------------------
# Things related to IPython's various namespaces
#-------------------------------------------------------------------------
default_user_namespaces = True
def init_create_namespaces(self, user_module=None, user_ns=None):
# Create the namespace where the user will operate. user_ns is
# normally the only one used, and it is passed to the exec calls as
# the locals argument. But we do carry a user_global_ns namespace
        # given as the exec 'globals' argument.  This is useful in embedding
# situations where the ipython shell opens in a context where the
# distinction between locals and globals is meaningful. For
# non-embedded contexts, it is just the same object as the user_ns dict.
# FIXME. For some strange reason, __builtins__ is showing up at user
# level as a dict instead of a module. This is a manual fix, but I
# should really track down where the problem is coming from. Alex
# Schmolck reported this problem first.
# A useful post by Alex Martelli on this topic:
# Re: inconsistent value from __builtins__
# Von: Alex Martelli <[email protected]>
# Datum: Freitag 01 Oktober 2004 04:45:34 nachmittags/abends
# Gruppen: comp.lang.python
# Michael Hohn <[email protected]> wrote:
# > >>> print type(builtin_check.get_global_binding('__builtins__'))
# > <type 'dict'>
# > >>> print type(__builtins__)
# > <type 'module'>
# > Is this difference in return value intentional?
# Well, it's documented that '__builtins__' can be either a dictionary
# or a module, and it's been that way for a long time. Whether it's
# intentional (or sensible), I don't know. In any case, the idea is
# that if you need to access the built-in namespace directly, you
# should start with "import __builtin__" (note, no 's') which will
# definitely give you a module. Yeah, it's somewhat confusing:-(.
# These routines return a properly built module and dict as needed by
# the rest of the code, and can also be used by extension writers to
# generate properly initialized namespaces.
if (user_ns is not None) or (user_module is not None):
self.default_user_namespaces = False
self.user_module, self.user_ns = self.prepare_user_module(user_module, user_ns)
# A record of hidden variables we have added to the user namespace, so
# we can list later only variables defined in actual interactive use.
self.user_ns_hidden = set()
# Now that FakeModule produces a real module, we've run into a nasty
# problem: after script execution (via %run), the module where the user
# code ran is deleted. Now that this object is a true module (needed
        # so doctest and other tools work correctly), the Python module
# teardown mechanism runs over it, and sets to None every variable
# present in that module. Top-level references to objects from the
# script survive, because the user_ns is updated with them. However,
# calling functions defined in the script that use other things from
# the script will fail, because the function's closure had references
# to the original objects, which are now all None. So we must protect
# these modules from deletion by keeping a cache.
#
# To avoid keeping stale modules around (we only need the one from the
# last run), we use a dict keyed with the full path to the script, so
# only the last version of the module is held in the cache. Note,
# however, that we must cache the module *namespace contents* (their
# __dict__). Because if we try to cache the actual modules, old ones
# (uncached) could be destroyed while still holding references (such as
        # those held by GUI objects that tend to be long-lived).
#
# The %reset command will flush this cache. See the cache_main_mod()
# and clear_main_mod_cache() methods for details on use.
# This is the cache used for 'main' namespaces
self._main_ns_cache = {}
# And this is the single instance of FakeModule whose __dict__ we keep
# copying and clearing for reuse on each %run
self._user_main_module = FakeModule()
# A table holding all the namespaces IPython deals with, so that
# introspection facilities can search easily.
self.ns_table = {'user_global':self.user_module.__dict__,
'user_local':self.user_ns,
'builtin':builtin_mod.__dict__
}
@property
def user_global_ns(self):
return self.user_module.__dict__
def prepare_user_module(self, user_module=None, user_ns=None):
"""Prepare the module and namespace in which user code will be run.
When IPython is started normally, both parameters are None: a new module
is created automatically, and its __dict__ used as the namespace.
If only user_module is provided, its __dict__ is used as the namespace.
If only user_ns is provided, a dummy module is created, and user_ns
becomes the global namespace. If both are provided (as they may be
when embedding), user_ns is the local namespace, and user_module
provides the global namespace.
Parameters
----------
user_module : module, optional
The current user module in which IPython is being run. If None,
a clean module will be created.
user_ns : dict, optional
A namespace in which to run interactive commands.
Returns
-------
A tuple of user_module and user_ns, each properly initialised.
"""
if user_module is None and user_ns is not None:
user_ns.setdefault("__name__", "__main__")
class DummyMod(object):
"A dummy module used for IPython's interactive namespace."
pass
user_module = DummyMod()
user_module.__dict__ = user_ns
if user_module is None:
user_module = types.ModuleType("__main__",
doc="Automatically created module for IPython interactive environment")
# We must ensure that __builtin__ (without the final 's') is always
# available and pointing to the __builtin__ *module*. For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
user_module.__dict__.setdefault('__builtin__', builtin_mod)
user_module.__dict__.setdefault('__builtins__', builtin_mod)
if user_ns is None:
user_ns = user_module.__dict__
return user_module, user_ns
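    # A minimal sketch (kept as a comment so nothing here executes) of how the
    # two arguments interact when embedding; `shell` and `my_ns` are assumed
    # names, not part of this module:
    #
    #   my_ns = {'x': 1}
    #   mod, ns = shell.prepare_user_module(user_ns=my_ns)
    #   # mod is a DummyMod whose __dict__ *is* my_ns, and my_ns now also
    #   # carries __name__, __builtin__ and __builtins__ entries.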
def init_sys_modules(self):
# We need to insert into sys.modules something that looks like a
# module but which accesses the IPython namespace, for shelve and
# pickle to work interactively. Normally they rely on getting
# everything out of __main__, but for embedding purposes each IPython
# instance has its own private namespace, so we can't go shoving
# everything into __main__.
# note, however, that we should only do this for non-embedded
# ipythons, which really mimic the __main__.__dict__ with their own
# namespace. Embedded instances, on the other hand, should not do
# this because they need to manage the user local/global namespaces
# only, but they live within a 'normal' __main__ (meaning, they
# shouldn't overtake the execution environment of the script they're
# embedded in).
# This is overridden in the InteractiveShellEmbed subclass to a no-op.
main_name = self.user_module.__name__
sys.modules[main_name] = self.user_module
def init_user_ns(self):
"""Initialize all user-visible namespaces to their minimum defaults.
Certain history lists are also initialized here, as they effectively
act as user namespaces.
Notes
-----
        All data structures here are only filled in; they are NOT reset by this
        method.  If they were not empty before, data will simply be added to
        them.
"""
# This function works in two parts: first we put a few things in
        # user_ns, and we sync those contents into user_ns_hidden so that these
# initial variables aren't shown by %who. After the sync, we add the
# rest of what we *do* want the user to see with %who even on a new
        # session (probably nothing, so they really only see their own stuff).
# The user dict must *always* have a __builtin__ reference to the
# Python standard __builtin__ namespace, which must be imported.
# This is so that certain operations in prompt evaluation can be
# reliably executed with builtins. Note that we can NOT use
# __builtins__ (note the 's'), because that can either be a dict or a
# module, and can even mutate at runtime, depending on the context
# (Python makes no guarantees on it). In contrast, __builtin__ is
# always a module object, though it must be explicitly imported.
# For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
ns = dict()
# Put 'help' in the user namespace
try:
from site import _Helper
ns['help'] = _Helper()
except ImportError:
warn('help() not available - check site.py')
# make global variables for user access to the histories
ns['_ih'] = self.history_manager.input_hist_parsed
ns['_oh'] = self.history_manager.output_hist
ns['_dh'] = self.history_manager.dir_hist
ns['_sh'] = shadowns
# user aliases to input and output histories. These shouldn't show up
# in %who, as they can have very large reprs.
ns['In'] = self.history_manager.input_hist_parsed
ns['Out'] = self.history_manager.output_hist
# Store myself as the public api!!!
ns['get_ipython'] = self.get_ipython
ns['exit'] = self.exiter
ns['quit'] = self.exiter
# Sync what we've added so far to user_ns_hidden so these aren't seen
# by %who
self.user_ns_hidden.update(ns)
# Anything put into ns now would show up in %who. Think twice before
# putting anything here, as we really want %who to show the user their
# stuff, not our variables.
# Finally, update the real user's namespace
self.user_ns.update(ns)
@property
def all_ns_refs(self):
"""Get a list of references to all the namespace dictionaries in which
IPython might store a user-created object.
Note that this does not include the displayhook, which also caches
objects from the output."""
return [self.user_ns, self.user_global_ns,
self._user_main_module.__dict__] + self._main_ns_cache.values()
def reset(self, new_session=True):
"""Clear all internal namespaces, and attempt to release references to
user objects.
If new_session is True, a new history session will be opened.
"""
# Clear histories
self.history_manager.reset(new_session)
# Reset counter used to index all histories
if new_session:
self.execution_count = 1
# Flush cached output items
if self.displayhook.do_full_cache:
self.displayhook.flush()
# The main execution namespaces must be cleared very carefully,
# skipping the deletion of the builtin-related keys, because doing so
# would cause errors in many object's __del__ methods.
if self.user_ns is not self.user_global_ns:
self.user_ns.clear()
ns = self.user_global_ns
drop_keys = set(ns.keys())
drop_keys.discard('__builtin__')
drop_keys.discard('__builtins__')
drop_keys.discard('__name__')
for k in drop_keys:
del ns[k]
self.user_ns_hidden.clear()
# Restore the user namespaces to minimal usability
self.init_user_ns()
# Restore the default and user aliases
self.alias_manager.clear_aliases()
self.alias_manager.init_aliases()
# Flush the private list of module references kept for script
# execution protection
self.clear_main_mod_cache()
# Clear out the namespace from the last %run
self.new_main_mod()
def del_var(self, varname, by_name=False):
"""Delete a variable from the various namespaces, so that, as
far as possible, we're not keeping any hidden references to it.
Parameters
----------
varname : str
The name of the variable to delete.
by_name : bool
If True, delete variables with the given name in each
namespace. If False (default), find the variable in the user
namespace, and delete references to it.
"""
if varname in ('__builtin__', '__builtins__'):
raise ValueError("Refusing to delete %s" % varname)
ns_refs = self.all_ns_refs
if by_name: # Delete by name
for ns in ns_refs:
try:
del ns[varname]
except KeyError:
pass
else: # Delete by object
try:
obj = self.user_ns[varname]
except KeyError:
raise NameError("name '%s' is not defined" % varname)
# Also check in output history
ns_refs.append(self.history_manager.output_hist)
for ns in ns_refs:
to_delete = [n for n, o in ns.iteritems() if o is obj]
for name in to_delete:
del ns[name]
# displayhook keeps extra references, but not in a dictionary
for name in ('_', '__', '___'):
if getattr(self.displayhook, name) is obj:
setattr(self.displayhook, name, None)
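    # Illustrative sketch only (comment, not executed): assuming `ip` is an
    # InteractiveShell instance and the user has defined `data`:
    #
    #   ip.del_var('data')                 # drop all references to that object
    #   ip.del_var('tmp', by_name=True)    # drop the name 'tmp' in each namespace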
def reset_selective(self, regex=None):
"""Clear selective variables from internal namespaces based on a
specified regular expression.
Parameters
----------
regex : string or compiled pattern, optional
A regular expression pattern that will be used in searching
            variable names in the user's namespaces.
"""
if regex is not None:
try:
m = re.compile(regex)
except TypeError:
raise TypeError('regex must be a string or compiled pattern')
# Search for keys in each namespace that match the given regex
# If a match is found, delete the key/value pair.
for ns in self.all_ns_refs:
                # Iterate over a snapshot of the keys so that deleting entries
                # does not change the dict size during iteration.
                for var in ns.keys():
                    if m.search(var):
                        del ns[var]
def push(self, variables, interactive=True):
"""Inject a group of variables into the IPython user namespace.
Parameters
----------
variables : dict, str or list/tuple of str
The variables to inject into the user's namespace. If a dict, a
simple update is done. If a str, the string is assumed to have
variable names separated by spaces. A list/tuple of str can also
be used to give the variable names. If just the variable names are
            given (list/tuple/str) then the variable values are looked up in the
            caller's frame.
interactive : bool
If True (default), the variables will be listed with the ``who``
magic.
"""
vdict = None
# We need a dict of name/value pairs to do namespace updates.
if isinstance(variables, dict):
vdict = variables
elif isinstance(variables, (basestring, list, tuple)):
if isinstance(variables, basestring):
vlist = variables.split()
else:
vlist = variables
vdict = {}
cf = sys._getframe(1)
for name in vlist:
try:
vdict[name] = eval(name, cf.f_globals, cf.f_locals)
except:
print ('Could not get variable %s from %s' %
(name,cf.f_code.co_name))
else:
raise ValueError('variables must be a dict/str/list/tuple')
# Propagate variables to user namespace
self.user_ns.update(vdict)
# And configure interactive visibility
user_ns_hidden = self.user_ns_hidden
if interactive:
user_ns_hidden.difference_update(vdict)
else:
user_ns_hidden.update(vdict)
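    # Usage sketch (comment only), assuming `ip` is an InteractiveShell instance:
    #
    #   ip.push({'x': 10, 'y': 20})            # inject an explicit dict
    #   a, b = 1, 2
    #   ip.push('a b')                         # values looked up in this frame
    #   ip.push({'helper': object()}, interactive=False)   # hidden from %who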
def drop_by_id(self, variables):
"""Remove a dict of variables from the user namespace, if they are the
same as the values in the dictionary.
This is intended for use by extensions: variables that they've added can
be taken back out if they are unloaded, without removing any that the
user has overwritten.
Parameters
----------
variables : dict
A dictionary mapping object names (as strings) to the objects.
"""
for name, obj in variables.iteritems():
if name in self.user_ns and self.user_ns[name] is obj:
del self.user_ns[name]
self.user_ns_hidden.discard(name)
#-------------------------------------------------------------------------
# Things related to object introspection
#-------------------------------------------------------------------------
def _ofind(self, oname, namespaces=None):
"""Find an object in the available namespaces.
self._ofind(oname) -> dict with keys: found,obj,ospace,ismagic
Has special code to detect magic functions.
"""
oname = oname.strip()
#print '1- oname: <%r>' % oname # dbg
if not oname.startswith(ESC_MAGIC) and \
not oname.startswith(ESC_MAGIC2) and \
not py3compat.isidentifier(oname, dotted=True):
return dict(found=False)
alias_ns = None
if namespaces is None:
# Namespaces to search in:
# Put them in a list. The order is important so that we
# find things in the same order that Python finds them.
namespaces = [ ('Interactive', self.user_ns),
('Interactive (global)', self.user_global_ns),
('Python builtin', builtin_mod.__dict__),
('Alias', self.alias_manager.alias_table),
]
alias_ns = self.alias_manager.alias_table
# initialize results to 'null'
found = False; obj = None; ospace = None; ds = None;
ismagic = False; isalias = False; parent = None
# We need to special-case 'print', which as of python2.6 registers as a
# function but should only be treated as one if print_function was
# loaded with a future import. In this case, just bail.
if (oname == 'print' and not py3compat.PY3 and not \
(self.compile.compiler_flags & __future__.CO_FUTURE_PRINT_FUNCTION)):
return {'found':found, 'obj':obj, 'namespace':ospace,
'ismagic':ismagic, 'isalias':isalias, 'parent':parent}
# Look for the given name by splitting it in parts. If the head is
# found, then we look for all the remaining parts as members, and only
# declare success if we can find them all.
oname_parts = oname.split('.')
oname_head, oname_rest = oname_parts[0],oname_parts[1:]
for nsname,ns in namespaces:
try:
obj = ns[oname_head]
except KeyError:
continue
else:
#print 'oname_rest:', oname_rest # dbg
for part in oname_rest:
try:
parent = obj
obj = getattr(obj,part)
except:
# Blanket except b/c some badly implemented objects
# allow __getattr__ to raise exceptions other than
# AttributeError, which then crashes IPython.
break
else:
# If we finish the for loop (no break), we got all members
found = True
ospace = nsname
if ns == alias_ns:
isalias = True
break # namespace loop
# Try to see if it's magic
if not found:
obj = None
if oname.startswith(ESC_MAGIC2):
oname = oname.lstrip(ESC_MAGIC2)
obj = self.find_cell_magic(oname)
elif oname.startswith(ESC_MAGIC):
oname = oname.lstrip(ESC_MAGIC)
obj = self.find_line_magic(oname)
else:
# search without prefix, so run? will find %run?
obj = self.find_line_magic(oname)
if obj is None:
obj = self.find_cell_magic(oname)
if obj is not None:
found = True
ospace = 'IPython internal'
ismagic = True
# Last try: special-case some literals like '', [], {}, etc:
if not found and oname_head in ["''",'""','[]','{}','()']:
obj = eval(oname_head)
found = True
ospace = 'Interactive'
return {'found':found, 'obj':obj, 'namespace':ospace,
'ismagic':ismagic, 'isalias':isalias, 'parent':parent}
def _ofind_property(self, oname, info):
"""Second part of object finding, to look for property details."""
if info.found:
# Get the docstring of the class property if it exists.
path = oname.split('.')
root = '.'.join(path[:-1])
if info.parent is not None:
try:
target = getattr(info.parent, '__class__')
# The object belongs to a class instance.
try:
target = getattr(target, path[-1])
# The class defines the object.
if isinstance(target, property):
oname = root + '.__class__.' + path[-1]
info = Struct(self._ofind(oname))
except AttributeError: pass
except AttributeError: pass
# We return either the new info or the unmodified input if the object
# hadn't been found
return info
def _object_find(self, oname, namespaces=None):
"""Find an object and return a struct with info about it."""
inf = Struct(self._ofind(oname, namespaces))
return Struct(self._ofind_property(oname, inf))
def _inspect(self, meth, oname, namespaces=None, **kw):
"""Generic interface to the inspector system.
This function is meant to be called by pdef, pdoc & friends."""
info = self._object_find(oname, namespaces)
if info.found:
pmethod = getattr(self.inspector, meth)
formatter = format_screen if info.ismagic else None
if meth == 'pdoc':
pmethod(info.obj, oname, formatter)
elif meth == 'pinfo':
pmethod(info.obj, oname, formatter, info, **kw)
else:
pmethod(info.obj, oname)
else:
print 'Object `%s` not found.' % oname
return 'not found' # so callers can take other action
def object_inspect(self, oname, detail_level=0):
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
return self.inspector.info(info.obj, oname, info=info,
detail_level=detail_level
)
else:
return oinspect.object_info(name=oname, found=False)
#-------------------------------------------------------------------------
# Things related to history management
#-------------------------------------------------------------------------
def init_history(self):
"""Sets up the command history, and starts regular autosaves."""
self.history_manager = HistoryManager(shell=self, config=self.config)
self.configurables.append(self.history_manager)
#-------------------------------------------------------------------------
# Things related to exception handling and tracebacks (not debugging)
#-------------------------------------------------------------------------
def init_traceback_handlers(self, custom_exceptions):
# Syntax error handler.
self.SyntaxTB = ultratb.SyntaxTB(color_scheme='NoColor')
# The interactive one is initialized with an offset, meaning we always
# want to remove the topmost item in the traceback, which is our own
# internal code. Valid modes: ['Plain','Context','Verbose']
self.InteractiveTB = ultratb.AutoFormattedTB(mode = 'Plain',
color_scheme='NoColor',
tb_offset = 1,
check_cache=self.compile.check_cache)
# The instance will store a pointer to the system-wide exception hook,
# so that runtime code (such as magics) can access it. This is because
# during the read-eval loop, it may get temporarily overwritten.
self.sys_excepthook = sys.excepthook
# and add any custom exception handlers the user may have specified
self.set_custom_exc(*custom_exceptions)
# Set the exception mode
self.InteractiveTB.set_mode(mode=self.xmode)
def set_custom_exc(self, exc_tuple, handler):
"""set_custom_exc(exc_tuple,handler)
Set a custom exception handler, which will be called if any of the
exceptions in exc_tuple occur in the mainloop (specifically, in the
run_code() method).
Parameters
----------
exc_tuple : tuple of exception classes
A *tuple* of exception classes, for which to call the defined
handler. It is very important that you use a tuple, and NOT A
LIST here, because of the way Python's except statement works. If
you only want to trap a single exception, use a singleton tuple::
exc_tuple == (MyCustomException,)
handler : callable
handler must have the following signature::
def my_handler(self, etype, value, tb, tb_offset=None):
...
return structured_traceback
Your handler must return a structured traceback (a list of strings),
or None.
This will be made into an instance method (via types.MethodType)
of IPython itself, and it will be called if any of the exceptions
listed in the exc_tuple are caught. If the handler is None, an
internal basic one is used, which just prints basic info.
To protect IPython from crashes, if your handler ever raises an
exception or returns an invalid result, it will be immediately
disabled.
WARNING: by putting in your own exception handler into IPython's main
execution loop, you run a very good chance of nasty crashes. This
facility should only be used if you really know what you are doing."""
assert type(exc_tuple)==type(()) , \
"The custom exceptions must be given AS A TUPLE."
def dummy_handler(self,etype,value,tb,tb_offset=None):
print '*** Simple custom exception handler ***'
print 'Exception type :',etype
print 'Exception value:',value
print 'Traceback :',tb
#print 'Source code :','\n'.join(self.buffer)
def validate_stb(stb):
"""validate structured traceback return type
return type of CustomTB *should* be a list of strings, but allow
single strings or None, which are harmless.
This function will *always* return a list of strings,
and will raise a TypeError if stb is inappropriate.
"""
msg = "CustomTB must return list of strings, not %r" % stb
if stb is None:
return []
elif isinstance(stb, basestring):
return [stb]
elif not isinstance(stb, list):
raise TypeError(msg)
# it's a list
for line in stb:
# check every element
if not isinstance(line, basestring):
raise TypeError(msg)
return stb
if handler is None:
wrapped = dummy_handler
else:
def wrapped(self,etype,value,tb,tb_offset=None):
"""wrap CustomTB handler, to protect IPython from user code
This makes it harder (but not impossible) for custom exception
handlers to crash IPython.
"""
try:
stb = handler(self,etype,value,tb,tb_offset=tb_offset)
return validate_stb(stb)
except:
# clear custom handler immediately
self.set_custom_exc((), None)
print >> io.stderr, "Custom TB Handler failed, unregistering"
# show the exception in handler first
stb = self.InteractiveTB.structured_traceback(*sys.exc_info())
print >> io.stdout, self.InteractiveTB.stb2text(stb)
print >> io.stdout, "The original exception:"
stb = self.InteractiveTB.structured_traceback(
(etype,value,tb), tb_offset=tb_offset
)
return stb
self.CustomTB = types.MethodType(wrapped,self)
self.custom_exceptions = exc_tuple
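    # A hedged sketch of registering a custom handler (comment only; the handler
    # name and message are made up for illustration):
    #
    #   def name_error_handler(self, etype, value, tb, tb_offset=None):
    #       print "*** custom NameError handler ***"
    #       return self.InteractiveTB.structured_traceback(
    #           etype, value, tb, tb_offset=tb_offset)
    #
    #   ip.set_custom_exc((NameError,), name_error_handler)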
def excepthook(self, etype, value, tb):
"""One more defense for GUI apps that call sys.excepthook.
GUI frameworks like wxPython trap exceptions and call
sys.excepthook themselves. I guess this is a feature that
enables them to keep running after exceptions that would
otherwise kill their mainloop. This is a bother for IPython
        which expects to catch all of the program exceptions with a try:
except: statement.
Normally, IPython sets sys.excepthook to a CrashHandler instance, so if
any app directly invokes sys.excepthook, it will look to the user like
IPython crashed. In order to work around this, we can disable the
CrashHandler and replace it with this excepthook instead, which prints a
regular traceback using our InteractiveTB. In this fashion, apps which
call sys.excepthook will generate a regular-looking exception from
IPython, and the CrashHandler will only be triggered by real IPython
crashes.
This hook should be used sparingly, only in places which are not likely
to be true IPython errors.
"""
self.showtraceback((etype,value,tb),tb_offset=0)
def _get_exc_info(self, exc_tuple=None):
"""get exc_info from a given tuple, sys.exc_info() or sys.last_type etc.
Ensures sys.last_type,value,traceback hold the exc_info we found,
from whichever source.
raises ValueError if none of these contain any information
"""
if exc_tuple is None:
etype, value, tb = sys.exc_info()
else:
etype, value, tb = exc_tuple
if etype is None:
if hasattr(sys, 'last_type'):
etype, value, tb = sys.last_type, sys.last_value, \
sys.last_traceback
if etype is None:
raise ValueError("No exception to find")
# Now store the exception info in sys.last_type etc.
# WARNING: these variables are somewhat deprecated and not
# necessarily safe to use in a threaded environment, but tools
# like pdb depend on their existence, so let's set them. If we
# find problems in the field, we'll need to revisit their use.
sys.last_type = etype
sys.last_value = value
sys.last_traceback = tb
return etype, value, tb
def showtraceback(self,exc_tuple = None,filename=None,tb_offset=None,
exception_only=False):
"""Display the exception that just occurred.
If nothing is known about the exception, this is the method which
should be used throughout the code for presenting user tracebacks,
rather than directly invoking the InteractiveTB object.
A specific showsyntaxerror() also exists, but this method can take
care of calling it if needed, so unless you are explicitly catching a
SyntaxError exception, don't try to analyze the stack manually and
simply call this method."""
try:
try:
etype, value, tb = self._get_exc_info(exc_tuple)
except ValueError:
self.write_err('No traceback available to show.\n')
return
if etype is SyntaxError:
# Though this won't be called by syntax errors in the input
# line, there may be SyntaxError cases with imported code.
self.showsyntaxerror(filename)
elif etype is UsageError:
self.write_err("UsageError: %s" % value)
else:
if exception_only:
stb = ['An exception has occurred, use %tb to see '
'the full traceback.\n']
stb.extend(self.InteractiveTB.get_exception_only(etype,
value))
else:
try:
# Exception classes can customise their traceback - we
# use this in IPython.parallel for exceptions occurring
# in the engines. This should return a list of strings.
stb = value._render_traceback_()
except Exception:
stb = self.InteractiveTB.structured_traceback(etype,
value, tb, tb_offset=tb_offset)
self._showtraceback(etype, value, stb)
if self.call_pdb:
# drop into debugger
self.debugger(force=True)
return
# Actually show the traceback
self._showtraceback(etype, value, stb)
except KeyboardInterrupt:
self.write_err("\nKeyboardInterrupt\n")
def _showtraceback(self, etype, evalue, stb):
"""Actually show a traceback.
Subclasses may override this method to put the traceback on a different
place, like a side channel.
"""
print >> io.stdout, self.InteractiveTB.stb2text(stb)
def showsyntaxerror(self, filename=None):
"""Display the syntax error that just occurred.
This doesn't display a stack trace because there isn't one.
If a filename is given, it is stuffed in the exception instead
of what was there before (because Python's parser always uses
"<string>" when reading from a string).
"""
etype, value, last_traceback = self._get_exc_info()
if filename and etype is SyntaxError:
try:
value.filename = filename
except:
# Not the format we expect; leave it alone
pass
stb = self.SyntaxTB.structured_traceback(etype, value, [])
self._showtraceback(etype, value, stb)
# This is overridden in TerminalInteractiveShell to show a message about
# the %paste magic.
def showindentationerror(self):
"""Called by run_cell when there's an IndentationError in code entered
at the prompt.
This is overridden in TerminalInteractiveShell to show a message about
the %paste magic."""
self.showsyntaxerror()
#-------------------------------------------------------------------------
# Things related to readline
#-------------------------------------------------------------------------
def init_readline(self):
"""Command history completion/saving/reloading."""
if self.readline_use:
import IPython.utils.rlineimpl as readline
self.rl_next_input = None
self.rl_do_indent = False
if not self.readline_use or not readline.have_readline:
self.has_readline = False
self.readline = None
# Set a number of methods that depend on readline to be no-op
self.readline_no_record = no_op_context
self.set_readline_completer = no_op
self.set_custom_completer = no_op
if self.readline_use:
warn('Readline services not available or not loaded.')
else:
self.has_readline = True
self.readline = readline
sys.modules['readline'] = readline
# Platform-specific configuration
if os.name == 'nt':
# FIXME - check with Frederick to see if we can harmonize
# naming conventions with pyreadline to avoid this
# platform-dependent check
self.readline_startup_hook = readline.set_pre_input_hook
else:
self.readline_startup_hook = readline.set_startup_hook
# Load user's initrc file (readline config)
# Or if libedit is used, load editrc.
inputrc_name = os.environ.get('INPUTRC')
if inputrc_name is None:
inputrc_name = '.inputrc'
if readline.uses_libedit:
inputrc_name = '.editrc'
inputrc_name = os.path.join(self.home_dir, inputrc_name)
if os.path.isfile(inputrc_name):
try:
readline.read_init_file(inputrc_name)
except:
warn('Problems reading readline initialization file <%s>'
% inputrc_name)
# Configure readline according to user's prefs
# This is only done if GNU readline is being used. If libedit
# is being used (as on Leopard) the readline config is
# not run as the syntax for libedit is different.
if not readline.uses_libedit:
for rlcommand in self.readline_parse_and_bind:
#print "loading rl:",rlcommand # dbg
readline.parse_and_bind(rlcommand)
# Remove some chars from the delimiters list. If we encounter
# unicode chars, discard them.
delims = readline.get_completer_delims()
if not py3compat.PY3:
delims = delims.encode("ascii", "ignore")
for d in self.readline_remove_delims:
delims = delims.replace(d, "")
delims = delims.replace(ESC_MAGIC, '')
readline.set_completer_delims(delims)
# otherwise we end up with a monster history after a while:
readline.set_history_length(self.history_length)
self.refill_readline_hist()
self.readline_no_record = ReadlineNoRecord(self)
# Configure auto-indent for all platforms
self.set_autoindent(self.autoindent)
def refill_readline_hist(self):
# Load the last 1000 lines from history
self.readline.clear_history()
stdin_encoding = sys.stdin.encoding or "utf-8"
last_cell = u""
for _, _, cell in self.history_manager.get_tail(1000,
include_latest=True):
# Ignore blank lines and consecutive duplicates
cell = cell.rstrip()
if cell and (cell != last_cell):
if self.multiline_history:
self.readline.add_history(py3compat.unicode_to_str(cell,
stdin_encoding))
else:
for line in cell.splitlines():
self.readline.add_history(py3compat.unicode_to_str(line,
stdin_encoding))
last_cell = cell
def set_next_input(self, s):
""" Sets the 'default' input string for the next command line.
Requires readline.
Example:
        [D:\ipython]|1> _ip.set_next_input("Hello World")
        [D:\ipython]|2> Hello World_  # cursor is here
"""
self.rl_next_input = py3compat.cast_bytes_py2(s)
# Maybe move this to the terminal subclass?
def pre_readline(self):
"""readline hook to be used at the start of each line.
Currently it handles auto-indent only."""
if self.rl_do_indent:
self.readline.insert_text(self._indent_current_str())
if self.rl_next_input is not None:
self.readline.insert_text(self.rl_next_input)
self.rl_next_input = None
def _indent_current_str(self):
"""return the current level of indentation as a string"""
return self.input_splitter.indent_spaces * ' '
#-------------------------------------------------------------------------
# Things related to text completion
#-------------------------------------------------------------------------
def init_completer(self):
"""Initialize the completion machinery.
This creates completion machinery that can be used by client code,
either interactively in-process (typically triggered by the readline
        library), programmatically (such as in test suites) or out-of-process
(typically over the network by remote frontends).
"""
from IPython.core.completer import IPCompleter
from IPython.core.completerlib import (module_completer,
magic_run_completer, cd_completer, reset_completer)
self.Completer = IPCompleter(shell=self,
namespace=self.user_ns,
global_namespace=self.user_global_ns,
alias_table=self.alias_manager.alias_table,
use_readline=self.has_readline,
config=self.config,
)
self.configurables.append(self.Completer)
# Add custom completers to the basic ones built into IPCompleter
sdisp = self.strdispatchers.get('complete_command', StrDispatch())
self.strdispatchers['complete_command'] = sdisp
self.Completer.custom_completers = sdisp
self.set_hook('complete_command', module_completer, str_key = 'import')
self.set_hook('complete_command', module_completer, str_key = 'from')
self.set_hook('complete_command', magic_run_completer, str_key = '%run')
self.set_hook('complete_command', cd_completer, str_key = '%cd')
self.set_hook('complete_command', reset_completer, str_key = '%reset')
# Only configure readline if we truly are using readline. IPython can
# do tab-completion over the network, in GUIs, etc, where readline
# itself may be absent
if self.has_readline:
self.set_readline_completer()
def complete(self, text, line=None, cursor_pos=None):
"""Return the completed text and a list of completions.
Parameters
----------
text : string
            A string of text to be completed on.  It can be given as empty, with
            a line/position pair supplied instead.  In this case, the
            completer itself will split the line like readline does.
line : string, optional
The complete line that text is part of.
cursor_pos : int, optional
The position of the cursor on the input line.
Returns
-------
text : string
The actual text that was completed.
matches : list
A sorted list with all possible completions.
The optional arguments allow the completion to take more context into
account, and are part of the low-level completion API.
This is a wrapper around the completion mechanism, similar to what
readline does at the command line when the TAB key is hit. By
exposing it as a method, it can be used by other non-readline
environments (such as GUIs) for text completion.
Simple usage example:
In [1]: x = 'hello'
In [2]: _ip.complete('x.l')
Out[2]: ('x.l', ['x.ljust', 'x.lower', 'x.lstrip'])
"""
# Inject names into __builtin__ so we can complete on the added names.
with self.builtin_trap:
return self.Completer.complete(text, line, cursor_pos)
def set_custom_completer(self, completer, pos=0):
"""Adds a new custom completer function.
The position argument (defaults to 0) is the index in the completers
list where you want the completer to be inserted."""
newcomp = types.MethodType(completer,self.Completer)
self.Completer.matchers.insert(pos,newcomp)
def set_readline_completer(self):
"""Reset readline's completer to be our own."""
self.readline.set_completer(self.Completer.rlcomplete)
def set_completer_frame(self, frame=None):
"""Set the frame of the completer."""
if frame:
self.Completer.namespace = frame.f_locals
self.Completer.global_namespace = frame.f_globals
else:
self.Completer.namespace = self.user_ns
self.Completer.global_namespace = self.user_global_ns
#-------------------------------------------------------------------------
# Things related to magics
#-------------------------------------------------------------------------
def init_magics(self):
from IPython.core import magics as m
self.magics_manager = magic.MagicsManager(shell=self,
                                                  config=self.config,
user_magics=m.UserMagics(self))
self.configurables.append(self.magics_manager)
# Expose as public API from the magics manager
self.register_magics = self.magics_manager.register
self.register_magic_function = self.magics_manager.register_function
self.define_magic = self.magics_manager.define_magic
self.register_magics(m.AutoMagics, m.BasicMagics, m.CodeMagics,
m.ConfigMagics, m.DeprecatedMagics, m.ExecutionMagics,
m.ExtensionMagics, m.HistoryMagics, m.LoggingMagics,
m.NamespaceMagics, m.OSMagics, m.PylabMagics, m.ScriptMagics,
)
# FIXME: Move the color initialization to the DisplayHook, which
# should be split into a prompt manager and displayhook. We probably
        # even need a centralized colors management object.
self.magic('colors %s' % self.colors)
def run_line_magic(self, magic_name, line):
"""Execute the given line magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the input line as a single string.
"""
fn = self.find_line_magic(magic_name)
if fn is None:
cm = self.find_cell_magic(magic_name)
etpl = "Line magic function `%%%s` not found%s."
extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
'did you mean that instead?)' % magic_name )
error(etpl % (magic_name, extra))
else:
# Note: this is the distance in the stack to the user's frame.
# This will need to be updated if the internal calling logic gets
# refactored, or else we'll be expanding the wrong variables.
stack_depth = 2
magic_arg_s = self.var_expand(line, stack_depth)
# Put magic args in a list so we can call with f(*a) syntax
args = [magic_arg_s]
# Grab local namespace if we need it:
if getattr(fn, "needs_local_scope", False):
args.append(sys._getframe(stack_depth).f_locals)
with self.builtin_trap:
result = fn(*args)
return result
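    # Usage sketch (comment only): calling a line magic programmatically,
    # roughly equivalent to typing `%timeit pass` at the prompt (assuming `ip`
    # is the active InteractiveShell):
    #
    #   ip.run_line_magic('timeit', 'pass')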
def run_cell_magic(self, magic_name, line, cell):
"""Execute the given cell magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the first input line as a single string.
cell : str
The body of the cell as a (possibly multiline) string.
"""
fn = self.find_cell_magic(magic_name)
if fn is None:
lm = self.find_line_magic(magic_name)
etpl = "Cell magic function `%%%%%s` not found%s."
extra = '' if lm is None else (' (But line magic `%%%s` exists, '
'did you mean that instead?)' % magic_name )
error(etpl % (magic_name, extra))
else:
# Note: this is the distance in the stack to the user's frame.
# This will need to be updated if the internal calling logic gets
# refactored, or else we'll be expanding the wrong variables.
stack_depth = 2
magic_arg_s = self.var_expand(line, stack_depth)
with self.builtin_trap:
result = fn(line, cell)
return result
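    # Usage sketch (comment only): the cell magic counterpart, roughly what a
    # `%%timeit` cell would do (the line and body shown are made up):
    #
    #   ip.run_cell_magic('timeit', '-n 10', 'x = sum(range(100))')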
def find_line_magic(self, magic_name):
"""Find and return a line magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['line'].get(magic_name)
def find_cell_magic(self, magic_name):
"""Find and return a cell magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['cell'].get(magic_name)
def find_magic(self, magic_name, magic_kind='line'):
"""Find and return a magic of the given type by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics[magic_kind].get(magic_name)
def magic(self, arg_s):
"""DEPRECATED. Use run_line_magic() instead.
Call a magic function by name.
Input: a string containing the name of the magic function to call and
any additional arguments to be passed to the magic.
magic('name -opt foo bar') is equivalent to typing at the ipython
prompt:
In[1]: %name -opt foo bar
To call a magic without arguments, simply use magic('name').
This provides a proper Python function to call IPython's magics in any
valid Python code you can type at the interpreter, including loops and
compound statements.
"""
# TODO: should we issue a loud deprecation warning here?
magic_name, _, magic_arg_s = arg_s.partition(' ')
magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
return self.run_line_magic(magic_name, magic_arg_s)
#-------------------------------------------------------------------------
# Things related to macros
#-------------------------------------------------------------------------
def define_macro(self, name, themacro):
"""Define a new macro
Parameters
----------
name : str
The name of the macro.
themacro : str or Macro
The action to do upon invoking the macro. If a string, a new
Macro object is created by passing the string to it.
"""
from IPython.core import macro
if isinstance(themacro, basestring):
themacro = macro.Macro(themacro)
if not isinstance(themacro, macro.Macro):
raise ValueError('A macro must be a string or a Macro instance.')
self.user_ns[name] = themacro
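    # Usage sketch (comment only): a macro built from a plain string; the name
    # 'setup_plot' and its body are made up for illustration:
    #
    #   ip.define_macro('setup_plot', 'import numpy as np\nx = np.arange(10)\n')
    #   # typing `setup_plot` at the prompt afterwards replays those lines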
#-------------------------------------------------------------------------
# Things related to the running of system commands
#-------------------------------------------------------------------------
def system_piped(self, cmd):
"""Call the given cmd in a subprocess, piping stdout/err
Parameters
----------
cmd : str
            Command to execute (can not end in '&', as background processes are
            not supported).  Should not be a command that expects input
            other than simple text.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
# We do not support backgrounding processes because we either use
# pexpect or pipes to read from. Users can always just call
# os.system() or use ip.system=ip.system_raw
# if they really want a background process.
raise OSError("Background processes not supported.")
# we explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns.
self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))
def system_raw(self, cmd):
"""Call the given cmd in a subprocess using os.system
Parameters
----------
cmd : str
Command to execute.
"""
cmd = self.var_expand(cmd, depth=1)
# protect os.system from UNC paths on Windows, which it can't handle:
if sys.platform == 'win32':
from IPython.utils._process_win32 import AvoidUNCPath
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
cmd = py3compat.unicode_to_str(cmd)
ec = os.system(cmd)
else:
cmd = py3compat.unicode_to_str(cmd)
ec = os.system(cmd)
# We explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns.
self.user_ns['_exit_code'] = ec
# use piped system by default, because it is better behaved
system = system_piped
def getoutput(self, cmd, split=True, depth=0):
"""Get output (possibly including stderr) from a subprocess.
Parameters
----------
cmd : str
            Command to execute (can not end in '&', as background processes are
            not supported).
split : bool, optional
If True, split the output into an IPython SList. Otherwise, an
IPython LSString is returned. These are objects similar to normal
lists and strings, with a few convenience attributes for easier
manipulation of line-based output. You can use '?' on them for
details.
depth : int, optional
How many frames above the caller are the local variables which should
be expanded in the command string? The default (0) assumes that the
expansion variables are in the stack frame calling this function.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
raise OSError("Background processes not supported.")
out = getoutput(self.var_expand(cmd, depth=depth+1))
if split:
out = SList(out.splitlines())
else:
out = LSString(out)
return out
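    # Usage sketch (comment only), assuming `ip` is an InteractiveShell instance:
    #
    #   files = ip.getoutput('ls')               # an SList of output lines
    #   text  = ip.getoutput('ls', split=False)  # a single LSString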
#-------------------------------------------------------------------------
# Things related to aliases
#-------------------------------------------------------------------------
def init_alias(self):
self.alias_manager = AliasManager(shell=self, config=self.config)
self.configurables.append(self.alias_manager)
        self.ns_table['alias'] = self.alias_manager.alias_table
#-------------------------------------------------------------------------
# Things related to extensions and plugins
#-------------------------------------------------------------------------
def init_extension_manager(self):
self.extension_manager = ExtensionManager(shell=self, config=self.config)
self.configurables.append(self.extension_manager)
def init_plugin_manager(self):
self.plugin_manager = PluginManager(config=self.config)
self.configurables.append(self.plugin_manager)
#-------------------------------------------------------------------------
# Things related to payloads
#-------------------------------------------------------------------------
def init_payload(self):
self.payload_manager = PayloadManager(config=self.config)
self.configurables.append(self.payload_manager)
#-------------------------------------------------------------------------
# Things related to the prefilter
#-------------------------------------------------------------------------
def init_prefilter(self):
self.prefilter_manager = PrefilterManager(shell=self, config=self.config)
self.configurables.append(self.prefilter_manager)
# Ultimately this will be refactored in the new interpreter code, but
# for now, we should expose the main prefilter method (there's legacy
# code out there that may rely on this).
self.prefilter = self.prefilter_manager.prefilter_lines
def auto_rewrite_input(self, cmd):
"""Print to the screen the rewritten form of the user's command.
This shows visual feedback by rewriting input lines that cause
automatic calling to kick in, like::
/f x
into::
------> f(x)
after the user's input prompt. This helps the user understand that the
input line was transformed automatically by IPython.
"""
if not self.show_rewritten_input:
return
rw = self.prompt_manager.render('rewrite') + cmd
try:
# plain ascii works better w/ pyreadline, on some machines, so
# we use it and only print uncolored rewrite if we have unicode
rw = str(rw)
print >> io.stdout, rw
except UnicodeEncodeError:
print "------> " + cmd
#-------------------------------------------------------------------------
# Things related to extracting values/expressions from kernel and user_ns
#-------------------------------------------------------------------------
def _simple_error(self):
etype, value = sys.exc_info()[:2]
return u'[ERROR] {e.__name__}: {v}'.format(e=etype, v=value)
def user_variables(self, names):
"""Get a list of variable names from the user's namespace.
Parameters
----------
names : list of strings
A list of names of variables to be read from the user namespace.
Returns
-------
A dict, keyed by the input names and with the repr() of each value.
"""
out = {}
user_ns = self.user_ns
for varname in names:
try:
value = repr(user_ns[varname])
except:
value = self._simple_error()
out[varname] = value
return out
def user_expressions(self, expressions):
"""Evaluate a dict of expressions in the user's namespace.
Parameters
----------
expressions : dict
A dict with string keys and string values. The expression values
should be valid Python expressions, each of which will be evaluated
in the user namespace.
Returns
-------
A dict, keyed like the input expressions dict, with the repr() of each
value.
"""
out = {}
user_ns = self.user_ns
global_ns = self.user_global_ns
for key, expr in expressions.iteritems():
try:
value = repr(eval(expr, global_ns, user_ns))
except:
value = self._simple_error()
out[key] = value
return out
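    # Usage sketch (comment only): a frontend asking the shell to evaluate a
    # couple of expressions; the keys are arbitrary labels chosen by the caller:
    #
    #   ip.user_expressions({'two': '1 + 1', 'cwd': 'os.getcwd()'})
    #   # -> {'two': '2', 'cwd': "'...'"}   (reprs, or an error string for
    #   #    expressions that raise)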
#-------------------------------------------------------------------------
# Things related to the running of code
#-------------------------------------------------------------------------
def ex(self, cmd):
"""Execute a normal python statement in user namespace."""
with self.builtin_trap:
exec cmd in self.user_global_ns, self.user_ns
def ev(self, expr):
"""Evaluate python expression expr in user namespace.
Returns the result of evaluation
"""
with self.builtin_trap:
return eval(expr, self.user_global_ns, self.user_ns)
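    # Usage sketch (comment only): ex() for statements, ev() for expressions,
    # both evaluated against the user namespace:
    #
    #   ip.ex('counter = 0')
    #   ip.ev('counter + 1')      # -> 1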
def safe_execfile(self, fname, *where, **kw):
"""A safe version of the builtin execfile().
This version will never throw an exception, but instead print
helpful error messages to the screen. This only works on pure
Python files with the .py extension.
Parameters
----------
fname : string
The name of the file to be executed.
where : tuple
One or two namespaces, passed to execfile() as (globals,locals).
If only one is given, it is passed as both.
exit_ignore : bool (False)
If True, then silence SystemExit for non-zero status (it is always
silenced for zero status, as it is so common).
raise_exceptions : bool (False)
If True raise exceptions everywhere. Meant for testing.
"""
kw.setdefault('exit_ignore', False)
kw.setdefault('raise_exceptions', False)
fname = os.path.abspath(os.path.expanduser(fname))
# Make sure we can open the file
try:
with open(fname) as thefile:
pass
except:
warn('Could not open file <%s> for safe execution.' % fname)
return
# Find things also in current directory. This is needed to mimic the
# behavior of running a script from the system command line, where
# Python inserts the script's directory into sys.path
dname = os.path.dirname(fname)
with prepended_to_syspath(dname):
try:
py3compat.execfile(fname,*where)
except SystemExit, status:
# If the call was made with 0 or None exit status (sys.exit(0)
# or sys.exit() ), don't bother showing a traceback, as both of
# these are considered normal by the OS:
# > python -c'import sys;sys.exit(0)'; echo $?
# 0
# > python -c'import sys;sys.exit()'; echo $?
# 0
# For other exit status, we show the exception unless
# explicitly silenced, but only in short form.
if kw['raise_exceptions']:
raise
if status.code not in (0, None) and not kw['exit_ignore']:
self.showtraceback(exception_only=True)
except:
if kw['raise_exceptions']:
raise
self.showtraceback()
def safe_execfile_ipy(self, fname):
"""Like safe_execfile, but for .ipy files with IPython syntax.
Parameters
----------
fname : str
The name of the file to execute. The filename must have a
.ipy extension.
"""
fname = os.path.abspath(os.path.expanduser(fname))
# Make sure we can open the file
try:
with open(fname) as thefile:
pass
except:
warn('Could not open file <%s> for safe execution.' % fname)
return
# Find things also in current directory. This is needed to mimic the
# behavior of running a script from the system command line, where
# Python inserts the script's directory into sys.path
dname = os.path.dirname(fname)
with prepended_to_syspath(dname):
try:
with open(fname) as thefile:
# self.run_cell currently captures all exceptions
# raised in user code. It would be nice if there were
# versions of runlines, execfile that did raise, so
# we could catch the errors.
self.run_cell(thefile.read(), store_history=False)
except:
self.showtraceback()
warn('Unknown failure executing file: <%s>' % fname)
def safe_run_module(self, mod_name, where):
"""A safe version of runpy.run_module().
This version will never throw an exception, but instead print
helpful error messages to the screen.
Parameters
----------
mod_name : string
The name of the module to be executed.
where : dict
The globals namespace.
"""
try:
where.update(
runpy.run_module(str(mod_name), run_name="__main__",
alter_sys=True)
)
except:
self.showtraceback()
warn('Unknown failure executing module: <%s>' % mod_name)
def _run_cached_cell_magic(self, magic_name, line):
"""Special method to call a cell magic with the data stored in self.
"""
cell = self._current_cell_magic_body
self._current_cell_magic_body = None
return self.run_cell_magic(magic_name, line, cell)
def run_cell(self, raw_cell, store_history=False, silent=False):
"""Run a complete IPython cell.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
            If True, avoid side-effects, such as implicit displayhooks, history,
and logging. silent=True forces store_history=False.
"""
if (not raw_cell) or raw_cell.isspace():
return
if silent:
store_history = False
self.input_splitter.push(raw_cell)
# Check for cell magics, which leave state behind. This interface is
# ugly, we need to do something cleaner later... Now the logic is
# simply that the input_splitter remembers if there was a cell magic,
# and in that case we grab the cell body.
if self.input_splitter.cell_magic_parts:
self._current_cell_magic_body = \
''.join(self.input_splitter.cell_magic_parts)
cell = self.input_splitter.source_reset()
with self.builtin_trap:
prefilter_failed = False
if len(cell.splitlines()) == 1:
try:
# use prefilter_lines to handle trailing newlines
# restore trailing newline for ast.parse
cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
except AliasError as e:
error(e)
prefilter_failed = True
except Exception:
# don't allow prefilter errors to crash IPython
self.showtraceback()
prefilter_failed = True
# Store raw and processed history
if store_history:
self.history_manager.store_inputs(self.execution_count,
cell, raw_cell)
if not silent:
self.logger.log(cell, raw_cell)
if not prefilter_failed:
# don't run if prefilter failed
cell_name = self.compile.cache(cell, self.execution_count)
with self.display_trap:
try:
code_ast = self.compile.ast_parse(cell,
filename=cell_name)
except IndentationError:
self.showindentationerror()
if store_history:
self.execution_count += 1
return None
except (OverflowError, SyntaxError, ValueError, TypeError,
MemoryError):
self.showsyntaxerror()
if store_history:
self.execution_count += 1
return None
interactivity = "none" if silent else self.ast_node_interactivity
self.run_ast_nodes(code_ast.body, cell_name,
interactivity=interactivity)
# Execute any registered post-execution functions.
# unless we are silent
post_exec = [] if silent else self._post_execute.iteritems()
for func, status in post_exec:
if self.disable_failing_post_execute and not status:
continue
try:
func()
except KeyboardInterrupt:
print >> io.stderr, "\nKeyboardInterrupt"
except Exception:
# register as failing:
self._post_execute[func] = False
self.showtraceback()
print >> io.stderr, '\n'.join([
"post-execution function %r produced an error." % func,
"If this problem persists, you can disable failing post-exec functions with:",
"",
" get_ipython().disable_failing_post_execute = True"
])
if store_history:
# Write output to the database. Does nothing unless
# history output logging is enabled.
self.history_manager.store_output(self.execution_count)
# Each cell is a *single* input, regardless of how many lines it has
self.execution_count += 1
def run_ast_nodes(self, nodelist, cell_name, interactivity='last_expr'):
"""Run a sequence of AST nodes. The execution mode depends on the
interactivity parameter.
Parameters
----------
nodelist : list
A sequence of AST nodes to run.
cell_name : str
Will be passed to the compiler as the filename of the cell. Typically
the value returned by ip.compile.cache(cell).
interactivity : str
'all', 'last', 'last_expr' or 'none', specifying which nodes should be
run interactively (displaying output from expressions). 'last_expr'
will run the last node interactively only if it is an expression (i.e.
            expressions in loops or other blocks are not displayed).  Other values
for this parameter will raise a ValueError.
"""
if not nodelist:
return
if interactivity == 'last_expr':
if isinstance(nodelist[-1], ast.Expr):
interactivity = "last"
else:
interactivity = "none"
if interactivity == 'none':
to_run_exec, to_run_interactive = nodelist, []
elif interactivity == 'last':
to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:]
elif interactivity == 'all':
to_run_exec, to_run_interactive = [], nodelist
else:
raise ValueError("Interactivity was %r" % interactivity)
exec_count = self.execution_count
try:
for i, node in enumerate(to_run_exec):
mod = ast.Module([node])
code = self.compile(mod, cell_name, "exec")
if self.run_code(code):
return True
for i, node in enumerate(to_run_interactive):
mod = ast.Interactive([node])
code = self.compile(mod, cell_name, "single")
if self.run_code(code):
return True
# Flush softspace
if softspace(sys.stdout, 0):
print
except:
# It's possible to have exceptions raised here, typically by
# compilation of odd code (such as a naked 'return' outside a
# function) that did parse but isn't valid. Typically the exception
# is a SyntaxError, but it's safest just to catch anything and show
# the user a traceback.
# We do only one try/except outside the loop to minimize the impact
# on runtime, and also because if any node in the node list is
# broken, we should stop execution completely.
self.showtraceback()
return False
def run_code(self, code_obj):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to display a
traceback.
Parameters
----------
code_obj : code object
A compiled code object, to be executed
Returns
-------
False : successful execution.
True : an error occurred.
"""
# Set our own excepthook in case the user code tries to call it
# directly, so that the IPython crash handler doesn't get triggered
old_excepthook,sys.excepthook = sys.excepthook, self.excepthook
# we save the original sys.excepthook in the instance, in case config
# code (such as magics) needs access to it.
self.sys_excepthook = old_excepthook
outflag = 1 # happens in more places, so it's easier as default
try:
try:
self.hooks.pre_run_code_hook()
#rprint('Running code', repr(code_obj)) # dbg
exec code_obj in self.user_global_ns, self.user_ns
finally:
# Reset our crash handler in place
sys.excepthook = old_excepthook
except SystemExit:
self.showtraceback(exception_only=True)
warn("To exit: use 'exit', 'quit', or Ctrl-D.", level=1)
except self.custom_exceptions:
etype,value,tb = sys.exc_info()
self.CustomTB(etype,value,tb)
except:
self.showtraceback()
else:
outflag = 0
return outflag
# For backwards compatibility
runcode = run_code
#-------------------------------------------------------------------------
# Things related to GUI support and pylab
#-------------------------------------------------------------------------
def enable_gui(self, gui=None):
raise NotImplementedError('Implement enable_gui in a subclass')
def enable_pylab(self, gui=None, import_all=True):
"""Activate pylab support at runtime.
This turns on support for matplotlib, preloads into the interactive
namespace all of numpy and pylab, and configures IPython to correctly
interact with the GUI event loop. The GUI backend to be used can be
        selected with the optional :param:`gui` argument.
Parameters
----------
gui : optional, string
If given, dictates the choice of matplotlib GUI backend to use
(should be one of IPython's supported backends, 'qt', 'osx', 'tk',
'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
matplotlib (as dictated by the matplotlib build-time options plus the
user's matplotlibrc configuration file). Note that not all backends
make sense in all contexts, for example a terminal ipython can't
display figures inline.
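        Examples
        --------
        A minimal sketch of a runtime call (the backend name is only an
        illustration; any supported backend works)::
            ip = get_ipython()
            ip.enable_pylab(gui='qt')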
"""
from IPython.core.pylabtools import mpl_runner
# We want to prevent the loading of pylab to pollute the user's
# namespace as shown by the %who* magics, so we execute the activation
# code in an empty namespace, and we update *both* user_ns and
# user_ns_hidden with this information.
ns = {}
try:
gui = pylab_activate(ns, gui, import_all, self)
except KeyError:
error("Backend %r not supported" % gui)
return
self.user_ns.update(ns)
self.user_ns_hidden.update(ns)
# Now we must activate the gui pylab wants to use, and fix %run to take
# plot updates into account
self.enable_gui(gui)
self.magics_manager.registry['ExecutionMagics'].default_runner = \
mpl_runner(self.safe_execfile)
#-------------------------------------------------------------------------
# Utilities
#-------------------------------------------------------------------------
def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
"""Expand python variables in a string.
The depth argument indicates how many frames above the caller should
be walked to look for the local namespace where to expand variables.
The global namespace for expansion is always the user's interactive
namespace.
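        Examples
        --------
        A rough sketch (``ip`` stands for the running shell instance; the
        variable names are purely illustrative; both ``{name}`` and ``$name``
        forms are handled by the formatter)::
            fname, nlines = 'data.txt', 10
            ip.var_expand('head -n $nlines {fname}')
            # -> 'head -n 10 data.txt'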
"""
ns = self.user_ns.copy()
ns.update(sys._getframe(depth+1).f_locals)
ns.pop('self', None)
try:
cmd = formatter.format(cmd, **ns)
except Exception:
# if formatter couldn't format, just let it go untransformed
pass
return cmd
def mktempfile(self, data=None, prefix='ipython_edit_'):
"""Make a new tempfile and return its filename.
This makes a call to tempfile.mktemp, but it registers the created
filename internally so ipython cleans it up at exit time.
Optional inputs:
- data(None): if data is given, it gets written out to the temp file
immediately, and the file is closed again."""
filename = tempfile.mktemp('.py', prefix)
self.tempfiles.append(filename)
if data:
tmp_file = open(filename,'w')
tmp_file.write(data)
tmp_file.close()
return filename
# TODO: This should be removed when Term is refactored.
def write(self,data):
"""Write a string to the default output"""
io.stdout.write(data)
# TODO: This should be removed when Term is refactored.
def write_err(self,data):
"""Write a string to the default error output"""
io.stderr.write(data)
def ask_yes_no(self, prompt, default=None):
if self.quiet:
return True
return ask_yes_no(prompt,default)
def show_usage(self):
"""Show a usage message"""
page.page(IPython.core.usage.interactive_usage)
def extract_input_lines(self, range_str, raw=False):
"""Return as a string a set of input history slices.
Parameters
----------
range_str : string
The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
since this function is for use by magic functions which get their
arguments as strings. The number before the / is the session
number: ~n goes n back from the current session.
Optional Parameters:
- raw(False): by default, the processed input is used. If this is
true, the raw input history is used instead.
Note that slices can be called with two notations:
N:M -> standard python form, means including items N...(M-1).
N-M -> include items N..M (closed endpoint)."""
lines = self.history_manager.get_range_by_str(range_str, raw=raw)
return "\n".join(x for _, _, x in lines)
def find_user_code(self, target, raw=True, py_only=False):
"""Get a code string from history, file, url, or a string or macro.
This is mainly used by magic functions.
Parameters
----------
target : str
A string specifying code to retrieve. This will be tried respectively
as: ranges of input history (see %history for syntax), url,
          corresponding .py file, filename, or an expression evaluating to a
string or Macro in the user namespace.
raw : bool
If true (default), retrieve raw history. Has no effect on the other
retrieval mechanisms.
py_only : bool (default False)
Only try to fetch python code, do not try alternative methods to decode file
if unicode fails.
Returns
-------
A string of code.
ValueError is raised if nothing is found, and TypeError if it evaluates
to an object of another type. In each case, .args[0] is a printable
message.
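        Examples
        --------
        A few illustrative targets (the file name and macro are hypothetical)::
            ip.find_user_code('1-4')          # lines 1-4 of input history
            ip.find_user_code('script.py')    # contents of a file on disk
            ip.find_user_code('mymacro')      # value of a Macro in user_ns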
"""
code = self.extract_input_lines(target, raw=raw) # Grab history
if code:
return code
utarget = unquote_filename(target)
try:
if utarget.startswith(('http://', 'https://')):
return openpy.read_py_url(utarget, skip_encoding_cookie=True)
except UnicodeDecodeError:
            if not py_only:
                response = urllib.urlopen(target)
                return response.read().decode('latin1')
            raise ValueError("'%s' seems to be unreadable." % utarget)
potential_target = [target]
        try:
            potential_target.insert(0, get_py_filename(target))
        except IOError:
            pass
        for tgt in potential_target:
            if os.path.isfile(tgt):  # Read file
                try:
                    return openpy.read_py_file(tgt, skip_encoding_cookie=True)
                except UnicodeDecodeError:
                    if not py_only:
                        with io_open(tgt, 'r', encoding='latin1') as f:
                            return f.read()
                    raise ValueError("'%s' seems to be unreadable." % target)
try: # User namespace
codeobj = eval(target, self.user_ns)
except Exception:
raise ValueError(("'%s' was not found in history, as a file, url, "
"nor in the user namespace.") % target)
if isinstance(codeobj, basestring):
return codeobj
elif isinstance(codeobj, Macro):
return codeobj.value
raise TypeError("%s is neither a string nor a macro." % target,
codeobj)
#-------------------------------------------------------------------------
# Things related to IPython exiting
#-------------------------------------------------------------------------
def atexit_operations(self):
"""This will be executed at the time of exit.
Cleanup operations and saving of persistent data that is done
unconditionally by IPython should be performed here.
For things that may depend on startup flags or platform specifics (such
as having readline or not), register a separate atexit function in the
        code that has the appropriate information, rather than trying to
        clutter this method with platform-specific checks.
"""
# Close the history session (this stores the end time and line count)
# this must be *before* the tempfile cleanup, in case of temporary
# history db
self.history_manager.end_session()
# Cleanup all tempfiles left around
for tfile in self.tempfiles:
try:
os.unlink(tfile)
except OSError:
pass
# Clear all user namespaces to release all references cleanly.
self.reset(new_session=False)
# Run user hooks
self.hooks.shutdown_hook()
def cleanup(self):
self.restore_sys_module_state()
class InteractiveShellABC(object):
"""An abstract base class for InteractiveShell."""
__metaclass__ = abc.ABCMeta
InteractiveShellABC.register(InteractiveShell)
| lgpl-3.0 |
Obus/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
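# Noise points carry the sentinel label -1 (the convention relied on above);
# counting them alongside the clusters is a small, optional addition to the
# original example.
n_noise_ = list(labels).count(-1)
print('Estimated number of noise points: %d' % n_noise_)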
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
MohammedWasim/scikit-learn | examples/decomposition/plot_image_denoising.py | 181 | 5819 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of the Lena image, first using online :ref:`DictionaryLearning` and then
various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the results of :ref:`omp` with two
non-zero coefficients are a bit less biased than when keeping only one
(the edges look less prominent). They are in addition closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import lena
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
lena = lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
print('Distorting image...')
distorted = lena.copy()
distorted[:, height // 2:] += 0.075 * np.random.randn(width, height // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from Lena patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = lena.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
patches, (width, height // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], lena,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/style/core.py | 8 | 6888 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
"""
Core functions and attributes for the matplotlib style library:
``use``
Select style sheet to override the current matplotlib settings.
``context``
Context manager to use a style sheet temporarily.
``available``
List available style sheets.
``library``
A dictionary of style names and matplotlib settings.
"""
import os
import re
import contextlib
import matplotlib as mpl
from matplotlib import cbook
from matplotlib import rc_params_from_file
__all__ = ['use', 'context', 'available', 'library', 'reload_library']
BASE_LIBRARY_PATH = os.path.join(mpl.get_data_path(), 'stylelib')
# Users may want multiple library paths, so store a list of paths.
USER_LIBRARY_PATHS = [os.path.join(mpl._get_configdir(), 'stylelib')]
STYLE_EXTENSION = 'mplstyle'
STYLE_FILE_PATTERN = re.compile(r'([\S]+)\.%s$' % STYLE_EXTENSION)
def is_style_file(filename):
"""Return True if the filename looks like a style file."""
return STYLE_FILE_PATTERN.match(filename) is not None
def use(style):
"""Use matplotlib style settings from a style specification.
The style name of 'default' is reserved for reverting back to
the default style settings.
Parameters
----------
style : str, dict, or list
A style specification. Valid options are:
+------+-------------------------------------------------------------+
| str | The name of a style or a path/URL to a style file. For a |
| | list of available style names, see `style.available`. |
+------+-------------------------------------------------------------+
| dict | Dictionary with valid key/value pairs for |
| | `matplotlib.rcParams`. |
+------+-------------------------------------------------------------+
| list | A list of style specifiers (str or dict) applied from first |
| | to last in the list. |
+------+-------------------------------------------------------------+
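    Examples
    --------
    A few illustrative calls (the named styles assume the bundled style
    library; the relative path is hypothetical; the dict form sets rc
    parameters directly)::
        import matplotlib.pyplot as plt
        plt.style.use('ggplot')
        plt.style.use(['dark_background', './presentation.mplstyle'])
        plt.style.use({'axes.grid': True})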
"""
if cbook.is_string_like(style) or hasattr(style, 'keys'):
# If name is a single str or dict, make it a single element list.
styles = [style]
else:
styles = style
for style in styles:
if not cbook.is_string_like(style):
mpl.rcParams.update(style)
continue
elif style == 'default':
mpl.rcdefaults()
continue
if style in library:
mpl.rcParams.update(library[style])
else:
try:
rc = rc_params_from_file(style, use_default_template=False)
mpl.rcParams.update(rc)
except IOError:
msg = ("'%s' not found in the style library and input is "
"not a valid URL or path. See `style.available` for "
"list of available styles.")
raise IOError(msg % style)
@contextlib.contextmanager
def context(style, after_reset=False):
"""Context manager for using style settings temporarily.
Parameters
----------
style : str, dict, or list
A style specification. Valid options are:
+------+-------------------------------------------------------------+
| str | The name of a style or a path/URL to a style file. For a |
| | list of available style names, see `style.available`. |
+------+-------------------------------------------------------------+
| dict | Dictionary with valid key/value pairs for |
| | `matplotlib.rcParams`. |
+------+-------------------------------------------------------------+
| list | A list of style specifiers (str or dict) applied from first |
| | to last in the list. |
+------+-------------------------------------------------------------+
after_reset : bool
If True, apply style after resetting settings to their defaults;
otherwise, apply style on top of the current settings.
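    Examples
    --------
    A minimal sketch; the previous settings are restored when the block
    exits::
        import matplotlib.pyplot as plt
        with plt.style.context('ggplot'):
            plt.plot([1, 2, 3])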
"""
initial_settings = mpl.rcParams.copy()
if after_reset:
mpl.rcdefaults()
try:
use(style)
except:
# Restore original settings before raising errors during the update.
mpl.rcParams.update(initial_settings)
raise
else:
yield
finally:
mpl.rcParams.update(initial_settings)
def load_base_library():
"""Load style library defined in this package."""
library = dict()
library.update(read_style_directory(BASE_LIBRARY_PATH))
return library
def iter_user_libraries():
for stylelib_path in USER_LIBRARY_PATHS:
stylelib_path = os.path.expanduser(stylelib_path)
if os.path.exists(stylelib_path) and os.path.isdir(stylelib_path):
yield stylelib_path
def update_user_library(library):
"""Update style library with user-defined rc files"""
for stylelib_path in iter_user_libraries():
styles = read_style_directory(stylelib_path)
update_nested_dict(library, styles)
return library
def iter_style_files(style_dir):
"""Yield file path and name of styles in the given directory."""
for path in os.listdir(style_dir):
filename = os.path.basename(path)
if is_style_file(filename):
match = STYLE_FILE_PATTERN.match(filename)
path = os.path.abspath(os.path.join(style_dir, path))
yield path, match.groups()[0]
def read_style_directory(style_dir):
"""Return dictionary of styles defined in `style_dir`."""
styles = dict()
for path, name in iter_style_files(style_dir):
styles[name] = rc_params_from_file(path, use_default_template=False)
return styles
def update_nested_dict(main_dict, new_dict):
"""Update nested dict (only level of nesting) with new values.
Unlike dict.update, this assumes that the values of the parent dict are
dicts (or dict-like), so you shouldn't replace the nested dict if it
already exists. Instead you should update the sub-dict.
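    For example (values are illustrative)::
        main = {'dark': {'axes.facecolor': 'black'}}
        update_nested_dict(main, {'dark': {'lines.linewidth': 2}})
        # main['dark'] now contains both keys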
"""
# update named styles specified by user
for name, rc_dict in six.iteritems(new_dict):
if name in main_dict:
main_dict[name].update(rc_dict)
else:
main_dict[name] = rc_dict
return main_dict
# Load style library
# ==================
_base_library = load_base_library()
library = None
available = []
def reload_library():
"""Reload style library."""
global library, available
library = update_user_library(_base_library)
available[:] = library.keys()
reload_library()
| mit |
walterreade/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
setten/pymatgen | pymatgen/analysis/tests/test_interface_reactions.py | 4 | 11082 | from __future__ import division, unicode_literals
import matplotlib
matplotlib.use('pdf')
import unittest as unittest
import numpy as np
from pymatgen import Composition
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.analysis.phase_diagram import PhaseDiagram, \
GrandPotentialPhaseDiagram
from pymatgen.analysis.interface_reactions import InterfacialReactivity
class InterfaceReactionTest(unittest.TestCase):
def setUp(self):
self.entries = [ComputedEntry(Composition('Li'), 0),
ComputedEntry(Composition('Mn'), 0),
ComputedEntry(Composition('O2'), 0),
ComputedEntry(Composition('MnO2'), -10),
ComputedEntry(Composition('Mn2O4'), -60),
ComputedEntry(Composition('MnO3'), 20),
ComputedEntry(Composition('Li2O'), -10),
ComputedEntry(Composition('LiMnO2'), -30),
]
self.pd = PhaseDiagram(self.entries)
chempots = {'Li': -3}
self.gpd = GrandPotentialPhaseDiagram(self.entries, chempots)
self.ir = []
self.ir.append(
InterfacialReactivity(Composition('O2'), Composition('Mn'), self.pd,
norm=0, include_no_mixing_energy=0,
pd_non_grand=None))
self.ir.append(
InterfacialReactivity(Composition('MnO2'), Composition('Mn'),
self.gpd, norm=0, include_no_mixing_energy=1,
pd_non_grand=self.pd))
self.ir.append(
InterfacialReactivity(Composition('Mn'), Composition('O2'),
self.gpd, norm=1, include_no_mixing_energy=1,
pd_non_grand=self.pd))
self.ir.append(
InterfacialReactivity(Composition('Li2O'), Composition('Mn'),
self.gpd, norm=0, include_no_mixing_energy=1,
pd_non_grand=self.pd))
self.ir.append(
InterfacialReactivity(Composition('Mn'), Composition('O2'),
self.gpd, norm=1, include_no_mixing_energy=0,
pd_non_grand=self.pd))
self.ir.append(
InterfacialReactivity(Composition('Mn'), Composition('Li2O'),
self.gpd, norm=1, include_no_mixing_energy=1,
pd_non_grand=self.pd))
with self.assertRaises(Exception) as context1:
self.ir.append(
InterfacialReactivity(Composition('O2'), Composition('Mn'),
self.pd, norm=0,
include_no_mixing_energy=1,
pd_non_grand=None))
self.assertTrue(
'Please provide grand phase diagram to compute no_mixing_energy!' == str(
context1.exception))
with self.assertRaises(Exception) as context2:
self.ir.append(
InterfacialReactivity(Composition('O2'), Composition('Mn'),
self.gpd, norm=0,
include_no_mixing_energy=1,
pd_non_grand=None))
self.assertTrue(
'Please provide non-grand phase diagram to compute no_mixing_energy!' == str(
context2.exception))
def test_get_entry_energy(self):
# Test AssertionError
comp = Composition('MnO3')
with self.assertRaises(Exception) as context1:
energy = self.ir[0]._get_entry_energy(self.pd, comp)
self.assertTrue(
'The reactant MnO3 has no matching entry with negative formation energy!' == str(
context1.exception))
# Test normal functionality
comp = Composition('MnO2')
test2 = np.isclose(self.ir[0]._get_entry_energy(self.pd, comp), -30,
atol=1e-03)
self.assertTrue(test2,
'_get_entry_energy: energy for {} is wrong!'.format(
comp.reduced_formula))
def test_get_grand_potential(self):
comp = Composition('LiMnO2')
# Test non-normalized case
test1 = np.isclose(self.ir[1]._get_grand_potential(comp), -27,
atol=1e-03)
self.assertTrue(test1,
'_get_grand_potential: Non-normalized case gets error!')
# Test normalized case
test2 = np.isclose(self.ir[2]._get_grand_potential(comp), -36,
atol=1e-03)
self.assertTrue(test2,
'_get_grand_potential: Normalized case gets error!')
def test_get_energy(self):
test1 = (np.isclose(self.ir[0]._get_energy(0.5), -15, atol=1e-03))
self.assertTrue(test1, '_get_energy: phase diagram gets error!')
test2 = (
np.isclose(self.ir[3]._get_energy(0.6666666), -7.333333, atol=1e-03))
self.assertTrue(test2,
'_get_energy: grand canonical phase diagram gets error!')
def test_get_reaction(self):
test1 = str(self.ir[0]._get_reaction(0.5)) == 'O2 + Mn -> MnO2'
self.assertTrue(test1,
'_get_reaction: reaction not involving chempots species gets error!')
test2 = str(self.ir[0]._get_reaction(0.5,
normalize=1)) == '0.5 O2 + 0.5 Mn -> 0.5 MnO2'
self.assertTrue(test2,
'_get_reaction: reaction not involving chempots species gets error!')
        test3 = str(self.ir[3]._get_reaction(0.666666)) in [
            '2 Mn + 2 Li2O -> 4 Li + MnO2 + Mn',
            '2 Mn + 2 Li2O -> 4 Li + Mn + MnO2']
self.assertTrue(test3,
'_get_reaction: reaction involving chempots species gets error!')
def test_convert(self):
test_array = [(0.5, 1, 3), (0.4, 2, 3), (0, 1, 9), (1, 2, 7)]
result = [self.ir[0]._convert(x, f1, f2) for x, f1, f2 in test_array]
answer = [0.75, 0.5, 0, 1]
self.assertTrue(np.allclose(result, answer),
'_convert: conversion gets error! {0} expected, but gets {1}'.format(
answer, result))
def test_reverse_convert(self):
test_array = [(0.5, 1, 3), (0.4, 2, 3), (0, 1, 9), (1, 2, 7)]
result = [self.ir[0]._reverse_convert(x, f1, f2) for x, f1, f2 in
test_array]
answer = [0.25, 0.3076923, 0, 1]
self.assertTrue(np.allclose(result, answer),
'_convert: conversion gets error! {0} expected, but gets {1}'.format(
answer, result))
def test_get_products(self):
test1 = sorted(self.ir[0].get_products()) == sorted(
['MnO2', 'O2', 'Mn'])
self.assertTrue(test1,
'get_products: decomposition products gets error for reaction not involving chempots species!')
test2 = sorted(self.ir[3].get_products()) == sorted(
['Li', 'MnO2', 'Mn', 'Li2O'])
self.assertTrue(test2,
'get_decomp: decomposition products gets error for reaction involving chempots species!')
def test_get_kinks(self):
ir = self.ir[0]
lst = list(self.ir[0].get_kinks())
index = [i[0] for i in lst]
x_kink = [i[1] for i in lst]
energy_kink = [i[2] for i in lst]
react_kink = [str(i[3]) for i in lst]
test1 = index == [1, 2, 3]
self.assertTrue(test1, 'get_kinks:index gets error!')
test2 = np.allclose(x_kink, [0, 0.5, 1])
self.assertTrue(test2, 'get_kinks:x kinks gets error!')
test3 = np.allclose(energy_kink, [0, -15, 0])
self.assertTrue(test3, 'get_kinks:energy kinks gets error!')
test4 = react_kink == ['Mn -> Mn', 'O2 + Mn -> MnO2', 'O2 -> O2']
self.assertTrue(test4,
'get_kinks:reaction kinks gets error for {0} and {1} reaction!'.format(
ir.c1_original.reduced_formula,
ir.c2_original.reduced_formula))
ir = self.ir[4]
def test_labels(self):
ir = self.ir[0]
dict = ir.labels()
test1 = dict == {1: 'x= 0.0 energy = 0.0 Mn -> Mn',
2: 'x= 0.5 energy = -15.0 O2 + Mn -> MnO2',
3: 'x= 1.0 energy = 0.0 O2 -> O2'}
self.assertTrue(test1,
'labels:label does not match for interfacial system with {0} and {1}.'.format(
ir.c1_original.reduced_formula,
ir.c2_original.reduced_formula))
def test_plot(self):
        # Testing the plot output directly is hard, so we just call the plot
        # function for each system and check that no error is raised.
for i in self.ir:
i.plot()
def test_minimum(self):
answer = [
(0.5, -15),
(0, 0),
(0.3333333, -10),
(0.6666666, -7.333333),
(0.3333333, -7.333333),
(0.1428571, -7.333333)
]
for i, j in zip(self.ir, answer):
self.assertTrue(np.allclose(i.minimum(), j),
'minimum: the system with {0} and {1} gets error!{2} expected, but gets {3}'.format(
i.c1_original.reduced_formula,
i.c2_original.reduced_formula, str(j),
str(i.minimum())))
def test_get_no_mixing_energy(self):
with self.assertRaises(Exception) as context1:
self.ir[0].get_no_mixing_energy()
self.assertTrue(
'Please provide grand potential phase diagram for computing no_mixing_energy!' == str(
context1.exception))
answer = [
[(u'MnO2 (eV/f.u.)', 0.0), (u'Mn (eV/f.u.)', 0.0)],
[(u'Mn (eV/atom)', 0.0), (u'O2 (eV/atom)', -4.0)],
[(u'Li2O (eV/f.u.)', 0.0), (u'Mn (eV/f.u.)', 0.0)],
[(u'Mn (eV/atom)', 0.0), (u'O2 (eV/atom)', -4.0)],
[(u'Mn (eV/atom)', 0.0), (u'Li2O (eV/atom)', 0.0)]
]
def name_lst(lst):
return (lst[0][0], lst[1][0])
def energy_lst(lst):
return (lst[0][1], lst[1][1])
result_info = [i.get_no_mixing_energy() for i in self.ir if i.grand]
for i, j in zip(result_info, answer):
self.assertTrue(name_lst(i) == name_lst(j),
'get_no_mixing_energy: names get error, {0} expected but gets {1}'.format(
name_lst(j), name_lst(i)))
self.assertTrue(energy_lst(i) == energy_lst(j),
'get_no_mixing_energy: no_mixing energies get error, {0} expected but gets {1}'.format(
energy_lst(j), energy_lst(i)))
if __name__ == '__main__':
unittest.main()
| mit |
jreback/pandas | pandas/tests/util/test_deprecate.py | 8 | 1626 | from textwrap import dedent
import pytest
from pandas.util._decorators import deprecate
import pandas._testing as tm
def new_func():
"""
This is the summary. The deprecate directive goes next.
This is the extended summary. The deprecate directive goes before this.
"""
return "new_func called"
def new_func_no_docstring():
return "new_func_no_docstring called"
def new_func_wrong_docstring():
"""Summary should be in the next line."""
return "new_func_wrong_docstring called"
def new_func_with_deprecation():
"""
This is the summary. The deprecate directive goes next.
.. deprecated:: 1.0
Use new_func instead.
This is the extended summary. The deprecate directive goes before this.
"""
pass
def test_deprecate_ok():
depr_func = deprecate("depr_func", new_func, "1.0", msg="Use new_func instead.")
with tm.assert_produces_warning(FutureWarning):
result = depr_func()
assert result == "new_func called"
assert depr_func.__doc__ == dedent(new_func_with_deprecation.__doc__)
def test_deprecate_no_docstring():
depr_func = deprecate(
"depr_func", new_func_no_docstring, "1.0", msg="Use new_func instead."
)
with tm.assert_produces_warning(FutureWarning):
result = depr_func()
assert result == "new_func_no_docstring called"
def test_deprecate_wrong_docstring():
msg = "deprecate needs a correctly formatted docstring"
with pytest.raises(AssertionError, match=msg):
deprecate(
"depr_func", new_func_wrong_docstring, "1.0", msg="Use new_func instead."
)
| bsd-3-clause |
nileracecrew/seaborn | seaborn/timeseries.py | 13 | 15218 | """Timeseries plotting functions."""
from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats, interpolate
import matplotlib as mpl
import matplotlib.pyplot as plt
from .external.six import string_types
from . import utils
from . import algorithms as algo
from .palettes import color_palette
def tsplot(data, time=None, unit=None, condition=None, value=None,
err_style="ci_band", ci=68, interpolate=True, color=None,
estimator=np.mean, n_boot=5000, err_palette=None, err_kws=None,
legend=True, ax=None, **kwargs):
"""Plot one or more timeseries with flexible representation of uncertainty.
This function is intended to be used with data where observations are
nested within sampling units that were measured at multiple timepoints.
It can take data specified either as a long-form (tidy) DataFrame or as an
    ndarray with dimensions (unit, time). The interpretation of some of the
other parameters changes depending on the type of object passed as data.
Parameters
----------
data : DataFrame or ndarray
Data for the plot. Should either be a "long form" dataframe or an
array with dimensions (unit, time, condition). In both cases, the
condition field/dimension is optional. The type of this argument
determines the interpretation of the next few parameters. When
using a DataFrame, the index has to be sequential.
time : string or series-like
Either the name of the field corresponding to time in the data
DataFrame or x values for a plot when data is an array. If a Series,
the name will be used to label the x axis.
unit : string
Field in the data DataFrame identifying the sampling unit (e.g.
subject, neuron, etc.). The error representation will collapse over
units at each time/condition observation. This has no role when data
is an array.
value : string
Either the name of the field corresponding to the data values in
the data DataFrame (i.e. the y coordinate) or a string that forms
the y axis label when data is an array.
condition : string or Series-like
Either the name of the field identifying the condition an observation
falls under in the data DataFrame, or a sequence of names with a length
equal to the size of the third dimension of data. There will be a
separate trace plotted for each condition. If condition is a Series
with a name attribute, the name will form the title for the plot
legend (unless legend is set to False).
err_style : string or list of strings or None
Names of ways to plot uncertainty across units from set of
{ci_band, ci_bars, boot_traces, boot_kde, unit_traces, unit_points}.
Can use one or more than one method.
ci : float or list of floats in [0, 100]
        Confidence interval size(s). If a list, it will stack the error
plots for each confidence interval. Only relevant for error styles
with "ci" in the name.
interpolate : boolean
Whether to do a linear interpolation between each timepoint when
plotting. The value of this parameter also determines the marker
used for the main plot traces, unless marker is specified as a keyword
argument.
color : seaborn palette or matplotlib color name or dictionary
Palette or color for the main plots and error representation (unless
plotting by unit, which can be separately controlled with err_palette).
If a dictionary, should map condition name to color spec.
estimator : callable
Function to determine central tendency and to pass to bootstrap
must take an ``axis`` argument.
n_boot : int
Number of bootstrap iterations.
err_palette : seaborn palette
Palette name or list of colors used when plotting data for each unit.
err_kws : dict, optional
Keyword argument dictionary passed through to matplotlib function
        generating the error plot.
legend : bool, optional
If ``True`` and there is a ``condition`` variable, add a legend to
the plot.
ax : axis object, optional
Plot in given axis; if None creates a new figure
kwargs :
Other keyword arguments are passed to main plot() call
Returns
-------
ax : matplotlib axis
axis with plot data
Examples
--------
Plot a trace with translucent confidence bands:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(22)
>>> import seaborn as sns; sns.set(color_codes=True)
>>> x = np.linspace(0, 15, 31)
>>> data = np.sin(x) + np.random.rand(10, 31) + np.random.randn(10, 1)
>>> ax = sns.tsplot(data=data)
Plot a long-form dataframe with several conditions:
.. plot::
:context: close-figs
>>> gammas = sns.load_dataset("gammas")
>>> ax = sns.tsplot(time="timepoint", value="BOLD signal",
... unit="subject", condition="ROI",
... data=gammas)
Use error bars at the positions of the observations:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="ci_bars", color="g")
Don't interpolate between the observations:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> ax = sns.tsplot(data=data, err_style="ci_bars", interpolate=False)
Show multiple confidence bands:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, ci=[68, 95], color="m")
Use a different estimator:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, estimator=np.median)
Show each bootstrap resample:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="boot_traces", n_boot=500)
Show the trace from each sampling unit:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="unit_traces")
"""
# Sort out default values for the parameters
if ax is None:
ax = plt.gca()
if err_kws is None:
err_kws = {}
# Handle different types of input data
if isinstance(data, pd.DataFrame):
xlabel = time
ylabel = value
# Condition is optional
if condition is None:
condition = pd.Series(np.ones(len(data)))
legend = False
legend_name = None
n_cond = 1
else:
legend = True and legend
legend_name = condition
n_cond = len(data[condition].unique())
else:
data = np.asarray(data)
# Data can be a timecourse from a single unit or
# several observations in one condition
if data.ndim == 1:
data = data[np.newaxis, :, np.newaxis]
elif data.ndim == 2:
data = data[:, :, np.newaxis]
n_unit, n_time, n_cond = data.shape
# Units are experimental observations. Maybe subjects, or neurons
if unit is None:
units = np.arange(n_unit)
unit = "unit"
units = np.repeat(units, n_time * n_cond)
ylabel = None
# Time forms the xaxis of the plot
if time is None:
times = np.arange(n_time)
else:
times = np.asarray(time)
xlabel = None
if hasattr(time, "name"):
xlabel = time.name
time = "time"
times = np.tile(np.repeat(times, n_cond), n_unit)
# Conditions split the timeseries plots
if condition is None:
conds = range(n_cond)
legend = False
if isinstance(color, dict):
err = "Must have condition names if using color dict."
raise ValueError(err)
else:
conds = np.asarray(condition)
legend = True and legend
if hasattr(condition, "name"):
legend_name = condition.name
else:
legend_name = None
condition = "cond"
conds = np.tile(conds, n_unit * n_time)
# Value forms the y value in the plot
if value is None:
ylabel = None
else:
ylabel = value
value = "value"
# Convert to long-form DataFrame
data = pd.DataFrame(dict(value=data.ravel(),
time=times,
unit=units,
cond=conds))
# Set up the err_style and ci arguments for the loop below
if isinstance(err_style, string_types):
err_style = [err_style]
elif err_style is None:
err_style = []
if not hasattr(ci, "__iter__"):
ci = [ci]
# Set up the color palette
if color is None:
current_palette = mpl.rcParams["axes.color_cycle"]
if len(current_palette) < n_cond:
colors = color_palette("husl", n_cond)
else:
colors = color_palette(n_colors=n_cond)
elif isinstance(color, dict):
colors = [color[c] for c in data[condition].unique()]
else:
try:
colors = color_palette(color, n_cond)
except ValueError:
color = mpl.colors.colorConverter.to_rgb(color)
colors = [color] * n_cond
# Do a groupby with condition and plot each trace
for c, (cond, df_c) in enumerate(data.groupby(condition, sort=False)):
df_c = df_c.pivot(unit, time, value)
x = df_c.columns.values.astype(np.float)
# Bootstrap the data for confidence intervals
boot_data = algo.bootstrap(df_c.values, n_boot=n_boot,
axis=0, func=estimator)
cis = [utils.ci(boot_data, v, axis=0) for v in ci]
central_data = estimator(df_c.values, axis=0)
# Get the color for this condition
color = colors[c]
# Use subroutines to plot the uncertainty
for style in err_style:
# Allow for null style (only plot central tendency)
if style is None:
continue
# Grab the function from the global environment
try:
plot_func = globals()["_plot_%s" % style]
except KeyError:
raise ValueError("%s is not a valid err_style" % style)
# Possibly set up to plot each observation in a different color
if err_palette is not None and "unit" in style:
orig_color = color
color = color_palette(err_palette, len(df_c.values))
# Pass all parameters to the error plotter as keyword args
plot_kwargs = dict(ax=ax, x=x, data=df_c.values,
boot_data=boot_data,
central_data=central_data,
color=color, err_kws=err_kws)
# Plot the error representation, possibly for multiple cis
for ci_i in cis:
plot_kwargs["ci"] = ci_i
plot_func(**plot_kwargs)
if err_palette is not None and "unit" in style:
color = orig_color
# Plot the central trace
kwargs.setdefault("marker", "" if interpolate else "o")
ls = kwargs.pop("ls", "-" if interpolate else "")
kwargs.setdefault("linestyle", ls)
label = cond if legend else "_nolegend_"
ax.plot(x, central_data, color=color, label=label, **kwargs)
# Pad the sides of the plot only when not interpolating
ax.set_xlim(x.min(), x.max())
x_diff = x[1] - x[0]
if not interpolate:
ax.set_xlim(x.min() - x_diff, x.max() + x_diff)
# Add the plot labels
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if legend:
ax.legend(loc=0, title=legend_name)
return ax
# Subroutines for tsplot errorbar plotting
# ----------------------------------------
def _plot_ci_band(ax, x, ci, color, err_kws, **kwargs):
"""Plot translucent error bands around the central tendancy."""
low, high = ci
if "alpha" not in err_kws:
err_kws["alpha"] = 0.2
ax.fill_between(x, low, high, color=color, **err_kws)
def _plot_ci_bars(ax, x, central_data, ci, color, err_kws, **kwargs):
"""Plot error bars at each data point."""
for x_i, y_i, (low, high) in zip(x, central_data, ci.T):
ax.plot([x_i, x_i], [low, high], color=color,
solid_capstyle="round", **err_kws)
def _plot_boot_traces(ax, x, boot_data, color, err_kws, **kwargs):
"""Plot 250 traces from bootstrap."""
err_kws.setdefault("alpha", 0.25)
err_kws.setdefault("linewidth", 0.25)
if "lw" in err_kws:
err_kws["linewidth"] = err_kws.pop("lw")
ax.plot(x, boot_data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_traces(ax, x, data, ci, color, err_kws, **kwargs):
"""Plot a trace for each observation in the original data."""
if isinstance(color, list):
if "alpha" not in err_kws:
err_kws["alpha"] = .5
for i, obs in enumerate(data):
ax.plot(x, obs, color=color[i], label="_nolegend_", **err_kws)
else:
if "alpha" not in err_kws:
err_kws["alpha"] = .2
ax.plot(x, data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_points(ax, x, data, color, err_kws, **kwargs):
"""Plot each original data point discretely."""
if isinstance(color, list):
for i, obs in enumerate(data):
ax.plot(x, obs, "o", color=color[i], alpha=0.8, markersize=4,
label="_nolegend_", **err_kws)
else:
ax.plot(x, data.T, "o", color=color, alpha=0.5, markersize=4,
label="_nolegend_", **err_kws)
def _plot_boot_kde(ax, x, boot_data, color, **kwargs):
"""Plot the kernal density estimate of the bootstrap distribution."""
kwargs.pop("data")
_ts_kde(ax, x, boot_data, color, **kwargs)
def _plot_unit_kde(ax, x, data, color, **kwargs):
"""Plot the kernal density estimate over the sample."""
_ts_kde(ax, x, data, color, **kwargs)
def _ts_kde(ax, x, data, color, **kwargs):
"""Upsample over time and plot a KDE of the bootstrap distribution."""
kde_data = []
y_min, y_max = data.min(), data.max()
y_vals = np.linspace(y_min, y_max, 100)
upsampler = interpolate.interp1d(x, data)
data_upsample = upsampler(np.linspace(x.min(), x.max(), 100))
for pt_data in data_upsample.T:
pt_kde = stats.kde.gaussian_kde(pt_data)
kde_data.append(pt_kde(y_vals))
kde_data = np.transpose(kde_data)
rgb = mpl.colors.ColorConverter().to_rgb(color)
img = np.zeros((kde_data.shape[0], kde_data.shape[1], 4))
img[:, :, :3] = rgb
kde_data /= kde_data.max(axis=0)
kde_data[kde_data > 1] = 1
img[:, :, 3] = kde_data
ax.imshow(img, interpolation="spline16", zorder=2,
extent=(x.min(), x.max(), y_min, y_max),
aspect="auto", origin="lower")
| bsd-3-clause |
pmyates/plutokore | scripts/generate-plots-for-marissa.py | 2 | 17219 | #!/usr/bin/env python3
""" Generate some simple plots from simulations
This script generates a few simple plots from the given simulation.
The goal is to highlight both the 1st and 2nd outburst in a 4-outburst
simulation.
The plots generated are:
* Density (full-plane reflected)
* Pressure (full-plane reflected)
* Jet velocity (full-plane reflected)
* Tracers (full-plane reflected)
* Surface brightness (full-plane reflected)
Changes:
* Inital version (Patrick, 27.10.2018)
"""
import os
import sys
if os.path.exists(os.path.expanduser('~/plutokore')):
sys.path.append(os.path.expanduser('~/plutokore'))
else:
sys.path.append(os.path.expanduser('~/uni/plutokore'))
import plutokore as pk
import matplotlib as mpl
mpl.use('PS')
import matplotlib.pyplot as plot
from matplotlib.colors import LinearSegmentedColormap
import numpy as np
import argparse
from plutokore import radio
from numba import jit
from astropy.convolution import convolve, Gaussian2DKernel
import astropy.units as u
from astropy.cosmology import Planck15 as cosmo
from astropy.table import QTable
import pathlib
import h5py
import code
def create_plots(*, sim_dir, plot_dir, output_number, sim_info, observing_properties, plot_properties):
"""
    Create the plots for a single output: density, pressure, jet velocity,
    tracers, and surface brightness.
"""
# load the simulation information
uv = sim_info['uv']
env = sim_info['env']
jet = sim_info['jet']
# load simulation
sim_data = pk.simulations.load_timestep_data(output_number, sim_dir, mmap=True)
sim_data.x2r[-1] = np.pi
rr, tt = np.meshgrid(sim_data.x1r, sim_data.x2r)
x = rr * np.cos(tt)
y = rr * np.sin(tt)
rmesh, tmesh = np.meshgrid(sim_data.x1, sim_data.x2)
# x, y = pk.simulations.sphericaltocartesian(sim_data, rotation=plot_properties['rotation'])
x = x * uv.length
y = y * uv.length
if plot_properties['plot_in_arcsec']:
x = (x * observing_properties['kpc2arcsec']).to(u.arcsec)
y = (y * observing_properties['kpc2arcsec']).to(u.arcsec)
# let's check if this simulation is quarter-plane or half-plane (2D)
if (sim_data.geometry == 'SPHERICAL') and (len(sim_data.nshp) == 2):
pass
else:
quit('Unsupported geometry and/or dimensions')
is_quarter_plane = (sim_data.x2[-1] - sim_data.x2[0]) < (3.0*np.pi / 4.0)
# plot density
f,a = setup_figure(sim_time = (sim_data.SimTime * uv.time).to(u.Myr), plot_properties = plot_properties, observing_properties = observing_properties)
rho = sim_data.rho * uv.density.to(u.kg / u.m ** 3).value
im = a.pcolormesh(x, y, np.log10(rho.T), vmin=-27, vmax=-23, rasterized = True, edgecolors = 'none', shading = 'flat')
im = a.pcolormesh(x, -y, np.log10(rho.T), vmin=-27, vmax=-23, rasterized = True, edgecolors = 'none', shading = 'flat')
if is_quarter_plane:
im = a.pcolormesh(-x, y, np.log10(rho.T), vmin=-27, vmax=-23, rasterized = True, edgecolors = 'none', shading = 'flat')
im = a.pcolormesh(-x, -y, np.log10(rho.T), vmin=-27, vmax=-23, rasterized = True, edgecolors = 'none', shading = 'flat')
cb = f.colorbar(im)
cb.set_label('Density [log10 kg cm^-3]')
save_figure(
fig=f,
ax=a,
cbx=cb,
plot_properties=plot_properties,
fig_path=os.path.join(plot_dir, f'density_{output_number:02d}.png'),
)
plot.close(f)
# plot pressure
f,a = setup_figure(sim_time = (sim_data.SimTime * uv.time).to(u.Myr), plot_properties = plot_properties, observing_properties = observing_properties)
prs = sim_data.prs * uv.pressure.to(u.Pa).value
im = a.pcolormesh(x, y, np.log10(prs.T), vmin=-16, vmax=-11.5, rasterized = True, edgecolors = 'none', shading = 'flat')
im = a.pcolormesh(x, -y, np.log10(prs.T), vmin=-16, vmax=-11.5, rasterized = True, edgecolors = 'none', shading = 'flat')
if is_quarter_plane:
im = a.pcolormesh(-x, -y, np.log10(prs.T), vmin=-16, vmax=-11.5, rasterized = True, edgecolors = 'none', shading = 'flat')
im = a.pcolormesh(-x, y, np.log10(prs.T), vmin=-16, vmax=-11.5, rasterized = True, edgecolors = 'none', shading = 'flat')
cb = f.colorbar(im)
cb.set_label('Pressure [log10 Pa]')
save_figure(
fig=f,
ax=a,
cbx=cb,
plot_properties=plot_properties,
fig_path=os.path.join(plot_dir, f'pressure_{output_number:02d}.png'),
)
plot.close(f)
# plot jet velocity
f,a = setup_figure(sim_time = (sim_data.SimTime * uv.time).to(u.Myr), plot_properties = plot_properties, observing_properties = observing_properties)
vx = (sim_data.vx1 * (np.sin(tmesh.T)) + rmesh.T * sim_data.vx2 * (np.cos(tmesh.T))) * uv.speed.to(u.km / u.s).value
vx = sim_data.vx1 * uv.speed.to(u.km / u.s).value
# import ipdb; ipdb.set_trace()
im = a.pcolormesh(x, y, vx.T, vmin=None, vmax=None, rasterized = True, edgecolors = 'none', shading = 'flat')
im = a.pcolormesh(x, -y, vx.T, vmin=None, vmax=None, rasterized = True, edgecolors = 'none', shading = 'flat')
if is_quarter_plane:
im = a.pcolormesh(-x, -y, vx.T, vmin=-16, vmax=-11.5, rasterized = True, edgecolors = 'none', shading = 'flat')
im = a.pcolormesh(-x, y, vx.T, vmin=-16, vmax=-11.5, rasterized = True, edgecolors = 'none', shading = 'flat')
cb = f.colorbar(im)
cb.set_label('Velocity [km s^-1]')
save_figure(
fig=f,
ax=a,
cbx=cb,
plot_properties=plot_properties,
fig_path=os.path.join(plot_dir, f'velocity_{output_number:02d}.png'),
)
plot.close(f)
# plot tracer
f,a = setup_figure(sim_time = (sim_data.SimTime * uv.time).to(u.Myr), plot_properties = plot_properties, observing_properties = observing_properties)
tracer_count = pk.simulations.get_tracer_count_data(sim_data)
tr1 = sim_data.tr1
im1 = a.pcolormesh(x, y, tr1.T, vmin=0, vmax=1, cmap='Blues_alpha', rasterized = True, edgecolors = 'none', shading = 'flat')
im1 = a.pcolormesh(x, -y, tr1.T, vmin=0, vmax=1, cmap='Blues_alpha', rasterized = True, edgecolors = 'none', shading = 'flat')
if is_quarter_plane:
im1 = a.pcolormesh(-x, -y, tr1.T, vmin=0, vmax=1, cmap='Blues_alpha', rasterized = True, edgecolors = 'none', shading = 'flat')
im1 = a.pcolormesh(-x, y, tr1.T, vmin=0, vmax=1, cmap='Blues_alpha', rasterized = True, edgecolors = 'none', shading = 'flat')
# only plot second tracer if we have more than one!
if tracer_count > 1:
tr2 = sim_data.tr2
im1 = a.pcolormesh(x, y, tr2.T, vmin=0, vmax=1, cmap='Reds_alpha', rasterized = True, edgecolors = 'none', shading = 'flat')
im1 = a.pcolormesh(x, -y, tr2.T, vmin=0, vmax=1, cmap='Reds_alpha', rasterized = True, edgecolors = 'none', shading = 'flat')
if is_quarter_plane:
im1 = a.pcolormesh(-x, -y, tr2.T, vmin=0, vmax=1, cmap='Reds_alpha', rasterized = True, edgecolors = 'none', shading = 'flat')
im1 = a.pcolormesh(-x, y, tr2.T, vmin=0, vmax=1, cmap='Reds_alpha', rasterized = True, edgecolors = 'none', shading = 'flat')
save_figure(
fig=f,
ax=a,
cbx=cb,
plot_properties=plot_properties,
fig_path=os.path.join(plot_dir, f'tracers_{output_number:02d}.png'),
)
plot.close(f)
f,a = setup_figure(sim_time = (sim_data.SimTime * uv.time).to(u.Myr), plot_properties = {**plot_properties, 'plot_in_arcsec': True}, observing_properties = observing_properties)
(X, Y, sb) = calculate_surface_brightness(
sim_data = sim_data,
uv = uv,
observing_properties = observing_properties,
is_quarter_plane = is_quarter_plane,
do_convolve = True,
)
im = a.pcolormesh(Y, X, np.log10(sb.value), vmin=-3, vmax=2, rasterized = True, edgecolors = 'face', shading = 'flat')
im = a.pcolormesh(Y, -X, np.log10(sb.value), vmin=-3, vmax=2, rasterized = True, edgecolors = 'face', shading = 'flat')
if is_quarter_plane:
im = a.pcolormesh(-Y, X, np.log10(sb.value), vmin=-3, vmax=2, rasterized = True, edgecolors = 'face', shading = 'flat')
im = a.pcolormesh(-Y, -X, np.log10(sb.value), vmin=-3, vmax=2, rasterized = True, edgecolors = 'face', shading = 'flat')
cb = f.colorbar(im)
cb.set_label('Surface Brightness [log10 mJy beam^-1]')
save_figure(
fig=f,
ax=a,
cbx=cb,
plot_properties=plot_properties,
fig_path=os.path.join(plot_dir, f'sb_{output_number:02d}.png'),
)
plot.close(f)
def setup_figure(*, sim_time, plot_properties, observing_properties):
fig,ax = plot.subplots(figsize=(10,5))
ax.set_xlim(observing_properties['xlim'].value)
ax.set_ylim(observing_properties['ylim'].value)
if plot_properties['plot_in_arcsec']:
ax.set_xlabel('X ["]')
ax.set_ylabel('Y ["]')
else:
ax.set_xlabel('X [kpc]')
ax.set_ylabel('Y [kpc]')
ax.set_title(f'{sim_time:0.02f}')
ax.set_aspect('equal')
return fig,ax
def save_figure(*, fig, ax, cbx, plot_properties, fig_path):
if plot_properties['fluff'] is False:
if cbx.ax in fig.axes:
fig.delaxes(cbx.ax)
ax.set_title('')
ax.set_position([0, 0, 1, 1])
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_xticks([])
ax.set_yticks([])
ax.set_axis_off()
fig.savefig(fig_path, dpi=plot_properties['dpi'], bbox_inches='tight')
@jit(nopython=True, cache=True)
def raytrace_surface_brightness(r, theta, x, y, z, raytraced_values, original_values):
phi = 0
rmax = np.max(r)
thetamax = np.max(theta)
x_half_step = (x[1] - x[0]) * 0.5
pi2_recip = (1 / (2 * np.pi))
visited = np.zeros(original_values.shape)
for x_index in range(len(x)):
for z_index in range(len(z)):
visited[:,:] = 0
for y_index in range(len(y)):
# Calculate the coordinates of this point
ri = np.sqrt(x[x_index] **2 + y[y_index] ** 2 + z[z_index] ** 2)
if ri == 0:
continue
if ri > rmax:
continue
thetai = np.arccos(z[z_index] / ri)
if thetai > thetamax:
continue
phii = 0 # Don't care about phii!!
chord_length = np.abs(np.arctan2(y[y_index], x[x_index] + x_half_step) - np.arctan2(y[y_index], x[x_index] - x_half_step))
# Now find index in r and theta arrays corresponding to this point
r_index = np.argmax(r>ri)
theta_index = np.argmax(theta>thetai)
# Add this cell's contribution only if it has been visited fewer than twice on this sightline
if visited[r_index, theta_index] <= 1:
raytraced_values[x_index, z_index] += original_values[r_index, theta_index] * chord_length * pi2_recip
visited[r_index, theta_index] += 1
#return raytraced_values
return
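# Illustrative usage sketch (defined but never called in this script): drive the raytracer
# above with small synthetic grids. Grid sizes and the uniform emissivity are arbitrary
# placeholders in code units, chosen only to show the calling convention.
def _raytrace_demo():
    r = np.linspace(0.01, 1.0, 32)            # simulation radial grid
    theta = np.linspace(0.0, np.pi / 2, 16)   # simulation polar grid (quarter plane)
    x = np.linspace(0.0, 1.0, 20)             # image-plane axis
    z = np.linspace(0.0, 1.0, 20)             # image-plane axis
    y = np.linspace(-1.0, 1.0, 40)            # line-of-sight axis
    emissivity = np.ones((r.size, theta.size))
    image = np.zeros((x.size, z.size))
    raytrace_surface_brightness(r, theta, x, y, z, image, emissivity)
    return image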
def calculate_surface_brightness(*, sim_data, uv, observing_properties, do_convolve, is_quarter_plane):
xlim = observing_properties['ylim']
ylim = observing_properties['xlim']
# convert the beam FWHM to a Gaussian sigma (FWHM = 2.355 * sigma)
sigma_beam = (observing_properties['beamwidth'] / 2.355)
# calculate kpc per arcsec
kpc_per_arcsec = cosmo.kpc_proper_per_arcmin(observing_properties['redshift']).to(u.kpc / u.arcsec)
# load timestep data file
d = sim_data
# calculate luminosity and unraytraced flux
l = radio.get_luminosity(d, uv, observing_properties['redshift'], observing_properties['beamwidth'])
f = radio.get_flux_density(l, observing_properties['redshift']).to(u.Jy).value
# calculate raytracing grid
xmax = ((xlim[1] + observing_properties['pixelsize'] * kpc_per_arcsec) / uv.length).si
zmax = ((ylim[1] + observing_properties['pixelsize'] * kpc_per_arcsec) / uv.length).si
if not is_quarter_plane:
xmin = ((xlim[0] - observing_properties['pixelsize'] * kpc_per_arcsec) / uv.length).si
zmin = ((ylim[0] - observing_properties['pixelsize'] * kpc_per_arcsec) / uv.length).si
xstep = (observing_properties['pixelsize'] * kpc_per_arcsec / uv.length).si
zstep = (observing_properties['pixelsize'] * kpc_per_arcsec / uv.length).si
ymax = max(xmax, zmax)
ystep = min(xstep, zstep)
# ystep = ((0.25 * u.kpc) / uv.length).si
if is_quarter_plane:
x = np.arange(0, xmax, xstep)
z = np.arange(0, zmax, zstep)
else:
x = np.arange(0, xmax, xstep)
z = np.arange(zmin, zmax, zstep)
y = np.arange(-ymax, ymax, ystep)
raytraced_flux = np.zeros((x.shape[0], z.shape[0]))
# raytrace surface brightness
raytrace_surface_brightness(
r=d.x1,
theta=d.x2,
x=x,
y=y,
z=z,
original_values=f,
raytraced_values=raytraced_flux
)
raytraced_flux = raytraced_flux * u.Jy
# beam information
area_beam_kpc2 = (np.pi * (sigma_beam * kpc_per_arcsec)
**2).to(u.kpc**2)
beams_per_cell = (((observing_properties['pixelsize'] * kpc_per_arcsec) ** 2) / area_beam_kpc2).si
raytraced_flux /= beams_per_cell
beam_kernel = Gaussian2DKernel(sigma_beam.value)
if do_convolve:
flux = convolve(raytraced_flux.to(u.Jy), beam_kernel, boundary='extend') * u.Jy
else:
flux = raytraced_flux
X1 = x * (uv.length / kpc_per_arcsec).to(u.arcsec).value
X2 = z * (uv.length / kpc_per_arcsec).to(u.arcsec).value
return (X1, X2, flux.to(u.mJy))
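# Hedged arithmetic sketch (defined but never called) of the per-beam normalisation used
# above; the 1.8" pixel and 5" FWHM are example values only. A Gaussian beam of FWHM w has
# sigma = w / 2.355, the code above uses pi * sigma**2 as the beam area, and dividing each
# pixel's flux by (pixel area / beam area) converts Jy per pixel into Jy per beam.
def _beams_per_pixel_example(pixel_arcsec=1.8, beam_fwhm_arcsec=5.0):
    sigma = beam_fwhm_arcsec / 2.355
    beam_area = np.pi * sigma ** 2       # arcsec^2, as computed above
    pixel_area = pixel_arcsec ** 2       # arcsec^2
    return pixel_area / beam_area        # the beams_per_cell factor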
def create_alpha_colormap(*, name):
ncolors = 256
color_array = plot.get_cmap(name)(range(ncolors))
color_array[:, -1] = np.linspace(0.0, 1.0, ncolors)
map_object = LinearSegmentedColormap.from_list(name=f'{name}_alpha', colors=color_array)
plot.register_cmap(cmap=map_object)
def main():
parser = argparse.ArgumentParser(formatter_class = argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('simulation_directory', help='Simulation directory', type=str)
parser.add_argument('output_directory', help='Output directory', type=str)
parser.add_argument('outputs', help='Output numbers', type=int, nargs='+')
parser.add_argument('--trc_cutoff', help='Tracer cutoff', type=float, default=1e-14)
parser.add_argument('--redshift', help='Redshift value', type=float, default=0.05)
parser.add_argument('--beamwidth', help='Observing beam width [arcsec]', type=float, default=5)
parser.add_argument('--pixelsize', help='Observing pixel size [arcsec]', type=float, default=1.8)
parser.add_argument('--xlim', help='X limits [kpc]', type=float, nargs=2, default=[-60,60])
parser.add_argument('--ylim', help='Y limits [kpc]', type=float, nargs=2, default=[-60,60])
parser.add_argument('--plot_in_arcsec', help='Plot axes in arcsec', action='store_true')
parser.add_argument('--rotation', help='Rotation of output', type=float, default=np.pi / 2)
parser.add_argument('--dpi', help='DPI to save figure at', type=float, default=300)
parser.add_argument('--no_fluff', help='Save the figure without any axes labels, ticks, or titles', action='store_true')
args = parser.parse_args()
# Update observing properties
observing_properties = {
'redshift': args.redshift,
'beamwidth': args.beamwidth * u.arcsec,
'pixelsize': args.pixelsize * u.arcsec,
'xlim': args.xlim * u.kpc,
'ylim': args.ylim * u.kpc,
'kpc2arcsec': 1.0/cosmo.kpc_proper_per_arcmin(args.redshift).to(u.kpc / u.arcsec)
}
# update plot properties
plot_properties = {
'plot_in_arcsec': args.plot_in_arcsec,
'rotation': args.rotation,
'dpi': args.dpi,
'fluff': not args.no_fluff,
}
# load the simulation information
uv, env, jet = pk.configuration.load_simulation_info(os.path.join(args.simulation_directory, 'config.yaml'))
sim_info = {
'uv': uv,
'env': env,
'jet': jet,
}
print('Generating plots for the following outputs:')
print(args.outputs)
print()
print('Observing properties are:')
print(f'> r: {observing_properties["redshift"]}, beamwidth: {observing_properties["beamwidth"]}, pixelsize: {observing_properties["pixelsize"]}')
print(f'> xlim: {observing_properties["xlim"]}, ylim: {observing_properties["ylim"]}')
print()
print('The environment and jet properties are:')
print(f'> Environment: {type(env).__name__}, halo mass = {np.log10(env.halo_mass.value)}, central density = {env.central_density}')
print(f'> Jet: power = {jet.Q}, density = {jet.rho_0}, mach number = {jet.M_x}, half-opening angle = {np.rad2deg(jet.theta)}')
print()
# create output directory if needed
pathlib.Path(args.output_directory).mkdir(parents = True, exist_ok = True)
# Let's generate our custom colormaps
create_alpha_colormap(name='Blues')
create_alpha_colormap(name='Reds')
for i in args.outputs:
create_plots(
sim_dir = args.simulation_directory,
plot_dir = args.output_directory,
output_number = i,
sim_info = sim_info,
observing_properties = observing_properties,
plot_properties = plot_properties,
)
if __name__ == '__main__':
main()
# -*- coding: utf-8 -*-
| gpl-3.0 |
isomerase/RoboSkeeter | test_3dheat.py | 2 | 3099 | # import numpy as np
# from scipy import stats
# from mayavi import mlab
# import multiprocessing
# import matplotlib.pyplot as plt
#
# x, y = np.mgrid[-1.0:1.0:30j, -1.0:1.0:30j]
# # Need an (N, 2) array of (x, y) pairs.
# xy = np.column_stack([x.flat, y.flat])
#
# mu = np.array([0.0, 0.0])
#
# sigma = np.array([.025, .025])
# covariance = np.diag(sigma**2)
#
# z = stats.multivariate_normal.pdf(xy, mean=mu, cov=covariance)
#
# # Reshape back to a (30, 30) grid.
# z = z.reshape(x.shape)
#
import numpy as np
from scipy import stats
from mayavi import mlab
import multiprocessing
from matplotlib.cm import get_cmap
values = np.linspace(0., 1., 256)
lut_dict = {}
lut_dict['plasma'] = get_cmap('plasma')(values.copy())
def calc_kde(data):
return kde(data.T)
mu, sigma = 0, 0.01
x = 10*np.random.normal(mu, sigma, 1000)
y = 10*np.random.normal(mu, sigma, 1000)
z = 10*np.random.normal(mu, sigma, 1000)
xyz = np.vstack([x,y,z])
kde = stats.gaussian_kde(xyz)
# Evaluate kde on a grid
xmin, ymin, zmin = x.min(), y.min(), z.min()
xmax, ymax, zmax = x.max(), y.max(), z.max()
xi, yi, zi = np.mgrid[xmin:xmax:30j, ymin:ymax:30j, zmin:zmax:30j]
coords = np.vstack([item.ravel() for item in [xi, yi, zi]])
# Multiprocessing
cores = multiprocessing.cpu_count()
pool = multiprocessing.Pool(processes=cores)
results = pool.map(calc_kde, np.array_split(coords.T, 2))  # the 2 is the number of chunks handed to the pool; it could equally be set to cores
density = np.concatenate(results).reshape(xi.shape)
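# Sketch (defined but never called): the serial equivalent of the pool.map() block above is
# a single KDE evaluation over the full grid; the multiprocessing split only pays off when
# the evaluation grid is large.
def _serial_density():
    return kde(coords).reshape(xi.shape)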
# Plot scatter with mayavi
figure = mlab.figure('DensityPlot', bgcolor=(1, 1, 1))
# grid = mlab.pipeline.scalar_field(xi, yi, zi, density)
# min = density.min()
# max=density.max()
# s = mlab.pipeline.volume(grid, vmin=min, vmax=max,) #min + .5*(max-min))
x, y, z = np.ogrid[-10:10:20j, -10:10:20j, -10:10:20j]
s = np.sin(x*y*z)/(x*y*z)
src = mlab.pipeline.scalar_field(s)
vol = mlab.pipeline.volume(src)
lut = vol.module_manager.scalar_lut_manager.lut.table.to_array()
# The lut is a 256x4 array, with the columns representing RGBA
# (red, green, blue, alpha) coded with integers going from 0 to 255.
# We modify the alpha channel to add a transparency gradient
lut[:, -1] = np.linspace(0, 255, 256)
# and finally we put this LUT back in the surface object. We could have
# added any 256*4 array rather than modifying an existing LUT.
vol.module_manager.scalar_lut_manager.lut.table = lut
# lut = lut_dict['plasma']
# lut[:, -1] = np.linspace(0, 255, 256)
# # lut[:, 0] = np.linspace(0, 255, 256)
#
# vol.module_manager.scalar_lut_manager.lut.table = lut
#
#
#
# # Changing the ctf:
# from tvtk.util.ctf import ColorTransferFunction
# ctf = ColorTransferFunction()
# ctf.add_rgb_point(value, r, g, b)
# ctf.add_hsv_point(value, h, s, v)
# # ...
# vol._volume_property.set_color(ctf)
# vol._ctf = ctf
# vol.update_ctf = True
#
# # Changing the otf:
# from enthought.tvtk.util.ctf import PiecewiseFunction
# otf = PiecewiseFunction()
# otf.add_point(value, opacity)
# self._target._otf = otf
# self._target._volume_property.set_scalar_opacity(otf)
#
# grid.
# surf.module_manager.scalar_lut_manager.lut.tabl
mlab.axes()
mlab.show()
| mit |
alexdemarsh/daft | examples/classic.py | 7 | 1057 | """
The Quintessential PGM
======================
This is a demonstration of a very common structure found in graphical models.
It has been rendered using Daft's default settings for all the parameters
and it shows off how much beauty is baked in by default.
"""
from matplotlib import rc
rc("font", family="serif", size=12)
rc("text", usetex=True)
import daft
# Instantiate the PGM.
pgm = daft.PGM([2.3, 2.05], origin=[0.3, 0.3])
# Hierarchical parameters.
pgm.add_node(daft.Node("alpha", r"$\alpha$", 0.5, 2, fixed=True))
pgm.add_node(daft.Node("beta", r"$\beta$", 1.5, 2))
# Latent variable.
pgm.add_node(daft.Node("w", r"$w_n$", 1, 1))
# Data.
pgm.add_node(daft.Node("x", r"$x_n$", 2, 1, observed=True))
# Add in the edges.
pgm.add_edge("alpha", "beta")
pgm.add_edge("beta", "w")
pgm.add_edge("w", "x")
pgm.add_edge("beta", "x")
# And a plate.
pgm.add_plate(daft.Plate([0.5, 0.5, 2, 1], label=r"$n = 1, \cdots, N$",
shift=-0.1))
# Render and save.
pgm.render()
pgm.figure.savefig("classic.pdf")
pgm.figure.savefig("classic.png", dpi=150)
| mit |
scalable-networks/ext | gnuradio-3.7.0.1/gr-filter/examples/fft_filter_ccc.py | 5 | 3574 | #!/usr/bin/env python
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fft_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw0, bw1, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw0 = bw0
self._bw1 = bw1
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.complex_band_pass_2(1, self._fs,
self._bw0, self._bw1,
self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fft_filter_ccc(self._decim, taps)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_out = blocks.vector_sink_c()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-S", "--start-pass", type="eng_float", default=1000,
help="Start of Passband [default=%default]")
parser.add_option("-E", "--end-pass", type="eng_float", default=2000,
help="End of Passband [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fft_filter_ccc(options.nsamples,
options.samplerate,
options.start_pass,
options.end_pass,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-2.0 |
Bobeye/LinkMechanismStewartGouph | V2.0/configure.py | 2 | 40750 | import math
import time
import random
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import inv
# Mechanical Parameters
BOTTOM_RADIUS = 119.3649864910141897009273117677145601037135856257366363864 # distance from the center of the bottom plate to the servo center
TOP_RADIUS = 74.33034373659252761306004106965698325724492756860430780281 # distance from the center of the top plate to the top joint
BOTTOM_ANGLE = 0.546166563433787740559629712911971244663191407124391241530
TOP_ANGLE = 0.343023940420703397073528599413809616687563147674740286598
LINKA = 75.22 # length of the body link connected to the servo, the first part of the link-mechanism leg
LINKB = 120.00# length of the body link connected to the top plate, the second part of the link-mechanism leg
ZEROHEIGHT = 208.8
SERVOHEIGHT = 41.5
SERVOANGLE_MIN = 0 # the zero position corresponds to the z axis of the right-handed coordinate system; clockwise motion is counted as positive
SERVOANGLE_MAX = 90
SERVOSPEED_MAX = 400
SERVOSPEED_MIN = 0
# WORKINGRANGE = [15.0,15.0,15.0,0.15,0.15,0.15]
# COORDINATES & NUMBER TAG:
# The space origin always follows the right-handed coordinate system and is located at the center of the bottom plate.
# The number 0 tag always refers to the servo located within the third-angle projection. The tagging sequence runs anti-clockwise,
# which means tag 1 refers to the servo on the right side of servo 0.
class CONFIGURE:
# data check
def PositiveDataCheck(self):
if BOTTOM_RADIUS <= 0 or TOP_RADIUS <= 0 or BOTTOM_ANGLE <= 0 or TOP_ANGLE <= 0 or LINKA <= 0 or LINKB <= 0:
print("Warning! Strcture dimensions must be positive!")
def OriginPosition(self):
BottomCoordinates = [[BOTTOM_RADIUS * math.cos(BOTTOM_ANGLE), -BOTTOM_RADIUS * math.sin(BOTTOM_ANGLE), 0],
[BOTTOM_RADIUS * math.cos(BOTTOM_ANGLE), BOTTOM_RADIUS * math.sin(BOTTOM_ANGLE), 0],
[-BOTTOM_RADIUS * math.sin(math.radians(30)-BOTTOM_ANGLE), BOTTOM_RADIUS * math.cos(math.radians(30)-BOTTOM_ANGLE), 0],
[-BOTTOM_RADIUS * math.sin(math.radians(30)+BOTTOM_ANGLE), BOTTOM_RADIUS * math.cos(math.radians(30)+BOTTOM_ANGLE), 0],
[-BOTTOM_RADIUS * math.sin(math.radians(30)+BOTTOM_ANGLE), -BOTTOM_RADIUS * math.cos(math.radians(30)+BOTTOM_ANGLE), 0],
[-BOTTOM_RADIUS * math.sin(math.radians(30)-BOTTOM_ANGLE), -BOTTOM_RADIUS * math.cos(math.radians(30)-BOTTOM_ANGLE), 0]]
# print('BottomCoordinates = ',BottomCoordinates)
TopCoordinates = [[TOP_RADIUS * math.cos(TOP_ANGLE), -TOP_RADIUS * math.sin(TOP_ANGLE), ZEROHEIGHT],
[TOP_RADIUS * math.cos(TOP_ANGLE), TOP_RADIUS * math.sin(TOP_ANGLE), ZEROHEIGHT],
[-TOP_RADIUS * math.sin(math.radians(30)-TOP_ANGLE), TOP_RADIUS * math.cos(math.radians(30)-TOP_ANGLE), ZEROHEIGHT],
[-TOP_RADIUS * math.sin(math.radians(30)+TOP_ANGLE), TOP_RADIUS * math.cos(math.radians(30)+TOP_ANGLE), ZEROHEIGHT],
[-TOP_RADIUS * math.sin(math.radians(30)+TOP_ANGLE), -TOP_RADIUS * math.cos(math.radians(30)+TOP_ANGLE), ZEROHEIGHT],
[-TOP_RADIUS * math.sin(math.radians(30)-TOP_ANGLE), -TOP_RADIUS * math.cos(math.radians(30)-TOP_ANGLE), ZEROHEIGHT]]
# print('TopCoordinates = ',TopCoordinates)
ServoCoordinates = BottomCoordinates
for i in range(6):
ServoCoordinates[i][2] = SERVOHEIGHT
# print('ServoCoordinates',ServoCoordinates)
InitialCoordinates = [BottomCoordinates, TopCoordinates, ServoCoordinates]
return InitialCoordinates
def TopplateMotion(self, TopCoordinates, TopMotion):
TempTop = TopCoordinates
temptopz = TempTop[0][2]
for i in range(6):
TempTop[i][2] = 0.0
Top = TempTop
deltaX = TopMotion[0]
deltaY = TopMotion[1]
deltaZ = TopMotion[2]
alpha = TopMotion[3]
belta = TopMotion[4]
gamma = TopMotion[5]
def S(angle):
return math.sin(angle)
def C(angle):
return math.cos(angle)
RotationM = [[C(gamma) * C(belta) , -S(gamma) * C(alpha) + C(gamma) * S(belta) * S(alpha) , S(gamma) * S(alpha) + C(gamma) * S(belta) * C(alpha)],
[S(gamma) * C(belta) , C(gamma) * C(alpha) + S(gamma) * S(belta) * S(alpha) , -C(gamma) * S(alpha) + S(gamma) * S(belta) * C(alpha)],
[-S(belta) , C(belta) * S(alpha) , C(belta) * C(alpha)]]
TranslationM = [deltaX , deltaY, deltaZ]
for i in range(6):
for j in range(3):
Top[i][j] = RotationM[j][0] * TempTop[i][0] + RotationM[j][1] * TempTop[i][1] + RotationM[j][2] * TempTop[i][2] + TranslationM[j]
Top[i][2] = Top[i][2] + temptopz
# print('After-Motion Top plate Coordinates', Top)
return Top
def LegLength(self, AimTopplate, ServoCoordinates):
# Calculate leg length
LegLength = [0.0,1.0,2.0,3.0,4.0,5.0]
for i in range(6):
TempDistance = 0.0
for j in range(3):
TempDistance = TempDistance + ((AimTopplate[i][j]-ServoCoordinates[i][j])**2)
LegLength[i] = math.sqrt(TempDistance)
# print('Leglength = ', LegLength)
return LegLength
def InverseKinematics(self, AimTopplate, ServoCoordinates, LinkA, LinkB):
# Calculate leg length
LegLength = [0.0,1.0,2.0,3.0,4.0,5.0]
for i in range(6):
TempDistance = 0.0
for j in range(3):
TempDistance = TempDistance + ((AimTopplate[i][j]-ServoCoordinates[i][j])**2)
LegLength[i] = math.sqrt(TempDistance)
# print('Leglength = ', LegLength)
# Calculate leg direction
LegAngle = AimTopplate
TempLegAngle = AimTopplate
for i in range(6):
for j in range(3):
LegAngle[i][j] = AimTopplate[i][j] - ServoCoordinates[i][j]
TempLegAngle[i][j] = LegAngle[i][j]
# LegAngle[i][0], LegAngle[i][1] = LegAngle[i][1], -LegAngle[i][0] # Switch the coordinates system from the right-handed to a standard 2D coordinates
# print('LegAngle', LegAngle)
YT = [0.0] * 6
ZT = [0.0] * 6
for i in range(6):
ZT[i] = LegAngle[i][2]
if i <= 1:
YT[i] = LegAngle[i][1]
elif i == 2:
axisrot = math.pi*2/3
ca = math.cos(axisrot)
sa = math.sin(axisrot)
x0 = LegAngle[i][0]
y0 = LegAngle[i][1]
YT[i] = y0 * ca - x0 * sa
elif i == 3:
axisrot = math.pi*2/3
ca = math.cos(axisrot)
sa = math.sin(axisrot)
x0 = LegAngle[i][0]
y0 = LegAngle[i][1]
YT[i] = y0 * ca - x0 * sa
elif i == 4:
axisrot = -math.pi*2/3
ca = math.cos(axisrot)
sa = math.sin(axisrot)
x0 = LegAngle[i][0]
y0 = LegAngle[i][1]
YT[i] = y0 * ca - x0 * sa
elif i == 5:
axisrot = -math.pi*2/3
ca = math.cos(axisrot)
sa = math.sin(axisrot)
x0 = LegAngle[i][0]
y0 = LegAngle[i][1]
YT[i] = y0 * ca - x0 * sa
# print('YT', YT)
# print('ZT', ZT)
ALPHA = [0.0,1.0,2.0,3.0,4.0,5.0]
AimServoAngle = [0.0,1.0,2.0,3.0,4.0,5.0]
# Motion Planning
for i in range(6):
M = ((LegLength[i] ** 2) + (LinkA ** 2) - (LinkB ** 2)) / (2 * LinkA * ZT[i])
N = YT[i] / ZT[i]
# print('M', M)
# print('N', N)
# cos(alpha) has two results
alpha = 0
if i % 2 == 1:
Alphaa = (M * N + (math.sqrt((N**2) - (M**2) + 1.0))) / (N**2 + 1.0)
Alphab = (M * N - (math.sqrt((N**2) - (M**2) + 1.0))) / (N**2 + 1.0)
alphaa = math.acos(Alphaa)
alphab = math.acos(Alphab)
# print('a', alphaa)
# print('b', alphab)
if abs(alphaa) <= 1.5708:
alpha = alphaa
elif abs(alphab) <= 1.5708:
alpha = alphab
ALPHA[i] = alpha
AimServoAngle[i] = 90 - math.degrees(ALPHA[i])
else:
Alphaa = (-(M * N) + (math.sqrt((N**2) - (M**2) + 1.0))) / (N**2 + 1.0)
Alphab = (-(M * N) - (math.sqrt((N**2) - (M**2) + 1.0))) / (N**2 + 1.0)
alphaa = math.acos(Alphaa)
alphab = math.acos(Alphab)
# print('a', alphaa)
# print('b', alphab)
if abs(alphaa) <= 1.5708:
alpha = alphaa
elif abs(alphab) <= 1.5708:
alpha = alphab
ALPHA[i] = alpha
AimServoAngle[i] = 90 - math.degrees(ALPHA[i])
# print('ALPHA', ALPHA)
# print('AimServoAngle = ', AimServoAngle)
return AimServoAngle
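# Illustrative helper (not used elsewhere in this class): the quadratic above yields two
# candidate values for cos(alpha), and the inverse kinematics keeps the root whose angle
# lies inside the +/-90 degree range the servo can reach. This mirrors that branch
# selection for a single pair of candidate cosines.
def _select_reachable_angle(self, cos_a, cos_b, limit=math.pi / 2):
    for c in (cos_a, cos_b):
        if -1.0 <= c <= 1.0:
            alpha = math.acos(c)
            if abs(alpha) <= limit:
                return alpha
    return None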
def MonteCarlo(self):
sampleResolution = 12.0
sampleStep = 4.0
sampleNum = int((sampleResolution*2+1)/sampleStep)**6
# Error range set
# deltaTopplate = [0.1,0.1,0.1,0.1,0.1,0.1] # angles are in degrees!
# deltaTopplate = [1.0,0.0,0.0,0.0,0.0,0.0] # angles are in degrees!
# deltaTopplate = [0.0,1.0,0.0,0.0,0.0,0.0] # angles are in degrees!
# deltaTopplate = [0.0,0.0,1.0,0.0,0.0,0.0] # angles are in degrees!
deltaTopplate = [0.19,0.28,0.29,0.063,0.063,0.2] # angles are in degrees!
# Random
sampleList = [[0],[0],[0],[0],[0],[0]]
sampleTopplate = [0,0,0,0,0,0]
tempsampleList = [0]
for i in range(6):
tempsampleList = np.random.uniform(-deltaTopplate[i],deltaTopplate[i],sampleNum)
for j in range(sampleNum):
sampleList[i].append(tempsampleList[j])
sampleList[i].pop(0)  # drop the placeholder leading zero
for i in [3,4,5]:
for j in range(len(sampleList[i])):
sampleList[i][j] = math.radians(sampleList[i][j])
# print('sampleList',sampleList)
print('MonteCarlo sampleNum:', sampleNum)
return sampleList
def ForwardKinematics(self, ServoAngle, ServoCoordinates, TopCoordinates, ZeroTopplate, LinkA, LinkB, DBP):
# Degree to radius
for i in range(6):
ServoAngle[i] = math.radians(ServoAngle[i])
# Define the position of the universal joint between LINKA and LINKB
UniversalJointAB = ServoCoordinates
UniversalJointAB = [ [ServoCoordinates[0][0] , ServoCoordinates[0][1]-(LINKA*math.sin(ServoAngle[0])) , ServoCoordinates[0][2]+(LINKA*math.cos(ServoAngle[0]))],
[ServoCoordinates[1][0] , ServoCoordinates[1][1]+(LINKA*math.sin(ServoAngle[1])) , ServoCoordinates[1][2]+(LINKA*math.cos(ServoAngle[1]))],
[ServoCoordinates[2][0]+(LINKA*math.sin(ServoAngle[2])*math.cos(BOTTOM_ANGLE)) , ServoCoordinates[2][1]+(LINKA*math.sin(ServoAngle[2])*math.sin(BOTTOM_ANGLE)) , ServoCoordinates[2][2]+(LINKA*math.cos(ServoAngle[2]))],
[ServoCoordinates[3][0]-(LINKA*math.sin(ServoAngle[3])*math.cos(BOTTOM_ANGLE)) , ServoCoordinates[3][1]-(LINKA*math.sin(ServoAngle[3])*math.sin(BOTTOM_ANGLE)) , ServoCoordinates[3][2]+(LINKA*math.cos(ServoAngle[3]))],
[ServoCoordinates[4][0]-(LINKA*math.sin(ServoAngle[4])*math.cos(BOTTOM_ANGLE)) , ServoCoordinates[4][1]+(LINKA*math.sin(ServoAngle[4])*math.sin(BOTTOM_ANGLE)) , ServoCoordinates[4][2]+(LINKA*math.cos(ServoAngle[4]))],
[ServoCoordinates[5][0]+(LINKA*math.sin(ServoAngle[5])*math.cos(BOTTOM_ANGLE)) , ServoCoordinates[5][1]-(LINKA*math.sin(ServoAngle[5])*math.sin(BOTTOM_ANGLE)) , ServoCoordinates[5][2]+(LINKA*math.cos(ServoAngle[5]))]]
# print('UniversalJointAB:', UniversalJointAB)
# Check LINKA's working range
def CrossProduct(V1,V2): # cross product of two vectors
for i in range(3):
crossproduct = [V1[1]*V2[2]-V2[1]*V1[2],V1[0]*V2[2]-V2[0]*V1[2],V1[0]*V2[1]-V1[1]*V2[0]]
return crossproduct
def CCW(A,B,C): # See if three points are listed counter clock wise
SegAB = [0,0,0]
SegAC = [0,0,0]
for i in range(3):
SegAB[i] = B[i] - A[i]
SegAC[i] = C[i] - A[i]
if CrossProduct(SegAB,SegAC)[2] > 0:
return True
else:
return False
def Intersect(PA1,PA2,PB1,PB2): # True if segment PA1-PA2 intersects segment PB1-PB2
return CCW(PA1,PB1,PB2) != CCW(PA2,PB1,PB2) and CCW(PA1,PA2,PB1) != CCW(PA1,PA2,PB2)
def Coplanar(A,B,C,D): # See if four points are coplanar
SegAB = [0,0,0]
SegAC = [0,0,0]
SegAD = [0,0,0]
for i in range(3):
SegAB[i] = B[i] - A[i]
SegAC[i] = C[i] - A[i]
SegAD[i] = D[i] - A[i]
coplanarVec = CrossProduct(CrossProduct(SegAB,SegAC),CrossProduct(SegAB,SegAD))
if coplanarVec[0] == 0 and coplanarVec[1] == 0 and coplanarVec[2] == 0:
return True
else:
return False
# first, see if the segment points of the two links are coplanar, second, see if the two links are interacting
for i in range(6):
if i < 5:
if Coplanar(ServoCoordinates[i],UniversalJointAB[i],ServoCoordinates[i+1],UniversalJointAB[i+1]) == True:
if Intersect(ServoCoordinates[i],UniversalJointAB[i],ServoCoordinates[i+1],UniversalJointAB[i+1]) == True:
print("Warning! Links have intersetions!!!")
else:
print("Links are safe to go!")
else:
if Coplanar(ServoCoordinates[5],UniversalJointAB[5],ServoCoordinates[0],UniversalJointAB[0]) == True:
if Intersect(ServoCoordinates[5],UniversalJointAB[5],ServoCoordinates[0],UniversalJointAB[0]) == True:
print("Warning! Links have intersetions!!!")
else:
print("Links are safe to go!")
# Newton-Raphson Method
print('Newton-Raphson is on!!!')
print('Initial Top Plate = ', TopCoordinates)
print('Initial Servo Plate = ', ServoCoordinates)
print('Given servo angle = ', ServoAngle)
print('UniversalJointAB pos = ', UniversalJointAB)
def F(TopCoordinates,TopMotion,UniversalJointAB,LinkB):
F = [0.00000000,0.000000000,0.0000000000,0.00000000000,0.0000000000,0.0000000000]
TempTop = TopCoordinates
Top = TopCoordinates
deltaX = TopMotion[0]
deltaY = TopMotion[1]
deltaZ = TopMotion[2]
alpha = TopMotion[3]
belta = TopMotion[4]
gamma = TopMotion[5]
def S(angle):
return math.sin(angle)
def C(angle):
return math.cos(angle)
RotationM = [[C(gamma) * C(belta) , -S(gamma) * C(alpha) + C(gamma) * S(belta) * S(alpha) , S(gamma) * S(alpha) + C(gamma) * S(belta) * C(alpha)],
[S(gamma) * C(belta) , C(gamma) * C(alpha) + S(gamma) * S(belta) * S(alpha) , -C(gamma) * S(alpha) + S(gamma) * S(belta) * C(alpha)],
[-S(belta) , C(belta) * S(alpha) , C(belta) * C(alpha)]]
TranslationM = [deltaX , deltaY, deltaZ]
for i in range(6):
for j in range(3):
Top[i][j] = RotationM[j][0] * TempTop[i][0] + RotationM[j][1] * TempTop[i][1] + RotationM[j][2] * TempTop[i][2] + TranslationM[j] - UniversalJointAB[i][j]
F[i] = math.sqrt(Top[i][0] ** 2 + Top[i][1] ** 2 + Top[i][2] ** 2) - LinkB
return F
# TopMotion = [0.0,0.0,0.0,0.0,0.0,0.0] # Angle in radius
# F = F(TopCoordinates,TopMotion,UniversalJointAB,LinkB)
# print('text F result', F)
def f(TopCoordinates,TopMotion,UniversalJointAB,LinkB):
TempTop = TopCoordinates
Top = TopCoordinates
deltaX = TopMotion[0]
deltaY = TopMotion[1]
deltaZ = TopMotion[2]
alpha = TopMotion[3]
belta = TopMotion[4]
gamma = TopMotion[5]
def S(angle):
return math.sin(angle)
def C(angle):
return math.cos(angle)
RotationM = [[C(gamma) * C(belta) , -S(gamma) * C(alpha) + C(gamma) * S(belta) * S(alpha) , S(gamma) * S(alpha) + C(gamma) * S(belta) * C(alpha)],
[S(gamma) * C(belta) , C(gamma) * C(alpha) + S(gamma) * S(belta) * S(alpha) , -C(gamma) * S(alpha) + S(gamma) * S(belta) * C(alpha)],
[-S(belta) , C(belta) * S(alpha) , C(belta) * C(alpha)]]
TranslationM = [deltaX , deltaY, deltaZ]
for i in range(6):
for j in range(3):
Top[i][j] = RotationM[j][0] * TempTop[i][0] + RotationM[j][1] * TempTop[i][1] + RotationM[j][2] * TempTop[i][2] + TranslationM[j] - UniversalJointAB[i][j]
f = Top
return f
def dF(TopCoordinates,TopMotion):
dF = [[[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]]],
[[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]]],
[[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]]],
[[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]]],
[[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]]],
[[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]],[[0.0],[0.0],[0.0]]],]
Top = TopCoordinates
deltaX = TopMotion[0]
deltaY = TopMotion[1]
deltaZ = TopMotion[2]
alpha = TopMotion[3]
belta = TopMotion[4]
gamma = TopMotion[5]
def S(angle):
return math.sin(angle)
def C(angle):
return math.cos(angle)
for i in range(6):
# d(f)/d(deltaX) Y Z
dF[i][0] = [1.0,0.0,0.0]
dF[i][1] = [0.0,1.0,0.0]
dF[i][2] = [0.0,0.0,1.0]
# d(f)/d(alpha)
dF[i][3] = [S(gamma)*S(alpha)*Top[i][1] + C(gamma)*S(belta)*C(alpha)*Top[i][1] + S(gamma)*C(alpha)*Top[i][2] - C(gamma)*S(belta)*S(alpha)*Top[i][2],
-C(gamma)*S(alpha)*Top[i][1] + S(gamma)*S(belta)*C(alpha)*Top[i][1] - C(gamma)*C(alpha)*Top[i][2] - S(gamma)*S(belta)*S(alpha)*Top[i][2],
C(belta)*C(alpha)*Top[i][1] - C(belta)*S(alpha)*Top[i][2]]
# d(f)/d(belta)
dF[i][4] = [-C(gamma)*S(belta)*Top[i][0] + C(gamma)*C(belta)*S(alpha)*Top[i][1] + C(gamma)*C(belta)*C(alpha)*Top[i][2],
-S(gamma)*S(belta)*Top[i][0] + S(gamma)*C(belta)*S(alpha)*Top[i][1] + S(gamma)*C(belta)*C(alpha)*Top[i][2],
-C(belta)*Top[i][0] - S(belta)*S(alpha)*Top[i][1] - S(belta)*C(alpha)*Top[i][2]]
# d(f)/d(gamma)
dF[i][5] = [-S(gamma)*C(belta)*Top[i][0] - C(gamma)*C(alpha)*Top[i][1] - S(gamma)*S(belta)*S(alpha)*Top[i][1] + C(gamma)*S(alpha)*Top[i][2] - S(gamma)*S(belta)*C(alpha)*Top[i][2],
C(gamma)*C(belta)*Top[i][0] - S(gamma)*C(alpha)*Top[i][1] + C(gamma)*S(belta)*S(alpha)*Top[i][1] + S(gamma)*S(alpha)*Top[i][2] + C(gamma)*S(belta)*C(alpha)*Top[i][2],
0]
return dF
# TopMotion = [0.0,0.0,0.0,0.0,0.0,0.0] # Angle in radius
# dF = dF(TopCoordinates,TopMotion)
# print('text dF result', dF)
# NewtonRaphson: # Xn+1 = Xn - f(Xn)/df(Xn)
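# For orientation, a scalar sketch of the update quoted above (defined but never called);
# the loop below applies the same idea jointly to the six pose parameters.
def _newton_raphson_1d(func, dfunc, x0, tol=1e-8, max_iter=50):
    x = x0
    for _ in range(max_iter):
        step = func(x) / dfunc(x)
        x = x - step
        if abs(step) < tol:
            break
    return x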
resolution = 0.1
count = 1
start = time.time()
CurrentTopMotion = [0.0,0.0,0.0,0.0,0.0,0.0]
NextTopMotion = [0.0,0.0,0.0,0.0,0.0,0.0]
TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
F0 = F(TopCoordinates,CurrentTopMotion,UniversalJointAB,LinkB)
TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
dF0 = dF(TopCoordinates,CurrentTopMotion)
TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
f0 = f(TopCoordinates,CurrentTopMotion,UniversalJointAB,LinkB)
TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
for i in range(6): # [deltaX, deltaY, deltaZ, alpha, belta, gamma]
Sum = 0.0
for j in range(6): # leg 0 ,1 ,2 3 4 5
Sum = Sum + ( F0[j] / (2 * (dF0[j][i][0] * f0[j][0] + dF0[j][i][1] * f0[j][1] + dF0[j][i][2] * f0[j][2])) )
NextTopMotion[i] = CurrentTopMotion[i] - Sum
print ('NextTopMotion = ', NextTopMotion)
print ('TP', TopCoordinates)
F1 = F(TopCoordinates,NextTopMotion,UniversalJointAB,LinkB)
print('PreviousF: ', F0)
print('NextF: ', F1)
# Permit = 0
# for i in range(6):
# if abs(F1[i]) <= resolution:
# Permit = Permit + 1
# while Permit < 6:
Sum = 0.0
for i in range(6):
Sum = Sum + F1[i]
while Sum >= resolution:
count = count + 1
CurrentTopMotion = NextTopMotion
TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
F0 = F(TopCoordinates,CurrentTopMotion,UniversalJointAB,LinkB)
TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
dF0 = dF(TopCoordinates,CurrentTopMotion)
TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
f0 = f(TopCoordinates,CurrentTopMotion,UniversalJointAB,LinkB)
TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
for i in range(6): # [deltaX, deltaY, deltaZ, alpha, belta, gamma]
Sum = 0.0
for j in range(6): # leg 0 ,1 ,2 3 4 5
Sum = Sum + ( F0[j] / (2 * (dF0[j][i][0] * f0[j][0] + dF0[j][i][1] * f0[j][1] + dF0[j][i][2] * f0[j][2])) )
NextTopMotion[i] = CurrentTopMotion[i] - Sum
print ('NextTopMotion = ', NextTopMotion)
print ('TP', TopCoordinates)
F1 = F(TopCoordinates,NextTopMotion,UniversalJointAB,LinkB)
print('PreviousF: ', F0)
print('NextF: ', F1)
Sum = 0.0
for i in range(6):
Sum = Sum + F1[i]
# Permit = 0
# for i in range(6):
# if F1[i] <= resolution:
# Permit = Permit + 1
end = time.time()
print ('Iteration Period: ', count, 'Total Time', end-start)
print ('Aim Topplate Motion: ', NextTopMotion)
# # NewtonRaphson(TC0,SC,SA,LinkA,LinkB,DBP): # Xn+1 = Xn - f(Xn)/df(Xn)
# def FKFunction(TC0,SC,TPM0,SA,LinkA,LinkB,DBP):
# # given servo angle, return position of the universal joint between linkA & linkB
# LC = [[SC[0][0] , SC[0][1]-LinkA*math.sin(SA[0]) , SC[0][2]+LinkA*math.cos(SA[0])],
# [SC[1][0] , SC[1][1]+LinkA*math.sin(SA[1]) , SC[1][2]+LinkA*math.cos(SA[1])],
# [SC[2][0]+LinkA*math.sin(SA[2])*math.cos(DBP) , SC[2][1]+LinkA*math.sin(SA[2])*math.sin(DBP) , SC[2][2]+LinkA*math.cos(SA[2])],
# [SC[3][0]-LinkA*math.sin(SA[3])*math.cos(DBP) , SC[3][1]-LinkA*math.sin(SA[3])*math.sin(DBP) , SC[3][2]+LinkA*math.cos(SA[3])],
# [SC[4][0]-LinkA*math.sin(SA[4])*math.cos(DBP) , SC[4][1]+LinkA*math.sin(SA[4])*math.sin(DBP) , SC[4][2]+LinkA*math.cos(SA[4])],
# [SC[5][0]+LinkA*math.sin(SA[5])*math.cos(DBP) , SC[5][1]-LinkA*math.sin(SA[5])*math.sin(DBP) , SC[5][2]+LinkA*math.cos(SA[5])],]
# tc0 = TC0
# # define topplate inside FK def
# def FKTopplateMotion(TC, TopMotion):
# def FKTPTranslation(TranslationMatrix,TempTop):
# for i in range(6):
# TempTop[i][0] = TempTop[i][0] + TranslationMatrix[0]
# TempTop[i][1] = TempTop[i][1] + TranslationMatrix[1]
# TempTop[i][2] = TempTop[i][2] + TranslationMatrix[2]
# # print("Temp Topplate after translation: ", TempTop)
# return TempTop
# def FKTPRotation(RotationMatrix,TempTop):
# def QUATERNIONS_ROTATION(vector, axis, angle):
# # 4-matrix multiply vector
# def QUATERNIONS_MULTIPLY(qm0,qv0):
# qm = [0,0,0]
# for i in range(3):
# for j in range(3):
# qm[i] = qm[i] + qm0[i][j] * qv0[j]
# return qm
# v = 1-math.cos(angle)
# c = math.cos(angle)
# s = math.sin(angle)
# x = axis[0]
# y = axis[1]
# z = axis[2]
# RotQuaternions = [ [1-v*((y*y)+(z*z)) , x*y*v-z*s , x*z*v+y*s] ,
# [x*y*v+z*s , 1-v*((x*x)+(z*z)) , y*z*v-x*s] ,
# [x*z*v-y*s , y*z*v+x*s , 1-v*((x*x)+(y*y))] ]
# return QUATERNIONS_MULTIPLY(RotQuaternions,vector)
# for i in range(6):
# TempTop[i][2] = TempTop[i][2] - ZEROHEIGHT
# AfterRotTop = TempTop
# alpha = RotationMatrix[0]
# belta = RotationMatrix[1]
# gamma = RotationMatrix[2]
# if alpha != 0:
# for i in range(6):
# AfterRotTop[i] = QUATERNIONS_ROTATION(TempTop[i], [1,0,0], alpha)
# TempTop = AfterRotTop
# if belta != 0:
# for i in range(6):
# AfterRotTop[i] = QUATERNIONS_ROTATION(TempTop[i], [0,1,0], belta)
# TempTop = AfterRotTop
# if gamma != 0:
# for i in range(6):
# AfterRotTop[i] = QUATERNIONS_ROTATION(TempTop[i], [0,0,1], gamma)
# TempTop = AfterRotTop
# for i in range(6):
# TempTop[i][2] = TempTop[i][2] + ZEROHEIGHT
# # print("Temp Topplate after rotation: ", TempTop)
# return TempTop
# TempTopCo = TC
# deltaX = TopMotion[0]
# deltaY = TopMotion[1]
# deltaZ = TopMotion[2]
# angleX = TopMotion[3]
# angleY = TopMotion[4]
# angleZ = TopMotion[5]
# if deltaX != 0 or deltaY != 0 or deltaZ != 0:
# TempTopCo = FKTPTranslation([deltaX, deltaY, deltaZ], TempTopCo)
# if angleX != 0 or angleY != 0 or angleZ != 0:
# TempTopCo = FKTPRotation([angleX, angleY, angleZ], TempTopCo)
# # print('After-Motion Top plate Coordinates', TempTopCo)
# return TempTopCo
# tempTP = FKTopplateMotion(tc0, TPM0)
# # print('temp top plate', tempTP)
# fkfunction = [0,0,0,0,0,0]
# for i in range(6):
# tempD = 0.0
# for j in range(3):
# tempD = tempD + ((tempTP[i][j]-LC[i][j])**2)
# tempD = math.sqrt(tempD)
# fkfunction[i] = tempD - LinkB
# # print('Tempdistance between temp topplate and Link Joint ', fkfunction)
# return fkfunction
# F0 = [0.00000000,0.000000000,0.0000000000,0.00000000000,0.0000000000,0.0000000000]
# FK0 = FKFunction(TC0,SC,F0,SA,LinkA,LinkB,DBP)
# print('FK0', FK0)
# J0 = LocalDNR(F0,TC0,SC,SA,LinkA,LinkB,DBP)
# # print('F0', F0)
# F1 = NRfunction(F0,FK0,J0)
# F0 = [0.00000000,0.000000000,0.0000000000,0.00000000000,0.0000000000,0.0000000000]
# print('F0', F0)
# print('F1', F1)
# print('Initial Top Plate = ', TC0)
# PERMIT = 0
# PeriodNum = 0
# # F0 = F1
# # print('F0', F0)
# # print('F1', F1)
# # TC0 = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
# # J0 = LocalDNR(F0,TC0,SC,SA,LinkA,LinkB,DBP)
# # TC0 = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
# # FK0 = FKFunction(TC0,SC,F0,SA,LinkA,LinkB,DBP)
# # F1 = NRfunction(F0,FK0,J0)
# # TC0 = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
# # print('Initial Top Plate = ', TC0)
# # FK0 = FKFunction(TC0,SC,F0,SA,LinkA,LinkB,DBP)
# # TC0 = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
# # PeriodNum = PeriodNum + 1
# # print('F1', F1)
# # print('FK0', FK0)
# # print('iteration', PeriodNum)
# while PERMIT != 6:
# FK1 = FKFunction(TC0,SC,F1,SA,LinkA,LinkB,DBP)
# print('Initial Top Plate = ', TC0)
# print('F1', F1)
# # SAVE TC0
# TC0 = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
# print('Initial Top Plate = ', TC0)
# DELTA = 0
# print('Initial Top Plate = ', TC0)
# for i in range(6):
# if abs(FK1[i]) <= NR:
# PERMIT = PERMIT + 1
# else:
# F0 = F1
# TC0 = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
# J0 = LocalDNR(F0,TC0,SC,SA,LinkA,LinkB,DBP)
# TC0 = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
# FK0 = FKFunction(TC0,SC,F0,SA,LinkA,LinkB,DBP)
# F1 = NRfunction(F0,FK0,J0)
# TC0 = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
# print('Initial Top Plate = ', TC0)
# FK0 = FKFunction(TC0,SC,F0,SA,LinkA,LinkB,DBP)
# TC0 = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
# PeriodNum = PeriodNum + 1
# print('F0', F0)
# print('F1', F1)
# print('FK0', FK0)
# print('iteration', PeriodNum)
# NRTopPlateMotion = F1
# print('TPM', NRTopPlateMotion)
# initial x0 setting
# FKTopPlateMotion = [0.0,0.0,0.0,0.0,0.0,0.0]
# TPM0 = [0.0,0.0,0.0,0.0,0.0,0.0]
# DTPM0 = [0.0,0.0,0.0,0.0,0.0,0.0]
# FK0 = FKFunction(TC0,SC,TPM0,SA,LinkA,LinkB,DBP)
# FK = FKFunction(TC0,SC,TPM0,SA,LinkA,LinkB,DBP)
# LDFK = [0.0,0.0,0.0,0.0,0.0,0.0]
# TPM = [0.0,0.0,0.0,0.0,0.0,0.0]
# PERMIT = 0
# while PERMIT != 1:
# # Xn+1 - Xn
# DELTA = 0
# tpmvalue = 0.0
# for k in range(6):
# DELTA = DELTA + (abs(FK[k]-FK0[k]))**2
# tpmvalue = tpmvalue + (TPM[k]**2)
# DELTA = math.sqrt(DELTA)
# print('DELTA', DELTA)
# if DELTA <= NR and tpmvalue != 0:
# PERMIT = 1
# else:
# TPM0 = TPM
# DTPM0 = TPM0
# for q in range(6):
# FK0 = FKFunction(TC0,SC,TPM0,SA,LinkA,LinkB,DBP)
# DTPM0[q] = TPM0[q] + DR
# DFK = FKFunction(TC0,SC,DTPM0,SA,LinkA,LinkB,DBP)
# for l in range(6):
# LDFK[l] = LDFK[l] + LocalD(FK0,DFK,DR)[l]
# for i in range(6):
# TPM[i] = TPM0[i] - (FK0[i]/LDFK[i])
# FK = FKFunction(TC0,SC,TPM,SA,LinkA,LinkB,DBP)
# print('TPM0', TPM0)
# FKTopPlateMotion = TPM0
# return FKTopPlateMotion
# NewtonRaphson(TC0,SC,SA,LinkA,LinkB,DBP)
# Iteration and period cal
# def Singularity(self):
def main():
# 1
# initial the configure class
configure = CONFIGURE()
# 2
# Initial coordinates setup
InitialCordinates=configure.OriginPosition()
BottomCoordinates=InitialCordinates[0]
TopCoordinates=InitialCordinates[1]
ServoCoordinates=InitialCordinates[2]
# 3
# # Move the TOP PLATE
# TopMotion = [0.0,0.0,0.0,0.0,0.0,0.0] # Angle in radius
# AimTopplate = configure.TopplateMotion(TopCoordinates, TopMotion)
# 4
# Inverse Kinematics
InitialCordinates=configure.OriginPosition()
BottomCoordinates=InitialCordinates[0]
TopCoordinates=InitialCordinates[1]
ServoCoordinates=InitialCordinates[2]
TopMotion = [0.0,0.0,0.0,0.0,0.0,-0.36] # angles in radians; the desired top-plate motion
AimTopplate = configure.TopplateMotion(TopCoordinates, TopMotion)
AimServoPos = configure.InverseKinematics(AimTopplate, ServoCoordinates, LINKA, LINKB)
print(AimServoPos) # in degrees
# 5
# MonteCarlo Accuracy Analysis
# Move top to zero
# ZeroTopMotion = [0.1,0.1,0.1,0.0,0.0,0.0] # Angle in radius
# ZeroAimTopplate = configure.TopplateMotion(TopCoordinates, ZeroTopMotion)
# ZeroAimServoPos = configure.InverseKinematics(ZeroAimTopplate, ServoCoordinates, LINKA, LINKB)
InitialCordinates=configure.OriginPosition()
BottomCoordinates=InitialCordinates[0]
TopCoordinates=InitialCordinates[1]
ServoCoordinates=InitialCordinates[2]
print('top',TopCoordinates)
ZeroTopMotion = [0.1,0.0,0.0,0.0,0.0,0.0] # angles in radians
ZeroAimTopplate = configure.TopplateMotion(TopCoordinates, ZeroTopMotion)
ZeroLegLength = configure.LegLength(ZeroAimTopplate, ServoCoordinates)
ZeroAimTopplate = configure.TopplateMotion(TopCoordinates, ZeroTopMotion)
ZeroAimServoPos = configure.InverseKinematics(ZeroAimTopplate, ServoCoordinates, LINKA, LINKB)
print('ZeroPos', ZeroAimServoPos)
# ZeroTopMotion = [0.1,0.1,0.1,0.0,0.0,0.0] # Angle in radius
# ZeroAimTopplate = configure.TopplateMotion(TopCoordinates, ZeroTopMotion)
# ZeroAimServoPos = configure.InverseKinematics(ZeroAimTopplate, ServoCoordinates, LINKA, LINKB)
# print(ZeroAimServoPos)
# Monte Carlo
sampleTopplate = configure.MonteCarlo()
for i in range(len(sampleTopplate)):
for j in range(6):
sampleTopplate[i][j] = sampleTopplate[i][j] + ZeroTopMotion[j]
sampleLegLength = [ZeroLegLength]
TopMotionList = [ZeroTopMotion]
AimTopplateList = [ZeroAimTopplate]
AimServoPosList = [ZeroAimServoPos]
for i in range(len(sampleTopplate[0])):
TopMotion = [sampleTopplate[0][i],sampleTopplate[1][i],sampleTopplate[2][i],sampleTopplate[3][i],sampleTopplate[4][i],sampleTopplate[5][i]]
TopMotionList.append(TopMotion)
TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
for i in range(len(TopMotionList)):
TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
AimTopplate = configure.TopplateMotion(TopCoordinates, TopMotionList[i])
InitialCordinates=configure.OriginPosition()
TopCoordinates=InitialCordinates[1]
AimTopplateList.append(AimTopplate)
# Leg Length Analysis
for i in range(len(AimTopplateList)):
LegLength = configure.LegLength(AimTopplateList[i], ServoCoordinates)
sampleLegLength.append(LegLength)
# Servo Angle Analysis
for i in range(1,len(AimTopplateList)):
AimServoPos = configure.InverseKinematics(AimTopplateList[i], ServoCoordinates, LINKA, LINKB)
TopCoordinates = [[70.0000000000026, -24.999999999992898, 208.79999999999063], [69.9999999999976, 25.000000000007095, 208.79999999999563], [-13.349364905396241, 73.12177826490947, 208.80000000000877], [-56.65063509461567, 48.12177826490514, 208.80000000001058], [-56.65063509460605, -48.121778264916266, 208.80000000000098], [-13.349364905381618, -73.12177826491194, 208.79999999999416]]
InitialCordinates=configure.OriginPosition()
ServoCoordinates=InitialCordinates[2]
AimServoPosList.append(AimServoPos)
# print('Aim Servo Position', AimServoPosList)
sampleServoAngle = [[0],[0],[0],[0],[0],[0]]
for i in range(len(AimServoPosList)):
for j in range(6):
sampleServoAngle[j].append(AimServoPosList[i][j])
# print('Aim Servo Angle Position for each leg', sampleServoAngle)
TempsampleServoAngle = sampleServoAngle
for i in range(6):
sampleServoAngle[i] = sorted(sampleServoAngle[i])
# MC accuracy data analysis
goodCount = [0.0,0.0,0.0,0.0,0.0,0.0]
goodRatio = [0.0,0.0,0.0,0.0,0.0,0.0]
for i in range(6):
for angle in sampleServoAngle[i]:
if angle <= ZeroAimServoPos[i] + 0.5 and angle >= ZeroAimServoPos[i] - 0.5:
goodCount[i] = goodCount[i] + 1.0
goodRatio[i] = goodCount[i] / len(sampleServoAngle[i])
print('Accuracy rate is:', goodRatio)
for i in range(6):
sampleServoAngle[i] = sampleServoAngle[i][1:len(sampleServoAngle[i])-1]
# leg 0 handle
minl0 = sampleServoAngle[0][0]
maxl0 = sampleServoAngle[0][len(sampleServoAngle[0])-1]
resolution = (maxl0-minl0) / 1000
leglist = [0]
legcount = [0]
l0 = minl0
i = 0
while l0 < maxl0 and i < len(sampleServoAngle[0])-10:
countl0 = 0
# print(sampleServoAngle[0][i])
while sampleServoAngle[0][i] < (l0 + resolution):
countl0 = countl0+1
i = i + 1
legcount.append(countl0)
leglist.append(l0)
l0 = l0 + resolution
print(len(legcount))
print(len(leglist))
# # Normal distribution
# Scount = [0]
# Mlength = np.median(sampleServoAngle[0])
# resolution = 0.01
# limit = 0.6
# Slength = [0]
# print(sampleServoAngle[0][0])
# for i in range(len(sampleServoAngle[0])):
# if sampleServoAngle[0][i] <=
plt.figure(1) # MC accuracy analysis figure
plt.title('MonteCarlo Accuracy Analysis -- Leg Length Accuracy')
plt.subplot(211)
plt.grid(True)
plt.ylabel('Topplate Position')
plt.xlabel('Sample Number')
samplePoints = plt.plot(TopMotionList,'.')
plt.setp(samplePoints, color='y')
# plt.axis([170,185,0, len(sampleLegLength)])
plt.subplot(212)
plt.grid(True)
plt.ylabel('Sample Number')
plt.xlabel('Leg Length/mm')
samplePoints = plt.plot(sampleLegLength,range(len(sampleLegLength)),'.')
plt.setp(samplePoints, color='g')
plt.axis([np.median(sampleLegLength)*0.98,np.median(sampleLegLength)*1.02,0, len(sampleLegLength)])
plt.figure(2) # MC accuracy analysis figure
plt.title('MonteCarlo Accuracy Analysis -- Servo Angle Accuracy')
for i in range(6):
plt.subplot(611 + i)
plt.grid(True)
plt.xlabel('Angle-Leg/degree')
samplePoints = plt.plot(sampleServoAngle[i],range(len(sampleServoAngle[i])),'.')
plt.setp(samplePoints, color='r')
plt.axis([sampleServoAngle[i][0], sampleServoAngle[i][len(sampleServoAngle[0])-1], 0, len(sampleServoAngle[i])])
plt.figure(3)
plt.title('Monte-Carlo Accuracy Analysis -- #0 Servo Angle Accuracy')
plt.grid(True)
plt.ylabel('SampleNumber')
plt.xlabel('Servo Angle')
samplePoints = plt.plot(leglist,legcount,'*')
plt.setp(samplePoints, color='r')
plt.axis([minl0, maxl0, 0, max(legcount)*1.01])
plt.show()
# 6
# # Forward Kinematics Calculation
# InitialCordinates=configure.OriginPosition()
# BottomCoordinates=InitialCordinates[0]
# TopCoordinates=InitialCordinates[1]
# ServoCoordinates=InitialCordinates[2]
# ZeroTopplate = AimTopplate
# ServoAngle = [25.4388,25.4388,25.4388,25.4388,25.4388,25.4388] # degree
# # ServoAngle = [0.0,0.0,0.0,0.0,0.0,0.0] # degree
# configure.ForwardKinematics(ServoAngle, ServoCoordinates, TopCoordinates, ZeroTopplate, LINKA, LINKB, BOTTOM_ANGLE)
main()
 | gpl-3.0 |
btabibian/scikit-learn | sklearn/model_selection/_validation.py | 3 | 38786 | """
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from ..base import is_classifier, clone
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.validation import _is_arraylike, _num_samples
from ..utils.metaestimators import _safe_split
from ..externals.joblib import Parallel, delayed, logger
from ..metrics.scorer import check_scoring
from ..exceptions import FitFailedWarning
from ._split import check_cv
from ..preprocessing import LabelEncoder
__all__ = ['cross_val_score', 'cross_val_predict', 'permutation_test_score',
'learning_curve', 'validation_curve']
def cross_val_score(estimator, X, y=None, groups=None, scoring=None, cv=None,
n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS
[ 0.33150734 0.08022311 0.03531764]
See Also
---------
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv.split(X, y, groups))
return np.array(scores)[:, 0]
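# Usage sketch (illustrative comment, not part of the library source): any object exposing
# a ``split(X, y, groups)`` method can be passed as ``cv``, for example an explicit KFold:
#
#   from sklearn.model_selection import KFold, cross_val_score
#   scores = cross_val_score(estimator, X, y, cv=KFold(n_splits=5, shuffle=True,
#                                                      random_state=0))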
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, return_n_test_samples=False,
return_times=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)")
else:
fit_time = time.time() - start_time
test_score = _score(estimator, X_test, y_test, scorer)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
total_time = score_time + fit_time
end_msg = "%s, total=%s" % (msg, logger.short_format_time(total_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score, test_score] if return_train_score else [test_score]
if return_n_test_samples:
ret.append(_num_samples(X_test))
if return_times:
ret.extend([fit_time, score_time])
if return_parameters:
ret.append(parameters)
return ret
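# Hedged sketch (an assumption, not library code): the list returned above is
# positional, so a caller that requested ``return_n_test_samples=True`` and
# ``return_times=True`` would unpack it roughly as follows.
def _demo_unpack_fit_and_score(ret, return_train_score=True):  # pragma: no cover
    pos = 0
    if return_train_score:
        pos = 1  # ret[0] holds the training score in that case
    test_score = ret[pos]
    n_test_samples = ret[pos + 1]
    fit_time, score_time = ret[pos + 2], ret[pos + 3]
    return test_score, n_test_samples, fit_time, score_time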
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
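# Minimal sketch of a callable with the ``scorer(estimator, X, y)`` signature
# expected by ``_score`` and by the ``scoring`` parameters above; accuracy is
# just an assumed example metric.
def _demo_accuracy_scorer(estimator, X_test, y_test):  # pragma: no cover
    return float(np.mean(estimator.predict(X_test) == y_test))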
def cross_val_predict(estimator, X, y=None, groups=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs',
method='predict'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
method : string, optional, default: 'predict'
Invokes the passed method name of the passed estimator. For
method='predict_proba', the columns correspond to the classes
in sorted order.
Returns
-------
predictions : ndarray
This is the result of calling ``method``
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y)
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Ensure the estimator has implemented the passed decision function
if not callable(getattr(estimator, method)):
raise AttributeError('{} not implemented in estimator'
.format(method))
if method in ['decision_function', 'predict_proba', 'predict_log_proba']:
le = LabelEncoder()
y = le.fit_transform(y)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
prediction_blocks = parallel(delayed(_fit_and_predict)(
clone(estimator), X, y, train, test, verbose, fit_params, method)
for train, test in cv.split(X, y, groups))
# Concatenate the predictions
predictions = [pred_block_i for pred_block_i, _ in prediction_blocks]
test_indices = np.concatenate([indices_i
for _, indices_i in prediction_blocks])
if not _check_is_permutation(test_indices, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_test_indices = np.empty(len(test_indices), dtype=int)
inv_test_indices[test_indices] = np.arange(len(test_indices))
# Check for sparse predictions
if sp.issparse(predictions[0]):
predictions = sp.vstack(predictions, format=predictions[0].format)
else:
predictions = np.concatenate(predictions)
return predictions[inv_test_indices]
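# Illustrative sketch (the classifier and dataset are assumptions for the
# example): with ``method='predict_proba'`` the returned columns follow the
# sorted class labels, as stated in the docstring above.
def _demo_cross_val_predict_proba():  # pragma: no cover
    from sklearn import datasets
    from sklearn.linear_model import LogisticRegression
    iris = datasets.load_iris()
    proba = cross_val_predict(LogisticRegression(), iris.data, iris.target,
                              method='predict_proba')
    # proba has shape (n_samples, n_classes); column j corresponds to the
    # j-th class label in sorted order.
    return proba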
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params,
method):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
method : string
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : sequence
Result of calling 'estimator.method'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
func = getattr(estimator, method)
predictions = func(X_test)
if method in ['decision_function', 'predict_proba', 'predict_log_proba']:
n_classes = len(set(y))
predictions_ = np.zeros((X_test.shape[0], n_classes))
if method == 'decision_function' and len(estimator.classes_) == 2:
predictions_[:, estimator.classes_[-1]] = predictions
else:
predictions_[:, estimator.classes_] = predictions
predictions = predictions_
return predictions, test
def _check_is_permutation(indices, n_samples):
"""Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
integer array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(indices) is np.arange(n)
"""
if len(indices) != n_samples:
return False
hit = np.zeros(n_samples, dtype=bool)
hit[indices] = True
if not np.all(hit):
return False
return True
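# Quick illustration (not part of the module's test suite): the helper accepts
# any reordering of arange(n_samples) and rejects partial or repeated coverage.
def _demo_check_is_permutation():  # pragma: no cover
    assert _check_is_permutation(np.array([2, 0, 1]), 3)       # a reordering
    assert not _check_is_permutation(np.array([0, 1, 1]), 3)   # repeated index
    assert not _check_is_permutation(np.array([0, 1]), 3)      # wrong length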
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def permutation_test_score(estimator, X, y, groups=None, cv=None,
n_permutations=100, n_jobs=1, random_state=0,
verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Labels to constrain permutation within groups, i.e. ``y`` values
are permuted among samples with the same group identifier.
When not specified, ``y`` values are permuted among all samples.
When a grouped cross-validator is used, the group labels are
also passed on to the ``split`` method of the cross-validator. The
cross-validator uses them for grouping the samples while splitting
the dataset into train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The p-value, which approximates the probability that the score would
be obtained by chance. This is calculated as:
`(C + 1) / (n_permutations + 1)`
Where C is the number of permutations whose score >= the true score.
The best possible p-value is 1/(n_permutations + 1), the worst is 1.0.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, groups, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, groups, random_state),
groups, cv, scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a problem with nosetests
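# Worked illustration of the p-value formula quoted in the docstring,
# (C + 1) / (n_permutations + 1); the scores below are made-up numbers.
def _demo_permutation_pvalue():  # pragma: no cover
    permutation_scores = np.array([0.55, 0.61, 0.49, 0.72, 0.58])
    score = 0.70
    # One permuted score (0.72) reaches the true score, so C = 1 and the
    # p-value is (1 + 1) / (5 + 1) = 1/3.
    pvalue = (np.sum(permutation_scores >= score) + 1.0) / \
        (len(permutation_scores) + 1)
    return pvalue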
def _permutation_test_score(estimator, X, y, groups, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv.split(X, y, groups):
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
estimator.fit(X_train, y_train)
avg_score.append(scorer(estimator, X_test, y_test))
return np.mean(avg_score)
def _shuffle(y, groups, random_state):
"""Return a shuffled copy of y eventually shuffle among same groups."""
if groups is None:
indices = random_state.permutation(len(y))
else:
indices = np.arange(len(groups))
for group in np.unique(groups):
this_mask = (groups == group)
indices[this_mask] = random_state.permutation(indices[this_mask])
return safe_indexing(y, indices)
def learning_curve(estimator, X, y, groups=None,
train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None,
exploit_incremental_learning=False, n_jobs=1,
pre_dispatch="all", verbose=0, shuffle=False,
random_state=None):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
shuffle : boolean, optional
Whether to shuffle training data before taking prefixes of it
        based on ``train_sizes``.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``shuffle`` is True.
    Returns
    -------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Store it as list as we will be iterating over the list multiple times
cv_iter = list(cv.split(X, y, groups))
scorer = check_scoring(estimator, scoring=scoring)
n_max_training_samples = len(cv_iter[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if shuffle:
rng = check_random_state(random_state)
cv_iter = ((rng.permutation(train), test) for train, test in cv_iter)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv_iter)
else:
train_test_proportions = []
for train, test in cv_iter:
for n_train_samples in train_sizes_abs:
train_test_proportions.append((train[:n_train_samples], test))
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train, test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in train_test_proportions)
out = np.array(out)
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
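# Illustrative sketch (the naive Bayes classifier and the digits data are
# example choices only): the three arrays returned by ``learning_curve`` are
# typically averaged over the CV folds before plotting.
def _demo_learning_curve_summary():  # pragma: no cover
    from sklearn.datasets import load_digits
    from sklearn.naive_bayes import GaussianNB
    digits = load_digits()
    sizes, train_scores, test_scores = learning_curve(
        GaussianNB(), digits.data, digits.target,
        train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
    return sizes, train_scores.mean(axis=1), test_scores.mean(axis=1)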
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype(
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, groups=None,
cv=None, scoring=None, n_jobs=1, pre_dispatch="all",
verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py`
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
# NOTE do not change order of iteration to allow one time cv splitters
for train, test in cv.split(X, y, groups) for v in param_range)
out = np.asarray(out)
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
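# Illustrative sketch (Ridge regression and its ``alpha`` parameter are assumed
# examples): ``validation_curve`` varies one hyper-parameter and returns
# per-fold train/test scores for each value in ``param_range``.
def _demo_validation_curve():  # pragma: no cover
    from sklearn.datasets import load_diabetes
    from sklearn.linear_model import Ridge
    data = load_diabetes()
    param_range = np.logspace(-3, 2, 6)
    train_scores, test_scores = validation_curve(
        Ridge(), data.data, data.target,
        param_name='alpha', param_range=param_range, cv=5)
    return param_range, train_scores.mean(axis=1), test_scores.mean(axis=1)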
| bsd-3-clause |
wdurhamh/statsmodels | statsmodels/tools/print_version.py | 23 | 7951 | #!/usr/bin/env python
from __future__ import print_function
from statsmodels.compat.python import reduce
import sys
from os.path import dirname
def safe_version(module, attr='__version__'):
if not isinstance(attr, list):
attr = [attr]
try:
return reduce(getattr, [module] + attr)
except AttributeError:
return "Cannot detect version"
def _show_versions_only():
print("\nINSTALLED VERSIONS")
print("------------------")
print("Python: %d.%d.%d.%s.%s" % sys.version_info[:])
try:
import os
(sysname, nodename, release, version, machine) = os.uname()
print("OS: %s %s %s %s" % (sysname, release, version, machine))
print("byteorder: %s" % sys.byteorder)
print("LC_ALL: %s" % os.environ.get('LC_ALL', "None"))
print("LANG: %s" % os.environ.get('LANG', "None"))
except:
pass
try:
from statsmodels import version
has_sm = True
except ImportError:
has_sm = False
print('\nStatsmodels\n===========\n')
if has_sm:
print('Installed: %s' % safe_version(version, 'full_version'))
else:
print('Not installed')
print("\nRequired Dependencies\n=====================\n")
try:
import Cython
print("cython: %s" % safe_version(Cython))
except ImportError:
print("cython: Not installed")
try:
import numpy
print("numpy: %s" % safe_version(numpy, ['version', 'version']))
except ImportError:
print("numpy: Not installed")
try:
import scipy
print("scipy: %s" % safe_version(scipy, ['version', 'version']))
except ImportError:
print("scipy: Not installed")
try:
import pandas
print("pandas: %s" % safe_version(pandas, ['version', 'version']))
except ImportError:
print("pandas: Not installed")
try:
import dateutil
print(" dateutil: %s" % safe_version(dateutil))
except ImportError:
print(" dateutil: not installed")
try:
import patsy
print("patsy: %s" % safe_version(patsy))
except ImportError:
print("patsy: Not installed")
print("\nOptional Dependencies\n=====================\n")
try:
import matplotlib as mpl
print("matplotlib: %s" % safe_version(mpl))
except ImportError:
print("matplotlib: Not installed")
try:
from cvxopt import info
print("cvxopt: %s" % safe_version(info, 'version'))
except ImportError:
print("cvxopt: Not installed")
print("\nDeveloper Tools\n================\n")
try:
import IPython
print("IPython: %s" % safe_version(IPython))
except ImportError:
print("IPython: Not installed")
try:
import jinja2
print(" jinja2: %s" % safe_version(jinja2))
except ImportError:
print(" jinja2: Not installed")
try:
import sphinx
print("sphinx: %s" % safe_version(sphinx))
except ImportError:
print("sphinx: Not installed")
try:
import pygments
print(" pygments: %s" % safe_version(pygments))
except ImportError:
print(" pygments: Not installed")
try:
import nose
print("nose: %s" % safe_version(nose))
except ImportError:
print("nose: Not installed")
try:
import virtualenv
print("virtualenv: %s" % safe_version(virtualenv))
except ImportError:
print("virtualenv: Not installed")
print("\n")
def show_versions(show_dirs=True):
    if not show_dirs:
        _show_versions_only()
        return
print("\nINSTALLED VERSIONS")
print("------------------")
print("Python: %d.%d.%d.%s.%s" % sys.version_info[:])
try:
import os
(sysname, nodename, release, version, machine) = os.uname()
print("OS: %s %s %s %s" % (sysname, release, version, machine))
print("byteorder: %s" % sys.byteorder)
print("LC_ALL: %s" % os.environ.get('LC_ALL', "None"))
print("LANG: %s" % os.environ.get('LANG', "None"))
except:
pass
try:
import statsmodels
from statsmodels import version
has_sm = True
except ImportError:
has_sm = False
print('\nStatsmodels\n===========\n')
if has_sm:
print('Installed: %s (%s)' % (safe_version(version, 'full_version'),
dirname(statsmodels.__file__)))
else:
print('Not installed')
print("\nRequired Dependencies\n=====================\n")
try:
import Cython
print("cython: %s (%s)" % (safe_version(Cython),
dirname(Cython.__file__)))
except ImportError:
print("cython: Not installed")
try:
import numpy
print("numpy: %s (%s)" % (safe_version(numpy, ['version', 'version']),
dirname(numpy.__file__)))
except ImportError:
print("numpy: Not installed")
try:
import scipy
print("scipy: %s (%s)" % (safe_version(scipy, ['version', 'version']),
dirname(scipy.__file__)))
except ImportError:
print("scipy: Not installed")
try:
import pandas
print("pandas: %s (%s)" % (safe_version(pandas, ['version',
'version']),
dirname(pandas.__file__)))
except ImportError:
print("pandas: Not installed")
try:
import dateutil
print(" dateutil: %s (%s)" % (safe_version(dateutil),
dirname(dateutil.__file__)))
except ImportError:
print(" dateutil: not installed")
try:
import patsy
print("patsy: %s (%s)" % (safe_version(patsy),
dirname(patsy.__file__)))
except ImportError:
print("patsy: Not installed")
print("\nOptional Dependencies\n=====================\n")
try:
import matplotlib as mpl
print("matplotlib: %s (%s)" % (safe_version(mpl),
dirname(mpl.__file__)))
except ImportError:
print("matplotlib: Not installed")
try:
from cvxopt import info
print("cvxopt: %s (%s)" % (safe_version(info, 'version'),
dirname(info.__file__)))
except ImportError:
print("cvxopt: Not installed")
print("\nDeveloper Tools\n================\n")
try:
import IPython
print("IPython: %s (%s)" % (safe_version(IPython),
dirname(IPython.__file__)))
except ImportError:
print("IPython: Not installed")
try:
import jinja2
print(" jinja2: %s (%s)" % (safe_version(jinja2),
dirname(jinja2.__file__)))
except ImportError:
print(" jinja2: Not installed")
try:
import sphinx
print("sphinx: %s (%s)" % (safe_version(sphinx),
dirname(sphinx.__file__)))
except ImportError:
print("sphinx: Not installed")
try:
import pygments
print(" pygments: %s (%s)" % (safe_version(pygments),
dirname(pygments.__file__)))
except ImportError:
print(" pygments: Not installed")
try:
import nose
print("nose: %s (%s)" % (safe_version(nose), dirname(nose.__file__)))
except ImportError:
print("nose: Not installed")
try:
import virtualenv
print("virtualenv: %s (%s)" % (safe_version(virtualenv),
dirname(virtualenv.__file__)))
except ImportError:
print("virtualenv: Not installed")
print("\n")
if __name__ == "__main__":
show_versions()
| bsd-3-clause |
madjelan/scipy2015_tutorial | check_env.py | 6 | 2002 | problems = 0
try:
import IPython
print('IPython', IPython.__version__)
assert(IPython.__version__ >= '3.0')
except ImportError:
print("IPython version 3 is not installed. Please install via pip or conda.")
problems += 1
try:
import numpy
print('NumPy', numpy.__version__)
assert(numpy.__version__ >= '1.9')
except ImportError:
print("Numpy version 1.9 or greater is not installed. Please install via pip or conda.")
problems += 1
try:
import pandas
print('pandas', pandas.__version__)
assert(pandas.__version__ >= '0.16')
except ImportError:
print("pandas version 0.16 or greater is not installed. Please install via pip or conda.")
problems += 1
try:
import scipy
print('SciPy', scipy.__version__)
except ImportError:
print("SciPy is not installed. Please install via pip or conda.")
problems += 1
try:
import matplotlib
print('matplotlib', matplotlib.__version__)
except ImportError:
print("matplotlib is not installed. Please install via pip or conda.")
problems += 1
try:
import theano
print('Theano', theano.__version__)
except ImportError:
print("Theano is not installed. Please install via pip or conda.")
problems += 1
try:
import pymc3
print('PyMC', pymc3.__version__)
except ImportError:
print("PyMC 3 is not installed. Please install via pip:\npip install -U git+git://github.com/pymc-devs/pymc3.git")
problems += 1
try:
import sklearn
print('scikit-learn', sklearn.__version__)
except ImportError:
print("scikit-learn is not installed. Please install via pip or conda.")
problems += 1
try:
import patsy
print('patsy', patsy.__version__)
except ImportError:
print("patsy is not installed. Please install via pip or conda.")
problems += 1
if not problems:
print("\nEverything's cool")
else:
print('There are', problems, 'problems. Please ensure all required components are installed.') | cc0-1.0 |
amnona/heatsequer | setup.py | 1 | 1900 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, heatsequer development team.
#
# Distributed under the terms of the Modified GPLv3 License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import re
import ast
import os
from setuptools import find_packages, setup
classes = """
Development Status :: 3 - Alpha
License :: OSI Approved :: GPLv3 license
Topic :: Software Development :: Libraries
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Bio-Informatics
Programming Language :: Python :: 3
Programming Language :: Python :: 3 :: Only
Operating System :: Unix
Operating System :: POSIX
Operating System :: MacOS :: MacOS X
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
description = ('Easy heatmap visualization of microbes')
with open('README.md') as f:
long_description = f.read()
version = '0.0.1'
setup(name='heatsequer',
version=version,
license='Modified BSD',
description=description,
long_description=long_description,
author="heatsequer development team",
author_email="[email protected]",
maintainer="heatsequer development team",
maintainer_email="[email protected]",
packages=find_packages(),
setup_requires=['numpy >= 1.9.2'],
# note, hdf5 is required to be installed beforehand
# also qt5,pyqt==5 needs to be installed beforehand
install_requires=[
'biom-format',
'easygui',
'scipy',
'numpy',
'networkx',
'scikit-learn',
'matplotlib',
'h5py',
'requests',
],
classifiers=classifiers,
package_data={
}
)
| bsd-3-clause |
huongttlan/statsmodels | statsmodels/examples/ex_kernel_regression_sigtest.py | 34 | 3177 | # -*- coding: utf-8 -*-
"""Kernel Regression and Significance Test
Warning: SLOW, 11 minutes on my computer
Created on Thu Jan 03 20:20:47 2013
Author: Josef Perktold
results - this version
----------------------
>>> exec(open('ex_kernel_regression_censored1.py').read())
bw
[ 0.3987821 0.50933458]
[0.39878209999999997, 0.50933457999999998]
sig_test - default
Not Significant
pvalue
0.11
test statistic 0.000434305313291
bootstrap critical values
[ 0.00043875 0.00046808 0.0005064 0.00054151]
sig_test - pivot=True, nboot=200, nested_res=50
pvalue
0.01
test statistic 6.17877171579
bootstrap critical values
[ 5.5658345 5.74761076 5.87386858 6.46012041]
times: 8.34599995613 20.6909999847 666.373999834
"""
from __future__ import print_function
import time
import numpy as np
import statsmodels.nonparametric.api as nparam
import statsmodels.nonparametric.kernel_regression as smkr
if __name__ == '__main__':
t0 = time.time()
#example from test file
nobs = 200
np.random.seed(1234)
C1 = np.random.normal(size=(nobs, ))
C2 = np.random.normal(2, 1, size=(nobs, ))
noise = np.random.normal(size=(nobs, ))
Y = 0.3 +1.2 * C1 - 0.9 * C2 + noise
#self.write2file('RegData.csv', (Y, C1, C2))
#CODE TO PRODUCE BANDWIDTH ESTIMATION IN R
#library(np)
#data <- read.csv('RegData.csv', header=FALSE)
#bw <- npregbw(formula=data$V1 ~ data$V2 + data$V3,
# bwmethod='cv.aic', regtype='lc')
model = nparam.KernelReg(endog=[Y], exog=[C1, C2],
reg_type='lc', var_type='cc', bw='aic')
mean, marg = model.fit()
#R_bw = [0.4017893, 0.4943397] # Bandwidth obtained in R
bw_expected = [0.3987821, 0.50933458]
#npt.assert_allclose(model.bw, bw_expected, rtol=1e-3)
print('bw')
print(model.bw)
print(bw_expected)
print('\nsig_test - default')
print(model.sig_test([1], nboot=100))
t1 = time.time()
res0 = smkr.TestRegCoefC(model, [1])
print('pvalue')
print((res0.t_dist >= res0.test_stat).mean())
print('test statistic', res0.test_stat)
print('bootstrap critical values')
probs = np.array([0.9, 0.95, 0.975, 0.99])
bsort0 = np.sort(res0.t_dist)
nrep0 = len(bsort0)
print(bsort0[(probs * nrep0).astype(int)])
t2 = time.time()
print('\nsig_test - pivot=True, nboot=200, nested_res=50')
res1 = smkr.TestRegCoefC(model, [1], pivot=True, nboot=200, nested_res=50)
print('pvalue')
print((res1.t_dist >= res1.test_stat).mean())
print('test statistic', res1.test_stat)
print('bootstrap critical values')
probs = np.array([0.9, 0.95, 0.975, 0.99])
bsort1 = np.sort(res1.t_dist)
nrep1 = len(bsort1)
print(bsort1[(probs * nrep1).astype(int)])
t3 = time.time()
print('times:', t1-t0, t2-t1, t3-t2)
# import matplotlib.pyplot as plt
# fig = plt.figure()
# ax = fig.add_subplot(1,1,1)
# ax.plot(x, y, 'o', alpha=0.5)
# ax.plot(x, y_cens, 'o', alpha=0.5)
# ax.plot(x, y_true, lw=2, label='DGP mean')
# ax.plot(x, sm_mean, lw=2, label='model 0 mean')
# ax.plot(x, mean2, lw=2, label='model 2 mean')
# ax.legend()
#
# plt.show()
| bsd-3-clause |
MJuddBooth/pandas | pandas/tests/indexes/period/test_astype.py | 2 | 5097 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
import pandas as pd
from pandas import Index, Int64Index, NaT, Period, PeriodIndex, period_range
import pandas.util.testing as tm
class TestPeriodIndexAsType(object):
@pytest.mark.parametrize('dtype', [
float, 'timedelta64', 'timedelta64[ns]'])
def test_astype_raises(self, dtype):
# GH#13149, GH#13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
msg = 'Cannot cast PeriodArray to dtype'
with pytest.raises(TypeError, match=msg):
idx.astype(dtype)
def test_astype_conversion(self):
# GH#13149, GH#13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
result = idx.astype(object)
expected = Index([Period('2016-05-16', freq='D')] +
[Period(NaT, freq='D')] * 3, dtype='object')
tm.assert_index_equal(result, expected)
result = idx.astype(np.int64)
expected = Int64Index([16937] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
result = idx.astype(str)
expected = Index(str(x) for x in idx)
tm.assert_index_equal(result, expected)
idx = period_range('1990', '2009', freq='A')
result = idx.astype('i8')
tm.assert_index_equal(result, Index(idx.asi8))
tm.assert_numpy_array_equal(result.values, idx.asi8)
def test_astype_uint(self):
arr = period_range('2000', periods=2)
expected = pd.UInt64Index(np.array([10957, 10958], dtype='uint64'))
tm.assert_index_equal(arr.astype("uint64"), expected)
tm.assert_index_equal(arr.astype("uint32"), expected)
def test_astype_object(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=object)
tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([pd.Period('2011-01', freq='M'), pd.NaT], dtype=object)
tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
exp = np.array([pd.Period('2011-01-01', freq='D'), pd.NaT],
dtype=object)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
# TODO: de-duplicate this version (from test_ops) with the one above
# (from test_period)
def test_astype_object2(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.astype(object)
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.astype(object)
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
assert result[i] == expected[i]
assert result[2] is pd.NaT
assert result.name == expected.name
result_list = idx.tolist()
for i in [0, 1, 3]:
assert result_list[i] == expected_list[i]
assert result_list[2] is pd.NaT
def test_astype_category(self):
obj = pd.period_range("2000", periods=2)
result = obj.astype('category')
expected = pd.CategoricalIndex([pd.Period('2000-01-01', freq="D"),
pd.Period('2000-01-02', freq="D")])
tm.assert_index_equal(result, expected)
result = obj._data.astype('category')
expected = expected.values
tm.assert_categorical_equal(result, expected)
def test_astype_array_fallback(self):
obj = pd.period_range("2000", periods=2)
result = obj.astype(bool)
expected = pd.Index(np.array([True, True]))
tm.assert_index_equal(result, expected)
result = obj._data.astype(bool)
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
| bsd-3-clause |
logicabrity/odes | docs/src/examples/planarpendulum.py | 3 | 5001 | # Authors: B. Malengier
"""
This example shows how to solve the planar pendulum in full coordinate space.
This results in a DAE system with one algebraic equation.
The problem is easily stated: a pendulum must move on a circle with radius 1,
it has a mass m, and the gravitational acceleration is g.
The Lagrangian is L = 1/2 m (u^2 + v^2) - m g y,
with constraint: x^2+y^2 = 1.
Adding a Lagrange multiplier \lambda, we arrive at the Euler-Lagrange
differential equations for the problem:
\dot{x} = u
\dot{y} = v
\dot{u} = \lambda x/m
\dot{v} = \lambda y/m - g
and \lambda must be such that the constraint is satisfied:
x^2+y^2 = 1
DASPK cannot solve the above. Hence we derive a different constraint that
contains more of the unknowns, as well as \lambda.
Differentiating the constraint with respect to time gives a new constraint:
    x u + y v = 0
Differentiating a second time with respect to time gives us:
    u^2 + v^2 + x \dot{u} + y \dot{v} = 0
which can be written with the known form of \dot{u}, \dot{v} as
    u^2 + v^2 + \lambda l^2/m - g y = 0
This last expression will be used to find the solution to the planar
pendulum problem.
The algorithm first needs to find initial conditions for the derivatives,
then it solves the problem at hand. We take g=1, m=1.
"""
#python 2.7 support
from __future__ import print_function, division
try:
input = raw_input
except:
pass
from numpy import (arange, zeros, array, sin)
import numpy as np
from scikits.odes.sundials import ida
import matplotlib.pyplot as plt
def draw_graphs(fignum, t, x, y):
plt.ion()
plt.figure(fignum)
plt.subplot(211)
plt.scatter(x, y)
plt.xlabel('x coordinate')
plt.ylabel('y coordinate')
plt.axis('equal')
plt.subplot(212)
plt.plot(t, x, 'b', label='x coordinate')
plt.plot(t, y, 'k', label='y coordinate')
plt.legend()
plt.ylabel('Coordinate')
plt.xlabel('Time')
plt.show()
class oscres(ida.IDA_RhsFunction):
def evaluate(self, t, x, xdot, result, userdata):
g=1
result[0]=x[2]-xdot[0]
result[1]=x[3]-xdot[1]
result[2]=-xdot[2]-x[4]*x[0]
result[3]=-xdot[3]-x[4]*x[1]-g
#tmp[4]=x[0]*x[0]+x[1]*x[1]-1
#tmp[4]=x[0]*x[2]+x[1]*x[3]
result[4] = x[2]**2 + x[3]**2 \
- (x[0]**2 + x[1]**2)*x[4] - x[1] * g
return 0
res=oscres()
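# Quick numerical sanity check (illustrative only, not used by the runs below):
# for the hanging-at-rest configuration the residual defined above should
# vanish once lambda is chosen from the algebraic constraint.
def _demo_residual_consistency():
    x0, y0 = 0.0, -1.0 # pendulum at rest, hanging straight down
    # lambda from the algebraic residual result[4] with u = v = 0 and g = 1
    lam = (0.0**2 + 0.0**2 - y0*1.0)/(x0**2 + y0**2)
    state = np.array([x0, y0, 0.0, 0.0, lam])
    statedot = np.array([0.0, 0.0, -lam*x0, -lam*y0 - 1.0, 0.0])
    out = np.zeros(5)
    res.evaluate(0.0, state, statedot, out, None)
    return out # expected to be (numerically) zero in every component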
class SimpleOscillator():
stop_t = arange(.0, 15, 0.2, dtype=np.float)
theta= 3.14/3 #starting angle
x0=sin(theta)
y0=-(1-x0**2)**.5
g=1
lambdaval = 0.1
#z0 = array([x0, y0, 0., 0., lambdaval], np.float)
#zp0 = array([0., 0., -lambdaval*x0, -lambdaval*y0-g, -g], np.float)
z0 = [x0, y0, 0., 0., lambdaval]
zp0 = [0., 0., -lambdaval*x0, -lambdaval*y0-g, -g]
problem = SimpleOscillator()
time = problem.stop_t
nr = len(time)
# Variant 1: Solving the problem with the 'solve' method
solver=ida.IDA(res,
compute_initcond='yp0',
first_step_size=1e-18,
atol=1e-6,rtol=1e-6,
algebraic_vars_idx=[4])
# strip unneeded return values from run_solver
_flag, t1, y1 = solver.solve(time, problem.z0, problem.zp0)[:3]
xt = y1[:, 0]
yt = y1[:, 1]
draw_graphs(1, t1, xt, yt)
# Variant 2: Solving the problem with the more versatile (but slower) method 'step'
problem.x0 = problem.x0 * 2
problem.y0 = problem.y0 * 2
problem.z0 = array([problem.x0, problem.y0, 0., 0., problem.lambdaval], np.float)
y2 = np.empty([nr, len(problem.z0)], float)
# options for solver remain the same
# solver.set_options(...)
p2_z0 = np.asarray(problem.z0, float)
p2_zp0 = np.asarray(problem.zp0, float)
solver.init_step(time[0], p2_z0, p2_zp0)
y2[0, :] = problem.z0
for i in range(len(time))[1:]:
solver.step(time[i], y2[i, :])
xt = y2[:, 0]
yt = y2[:, 1]
draw_graphs(2, time, xt, yt)
# Variant 3: The same as variant 1 (intial value as in variant 2), but adding
# a rootfn - in this case we simply output the current values
# and stop when t == 100
class RootFn(ida.IDA_RootFunction):
def evaluate(self, t, x, xdot, out, userdata):
print('t = ', t, ', x = ', x[0], ', y = ', x[1],
', xdot = ', xdot[0], ', ydot = ', xdot[1])
print ('out', out)
out[0] = 10.-t
return 0
rootfn = RootFn()
solver.set_options(nr_rootfns = 1, rootfn=rootfn)
if not isinstance(rootfn, ida.IDA_RootFunction):
print ('Test for IDA_RootFunction instance failed')
_flag, t3, y3 = solver.solve(time, problem.z0, problem.zp0)[:3]
xt = y3[:, 0]
yt = y3[:, 1]
draw_graphs(3, t3, xt, yt)
# Variant 4: The same as variant 3 but a python function as root function
def root_fn(t, x, xdot, out):
print('t = ', t, ', x = ', x[0], ', y = ', x[1],
', xdot = ', xdot[0], ', ydot = ', xdot[1])
out[0] = 10.-t
return 0
solver.set_options(nr_rootfns = 1, rootfn=root_fn)
_flag, t4, y4 = solver.solve(time, problem.z0, problem.zp0)[:3]
xt = y4[:, 0]
yt = y4[:, 1]
draw_graphs(4, t4, xt, yt)
input("Press a button to finish.")
| bsd-3-clause |
CosmicFish/CosmicFish | camb/eftcamb/tests_EFT/python/CAMB_plots_lib/CMB_plots.py | 2 | 11153 | import numpy as np
import matplotlib.pyplot as plt
import math
class CMB_plots:
"""
Class that contains the methods to optimally plot the CMB power spectra
"""
# general plot settings:
color = 'red' # default color of the plot
axes_label_position = 'left' # position of the y axes and y axes label
negative_line_style = '--'
# comparison plot settings:
comparison = False # if the plot is a comparison of spectra or just the plot
comparison_min = 10.0**(-3) # minimum y value of comparison plots
comparison_max = 1.1*10.0**(+3) # maximum y value of comparison plots
Fsky = 0.85 # fsky for cosmic variance
def CosmicVariance(self,l):
""" function that computes cosmic variance at a given l"""
return math.sqrt(2.0/((2.0*l + 1.0)*self.Fsky))
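    # Illustrative helper (an addition for clarity, not used by the plotting
    # methods below): the comparison panels plot the cosmic-variance limit in
    # percent, which is simply 100 * CosmicVariance(l).
    def CosmicVariancePercent(self, l):
        """ cosmic variance at a given l, expressed in percent """
        return 100.0*self.CosmicVariance(l)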
def TT_plot(self, stream, xval, yval):
""" CMB temperature power spectrum plot """
# do the plot:
self.TT_p, = stream.plot( xval, yval, color = self.color )
# set x axes boundary:
stream.set_xlim(np.amin(xval),np.amax(xval))
# set axes scales
stream.set_xscale('Log')
stream.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
# set labels
stream.set_xlabel(r'$l$')
stream.set_ylabel(r'$l(l+1)C_l^{TT}/ 2\pi$')
# set the position of axes and label:
stream.yaxis.set_label_position(self.axes_label_position)
if self.axes_label_position == 'right':
stream.yaxis.tick_right()
# setup if comparison:
if self.comparison:
# plot cosmic variance
ycosmicvar = np.array(map(self.CosmicVariance,xval))*100
self.CV_p, = stream.plot(xval, ycosmicvar, color = 'k')
self.TT_p, = stream.plot( xval, -yval, color = self.color, linestyle=self.negative_line_style )
# set log scale
stream.set_yscale('Log')
# set limits and label
stream.set_ylim(self.comparison_min, self.comparison_max)
stream.set_ylabel(r'$\Delta C_l^{TT}/ C_l^{TT} (\%) $')
def EE_plot(self,stream, xval, yval):
""" CMB E mode polarization power spectrum plot """
# do the plot:
self.EE_p, = stream.plot(xval, yval, color = self.color)
# set x axes boundary:
stream.set_xlim(np.amin(xval),np.amax(xval))
# set axes scales
stream.set_xscale('Log')
stream.set_yscale('Log')
# set labels
stream.set_xlabel(r'$l$')
stream.set_ylabel(r'$l(l+1)C_l^{EE}/ 2\pi$')
# set the position of axes and label:
stream.yaxis.set_label_position(self.axes_label_position)
if self.axes_label_position == 'right':
stream.yaxis.tick_right()
# setup if comparison:
if self.comparison:
ycosmicvar = np.array(map(self.CosmicVariance,xval))*100
self.EE_p, = stream.plot(xval, -yval, color = self.color, linestyle=self.negative_line_style)
self.CV_p, = stream.plot(xval, ycosmicvar, color = 'k')
stream.set_yscale('Log')
stream.set_ylim(self.comparison_min, self.comparison_max)
stream.set_ylabel(r'$\Delta C_l^{EE}/ C_l^{EE} (\%) $')
def TE_plot(self,stream, xval, yval):
""" CMB temperature E mode polarization cross correlation power spectrum plot """
# do the plot:
self.TE_p, = stream.plot(xval, yval, color = self.color)
self.TE_p, = stream.plot(xval, -yval, color = self.color, linestyle=self.negative_line_style)
# set x axes boundary:
stream.set_xlim(np.amin(xval),np.amax(xval))
# set axes scales
stream.set_xscale('Log')
stream.set_yscale('Log')
# set labels
stream.set_xlabel(r'$l$')
stream.set_ylabel(r'$l(l+1)C_l^{TE}/ 2\pi$')
# set the position of axes and label:
stream.yaxis.set_label_position(self.axes_label_position)
if self.axes_label_position == 'right':
stream.yaxis.tick_right()
# setup if comparison:
if self.comparison:
ycosmicvar = np.array(map(self.CosmicVariance,xval))*100
self.CV_p, = stream.plot(xval, ycosmicvar, color = 'k')
stream.set_yscale('Log')
stream.set_ylim(self.comparison_min, self.comparison_max)
stream.set_ylabel(r'$\Delta C_l^{TE}/ C_l^{TE} (\%) $')
def BB_plot(self,stream, xval, yval):
""" CMB B mode polarization power spectrum plot """
# do the plot:
self.BB_p, = stream.plot(xval, yval, color = self.color)
self.BB_p, = stream.plot(xval, -yval, color = self.color, linestyle=self.negative_line_style)
# set x axes boundary:
stream.set_xlim(np.amin(xval),np.amax(xval))
# set axes scales
stream.set_xscale('Log')
stream.set_yscale('Log')
# set labels
stream.set_xlabel(r'$l$')
stream.set_ylabel(r'$l(l+1)C_l^{BB}/ 2\pi$')
# set the position of axes and label:
stream.yaxis.set_label_position(self.axes_label_position)
if self.axes_label_position == 'right':
stream.yaxis.tick_right()
# setup if comparison:
if self.comparison:
ycosmicvar = np.array(map(self.CosmicVariance,xval))*100
self.CV_p, = stream.plot(xval, ycosmicvar, color = 'k')
stream.set_ylim(self.comparison_min, self.comparison_max)
stream.set_ylabel(r'$\Delta C_l^{BB}/ C_l^{BB} (\%) $')
def Phi_plot(self,stream, xval, yval):
""" CMB lensing power spectrum plot """
# do the plot:
self.Phi_p, = stream.plot(xval, yval, color = self.color)
# set x axes boundary:
stream.set_xlim(np.amin(xval),np.amax(xval))
# set axes scales
stream.set_xscale('Log')
stream.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
# set labels
stream.set_xlabel(r'$l$')
stream.set_ylabel(r'$l^4 C_l^{\Phi\Phi}$')
# set the position of axes and label:
stream.yaxis.set_label_position(self.axes_label_position)
if self.axes_label_position == 'right':
stream.yaxis.tick_right()
# setup if comparison:
if self.comparison:
ycosmicvar = np.array(map(self.CosmicVariance,xval))*100
self.Phi_p, = stream.plot(xval, -yval, color = self.color, linestyle=self.negative_line_style)
self.CV_p, = stream.plot(xval, ycosmicvar, color = 'k')
stream.set_yscale('Log')
stream.set_ylim(self.comparison_min, self.comparison_max)
stream.set_ylabel(r'$\Delta C_l^{\Phi\Phi}/ C_l^{\Phi\Phi} (\%) $')
def PhiT_plot(self,stream, xval, yval):
""" CMB lensing and temperature cross correlation power spectrum plot """
# do the plot:
self.PhiT_p, = stream.plot(xval, yval, color = self.color)
# set x axes boundary:
stream.set_xlim(np.amin(xval),np.amax(xval))
stream.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
# set axes scales
stream.set_xscale('Log')
# set labels
stream.set_xlabel(r'$l$')
stream.set_ylabel(r'$l^3 C_l^{\Phi T}$')
# set the position of axes and label:
stream.yaxis.set_label_position(self.axes_label_position)
if self.axes_label_position == 'right':
stream.yaxis.tick_right()
# setup if comparison:
if self.comparison:
ycosmicvar = np.array(map(self.CosmicVariance,xval))*100
self.PhiT_p, = stream.plot(xval, -yval, color = self.color, linestyle=self.negative_line_style)
self.CV_p, = stream.plot(xval, ycosmicvar, color = 'k')
stream.set_yscale('Log')
stream.set_ylim(self.comparison_min, self.comparison_max)
stream.set_ylabel(r'$\Delta C_l^{\Phi T}/ C_l^{\Phi T} (\%) $')
def Generic_Cl(self,stream, xval, yval):
""" Generic spectrum plotter (in l-space) """
# take the abs value of y-val
yval = np.array(yval)
# do the plot:
self.Generic_Cl_plot, = stream.plot(xval, yval, color = self.color)
self.Generic_Cl_plot, = stream.plot(xval, -yval, color = self.color, linestyle=self.negative_line_style)
# set x axes boundary:
stream.set_xlim(np.amin(xval),np.amax(xval))
# set axes scales
stream.set_xscale('Log')
stream.set_yscale('Log')
# set the position of axes and label:
stream.yaxis.set_label_position(self.axes_label_position)
if self.axes_label_position == 'right':
stream.yaxis.tick_right()
# setup if comparison:
if self.comparison:
ycosmicvar = np.array(map(self.CosmicVariance,xval))*100
self.CV_p, = stream.plot(xval, ycosmicvar, color = 'k')
stream.set_ylim(self.comparison_min, self.comparison_max)
def Matter_plot(self,stream, xval, yval):
""" Matter power spectrum plot """
# do the plot:
self.Matter_p, = stream.plot(xval, yval, color = self.color)
self.Matter_p, = stream.plot(xval, -yval, color = self.color, linestyle=self.negative_line_style)
# set x axes boundary:
stream.set_xlim(np.amin(xval),np.amax(xval))
# set axes scales
stream.set_xscale('Log')
stream.set_yscale('Log')
# set labels
stream.set_xlabel(r'$k$')
stream.set_ylabel(r'$P(k)$')
# set the position of axes and label:
stream.yaxis.set_label_position(self.axes_label_position)
if self.axes_label_position == 'right':
stream.yaxis.tick_right()
# setup if comparison:
if self.comparison:
stream.set_yscale('Log')
stream.set_ylim(self.comparison_min, self.comparison_max)
stream.set_ylabel(r'$\Delta P(k)/ P(k) (\%) $')
def Transfer_plot(self,stream, xval, yval):
""" Transfer functions plot """
# do the plot:
self.Transfer_p, = stream.plot(xval, yval, color = self.color)
self.Transfer_p, = stream.plot(xval, -yval, color = self.color, linestyle=self.negative_line_style)
# set x axes boundary:
stream.set_xlim(np.amin(xval),np.amax(xval))
# set axes scales
stream.set_xscale('Log')
stream.set_yscale('Log')
# set labels
stream.set_ylabel(r'$T(k)$')
# set the position of axes and label:
stream.yaxis.set_label_position(self.axes_label_position)
if self.axes_label_position == 'right':
stream.yaxis.tick_right()
# setup if comparison:
if self.comparison:
stream.set_yscale('Log')
stream.set_ylim(self.comparison_min, self.comparison_max)
stream.set_ylabel(r'$\Delta T(k)/ T(k) (\%) $')
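# --- Illustrative sketch (not part of the original class): a minimal standalone
# version of the plotting pattern shared by the methods above -- the positive and the
# sign-flipped negative branch of a spectrum drawn on the same log-log axes, with the
# negative branch dashed. The function name and its default arguments are assumptions.
def _sketch_loglog_spectrum(ax, xval, yval, color='b', negative_line_style='--'):
    import numpy as np
    yval = np.array(yval)
    # solid line where the spectrum is positive, dashed line for the sign-flipped negative part
    ax.plot(xval, yval, color=color)
    ax.plot(xval, -yval, color=color, linestyle=negative_line_style)
    ax.set_xlim(np.amin(xval), np.amax(xval))
    ax.set_xscale('log')
    ax.set_yscale('log')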
| gpl-3.0 |
gmatteo/pymatgen | pymatgen/analysis/chemenv/coordination_environments/voronoi.py | 5 | 44209 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains the object used to describe the possible bonded atoms based on a Voronoi analysis.
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "[email protected]"
__date__ = "Feb 20, 2016"
import logging
import time
import numpy as np
from monty.json import MSONable
from scipy.spatial import Voronoi
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import (
get_lower_and_upper_f,
my_solid_angle,
rectangle_surface_intersection,
)
from pymatgen.analysis.chemenv.utils.defs_utils import AdditionalConditions
from pymatgen.analysis.chemenv.utils.math_utils import normal_cdf_step
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
def from_bson_voronoi_list2(bson_nb_voro_list2, structure):
"""
Returns the voronoi_list needed for the VoronoiContainer object from a bson-encoded voronoi_list.
Args:
        bson_nb_voro_list2: BSON-encoded list of Voronoi neighbors for each site of the structure.
structure: Structure object.
Returns:
        The voronoi_list needed for the VoronoiContainer (with PeriodicSite objects stored in the
        neighbor dictionaries, which is not allowed in the BSON format).
"""
voronoi_list = [None] * len(bson_nb_voro_list2)
for isite, voro in enumerate(bson_nb_voro_list2):
if voro is None or voro == "None":
continue
voronoi_list[isite] = []
for psd, dd in voro:
struct_site = structure[dd["index"]]
periodic_site = PeriodicSite(
struct_site._species,
struct_site.frac_coords + psd[1],
struct_site._lattice,
properties=struct_site.properties,
)
dd["site"] = periodic_site
voronoi_list[isite].append(dd)
return voronoi_list
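# Shape handled above (mirroring to_bson_voronoi_list2 further below): each encoded
# neighbor entry is a pair [psd, dd] with psd = [index_in_structure, fractional_image_shift]
# and dd the neighbor dict ("index", "distance", "angle", "normalized_distance",
# "normalized_angle"). Illustrative values only:
#   [[3, [0.0, 1.0, 0.0]], {"index": 3, "distance": 2.1, "angle": 0.8,
#                           "normalized_distance": 1.05, "normalized_angle": 0.9}]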
class DetailedVoronoiContainer(MSONable):
"""
Class used to store the full Voronoi of a given structure.
"""
AC = AdditionalConditions()
default_voronoi_cutoff = 10.0
default_normalized_distance_tolerance = 1e-5
default_normalized_angle_tolerance = 1e-3
def __init__(
self,
structure=None,
voronoi_list2=None,
voronoi_cutoff=default_voronoi_cutoff,
isites=None,
normalized_distance_tolerance=default_normalized_distance_tolerance,
normalized_angle_tolerance=default_normalized_angle_tolerance,
additional_conditions=None,
valences=None,
maximum_distance_factor=None,
minimum_angle_factor=None,
):
"""
Constructor for the VoronoiContainer object. Either a structure is given, in which case the Voronoi is
computed, or the different components of the VoronoiContainer are given (used in the from_dict method).
Args:
structure: Structure for which the Voronoi is computed.
voronoi_list2: List of voronoi polyhedrons for each site.
voronoi_cutoff: cutoff used for the voronoi.
isites: indices of sites for which the Voronoi has to be computed.
normalized_distance_tolerance: Tolerance for two normalized distances to be considered equal.
            normalized_angle_tolerance: Tolerance for two normalized angles to be considered equal.
additional_conditions: Additional conditions to be used.
valences: Valences of all the sites in the structure (used when additional conditions require it).
maximum_distance_factor: The maximum distance factor to be considered.
minimum_angle_factor: The minimum angle factor to be considered.
Raises:
RuntimeError if the Voronoi cannot be constructed.
"""
self.normalized_distance_tolerance = normalized_distance_tolerance
self.normalized_angle_tolerance = normalized_angle_tolerance
if additional_conditions is None:
self.additional_conditions = [self.AC.NONE, self.AC.ONLY_ACB]
else:
self.additional_conditions = additional_conditions
self.valences = valences
self.maximum_distance_factor = maximum_distance_factor
self.minimum_angle_factor = minimum_angle_factor
if isites is None:
indices = list(range(len(structure)))
else:
indices = isites
self.structure = structure
logging.debug("Setting Voronoi list")
if voronoi_list2 is not None:
self.voronoi_list2 = voronoi_list2
else:
self.setup_voronoi_list(indices=indices, voronoi_cutoff=voronoi_cutoff)
logging.debug("Setting neighbors distances and angles")
t1 = time.process_time()
self.setup_neighbors_distances_and_angles(indices=indices)
t2 = time.process_time()
logging.debug("Neighbors distances and angles set up in {:.2f} seconds".format(t2 - t1))
def setup_voronoi_list(self, indices, voronoi_cutoff):
"""
        Set up the Voronoi list of neighbours by calling qhull (through scipy's Voronoi).
Args:
indices: indices of the sites for which the Voronoi is needed.
voronoi_cutoff: Voronoi cutoff for the search of neighbours.
Raises:
RuntimeError: If an infinite vertex is found in the voronoi construction.
"""
self.voronoi_list2 = [None] * len(self.structure)
self.voronoi_list_coords = [None] * len(self.structure)
logging.debug("Getting all neighbors in structure")
struct_neighbors = self.structure.get_all_neighbors(voronoi_cutoff, include_index=True)
t1 = time.process_time()
logging.debug("Setting up Voronoi list :")
for jj, isite in enumerate(indices):
logging.debug(" - Voronoi analysis for site #{:d} ({:d}/{:d})".format(isite, jj + 1, len(indices)))
site = self.structure[isite]
neighbors1 = [(site, 0.0, isite)]
neighbors1.extend(struct_neighbors[isite])
distances = [i[1] for i in sorted(neighbors1, key=lambda s: s[1])]
neighbors = [i[0] for i in sorted(neighbors1, key=lambda s: s[1])]
qvoronoi_input = [s.coords for s in neighbors]
voro = Voronoi(points=qvoronoi_input, qhull_options="o Fv")
all_vertices = voro.vertices
results2 = []
maxangle = 0.0
mindist = 10000.0
for iridge, ridge_points in enumerate(voro.ridge_points):
if 0 in ridge_points:
ridge_vertices_indices = voro.ridge_vertices[iridge]
if -1 in ridge_vertices_indices:
raise RuntimeError(
"This structure is pathological," " infinite vertex in the voronoi " "construction"
)
ridge_point2 = max(ridge_points)
facets = [all_vertices[i] for i in ridge_vertices_indices]
sa = my_solid_angle(site.coords, facets)
maxangle = max([sa, maxangle])
mindist = min([mindist, distances[ridge_point2]])
for iii, sss in enumerate(self.structure):
if neighbors[ridge_point2].is_periodic_image(sss, tolerance=1.0e-6):
myindex = iii
break
results2.append(
{
"site": neighbors[ridge_point2],
"angle": sa,
"distance": distances[ridge_point2],
"index": myindex,
}
)
for dd in results2:
dd["normalized_angle"] = dd["angle"] / maxangle
dd["normalized_distance"] = dd["distance"] / mindist
self.voronoi_list2[isite] = results2
self.voronoi_list_coords[isite] = np.array([dd["site"].coords for dd in results2])
t2 = time.process_time()
logging.debug("Voronoi list set up in {:.2f} seconds".format(t2 - t1))
def setup_neighbors_distances_and_angles(self, indices):
"""
Initializes the angle and distance separations.
Args:
indices: Indices of the sites for which the Voronoi is needed.
"""
self.neighbors_distances = [None] * len(self.structure)
self.neighbors_normalized_distances = [None] * len(self.structure)
self.neighbors_angles = [None] * len(self.structure)
self.neighbors_normalized_angles = [None] * len(self.structure)
for isite in indices:
results = self.voronoi_list2[isite]
if results is None:
continue
# Initializes neighbors distances and normalized distances groups
self.neighbors_distances[isite] = []
self.neighbors_normalized_distances[isite] = []
normalized_distances = [nb_dict["normalized_distance"] for nb_dict in results]
isorted_distances = np.argsort(normalized_distances)
self.neighbors_normalized_distances[isite].append(
{
"min": normalized_distances[isorted_distances[0]],
"max": normalized_distances[isorted_distances[0]],
}
)
self.neighbors_distances[isite].append(
{
"min": results[isorted_distances[0]]["distance"],
"max": results[isorted_distances[0]]["distance"],
}
)
icurrent = 0
nb_indices = {int(isorted_distances[0])}
dnb_indices = {int(isorted_distances[0])}
for idist in iter(isorted_distances):
wd = normalized_distances[idist]
if self.maximum_distance_factor is not None:
if wd > self.maximum_distance_factor:
self.neighbors_normalized_distances[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_distances[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_normalized_distances[isite][icurrent]["dnb_indices"] = list(dnb_indices)
self.neighbors_distances[isite][icurrent]["dnb_indices"] = list(dnb_indices)
break
if np.isclose(
wd,
self.neighbors_normalized_distances[isite][icurrent]["max"],
rtol=0.0,
atol=self.normalized_distance_tolerance,
):
self.neighbors_normalized_distances[isite][icurrent]["max"] = wd
self.neighbors_distances[isite][icurrent]["max"] = results[idist]["distance"]
dnb_indices.add(int(idist))
else:
self.neighbors_normalized_distances[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_distances[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_normalized_distances[isite][icurrent]["dnb_indices"] = list(dnb_indices)
self.neighbors_distances[isite][icurrent]["dnb_indices"] = list(dnb_indices)
dnb_indices = {int(idist)}
self.neighbors_normalized_distances[isite].append({"min": wd, "max": wd})
self.neighbors_distances[isite].append(
{
"min": results[idist]["distance"],
"max": results[idist]["distance"],
}
)
icurrent += 1
nb_indices.add(int(idist))
else:
self.neighbors_normalized_distances[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_distances[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_normalized_distances[isite][icurrent]["dnb_indices"] = list(dnb_indices)
self.neighbors_distances[isite][icurrent]["dnb_indices"] = list(dnb_indices)
for idist in range(len(self.neighbors_distances[isite]) - 1):
dist_dict = self.neighbors_distances[isite][idist]
dist_dict_next = self.neighbors_distances[isite][idist + 1]
dist_dict["next"] = dist_dict_next["min"]
ndist_dict = self.neighbors_normalized_distances[isite][idist]
ndist_dict_next = self.neighbors_normalized_distances[isite][idist + 1]
ndist_dict["next"] = ndist_dict_next["min"]
if self.maximum_distance_factor is not None:
dfact = self.maximum_distance_factor
else:
dfact = self.default_voronoi_cutoff / self.neighbors_distances[isite][0]["min"]
self.neighbors_normalized_distances[isite][-1]["next"] = dfact
self.neighbors_distances[isite][-1]["next"] = dfact * self.neighbors_distances[isite][0]["min"]
# Initializes neighbors angles and normalized angles groups
self.neighbors_angles[isite] = []
self.neighbors_normalized_angles[isite] = []
normalized_angles = [nb_dict["normalized_angle"] for nb_dict in results]
isorted_angles = np.argsort(normalized_angles)[::-1]
self.neighbors_normalized_angles[isite].append(
{
"max": normalized_angles[isorted_angles[0]],
"min": normalized_angles[isorted_angles[0]],
}
)
self.neighbors_angles[isite].append(
{
"max": results[isorted_angles[0]]["angle"],
"min": results[isorted_angles[0]]["angle"],
}
)
icurrent = 0
nb_indices = {int(isorted_angles[0])}
dnb_indices = {int(isorted_angles[0])}
for iang in iter(isorted_angles):
wa = normalized_angles[iang]
if self.minimum_angle_factor is not None:
if wa < self.minimum_angle_factor:
self.neighbors_normalized_angles[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_angles[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_normalized_angles[isite][icurrent]["dnb_indices"] = list(dnb_indices)
self.neighbors_angles[isite][icurrent]["dnb_indices"] = list(dnb_indices)
break
if np.isclose(
wa,
self.neighbors_normalized_angles[isite][icurrent]["min"],
rtol=0.0,
atol=self.normalized_angle_tolerance,
):
self.neighbors_normalized_angles[isite][icurrent]["min"] = wa
self.neighbors_angles[isite][icurrent]["min"] = results[iang]["angle"]
dnb_indices.add(int(iang))
else:
self.neighbors_normalized_angles[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_angles[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_normalized_angles[isite][icurrent]["dnb_indices"] = list(dnb_indices)
self.neighbors_angles[isite][icurrent]["dnb_indices"] = list(dnb_indices)
dnb_indices = {int(iang)}
self.neighbors_normalized_angles[isite].append({"max": wa, "min": wa})
self.neighbors_angles[isite].append({"max": results[iang]["angle"], "min": results[iang]["angle"]})
icurrent += 1
nb_indices.add(int(iang))
else:
self.neighbors_normalized_angles[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_angles[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_normalized_angles[isite][icurrent]["dnb_indices"] = list(dnb_indices)
self.neighbors_angles[isite][icurrent]["dnb_indices"] = list(dnb_indices)
for iang in range(len(self.neighbors_angles[isite]) - 1):
ang_dict = self.neighbors_angles[isite][iang]
ang_dict_next = self.neighbors_angles[isite][iang + 1]
ang_dict["next"] = ang_dict_next["max"]
nang_dict = self.neighbors_normalized_angles[isite][iang]
nang_dict_next = self.neighbors_normalized_angles[isite][iang + 1]
nang_dict["next"] = nang_dict_next["max"]
if self.minimum_angle_factor is not None:
afact = self.minimum_angle_factor
else:
afact = 0.0
self.neighbors_normalized_angles[isite][-1]["next"] = afact
self.neighbors_angles[isite][-1]["next"] = afact * self.neighbors_angles[isite][0]["max"]
def _precompute_additional_conditions(self, ivoronoi, voronoi, valences):
additional_conditions = {ac: [] for ac in self.additional_conditions}
for ips, (ps, vals) in enumerate(voronoi):
for ac in self.additional_conditions:
additional_conditions[ac].append(
self.AC.check_condition(
condition=ac,
structure=self.structure,
parameters={
"valences": valences,
"neighbor_index": vals["index"],
"site_index": ivoronoi,
},
)
)
return additional_conditions
def _precompute_distance_conditions(self, ivoronoi, voronoi):
distance_conditions = []
for idp, dp_dict in enumerate(self.neighbors_normalized_distances[ivoronoi]):
distance_conditions.append([])
dp = dp_dict["max"]
for ips, (ps, vals) in enumerate(voronoi):
distance_conditions[idp].append(
vals["normalized_distance"] <= dp
or np.isclose(
vals["normalized_distance"],
dp,
rtol=0.0,
atol=self.normalized_distance_tolerance / 2.0,
)
)
return distance_conditions
def _precompute_angle_conditions(self, ivoronoi, voronoi):
angle_conditions = []
for iap, ap_dict in enumerate(self.neighbors_normalized_angles[ivoronoi]):
angle_conditions.append([])
ap = ap_dict["max"]
for ips, (ps, vals) in enumerate(voronoi):
angle_conditions[iap].append(
vals["normalized_angle"] >= ap
or np.isclose(
vals["normalized_angle"],
ap,
rtol=0.0,
atol=self.normalized_angle_tolerance / 2.0,
)
)
return angle_conditions
# def neighbors_map(self, isite, distfactor, angfactor, additional_condition):
# if self.neighbors_normalized_distances[isite] is None:
# return None
# dist_where = np.argwhere(
# np.array([wd['min'] for wd in self.neighbors_normalized_distances[isite]]) <= distfactor)
# if len(dist_where) == 0:
# return None
# idist = dist_where[-1][0]
# ang_where = np.argwhere(np.array([wa['max'] for wa in self.neighbors_normalized_angles[isite]]) >= angfactor)
# if len(ang_where) == 0:
# return None
# iang = ang_where[0][0]
# if self.additional_conditions.count(additional_condition) != 1:
# return None
# i_additional_condition = self.additional_conditions.index(additional_condition)
# return {'i_distfactor': idist, 'i_angfactor': iang, 'i_additional_condition': i_additional_condition}
def neighbors_surfaces(self, isite, surface_calculation_type=None, max_dist=2.0):
"""
Get the different surfaces corresponding to the different distance-angle cutoffs for a given site.
Args:
isite: Index of the site
surface_calculation_type: How to compute the surface.
max_dist: The maximum distance factor to be considered.
Returns:
Surfaces for each distance-angle cutoff.
"""
if self.voronoi_list2[isite] is None:
return None
bounds_and_limits = self.voronoi_parameters_bounds_and_limits(isite, surface_calculation_type, max_dist)
distance_bounds = bounds_and_limits["distance_bounds"]
angle_bounds = bounds_and_limits["angle_bounds"]
surfaces = np.zeros((len(distance_bounds), len(angle_bounds)), np.float_)
for idp in range(len(distance_bounds) - 1):
this_dist_plateau = distance_bounds[idp + 1] - distance_bounds[idp]
for iap in range(len(angle_bounds) - 1):
this_ang_plateau = angle_bounds[iap + 1] - angle_bounds[iap]
surfaces[idp][iap] = np.absolute(this_dist_plateau * this_ang_plateau)
return surfaces
def neighbors_surfaces_bounded(self, isite, surface_calculation_options=None):
"""
Get the different surfaces (using boundaries) corresponding to the different distance-angle cutoffs
for a given site.
Args:
isite: Index of the site.
surface_calculation_options: Options for the boundaries.
Returns:
Surfaces for each distance-angle cutoff.
"""
if self.voronoi_list2[isite] is None:
return None
if surface_calculation_options is None:
surface_calculation_options = {
"type": "standard_elliptic",
"distance_bounds": {"lower": 1.2, "upper": 1.8},
"angle_bounds": {"lower": 0.1, "upper": 0.8},
}
if surface_calculation_options["type"] in [
"standard_elliptic",
"standard_diamond",
"standard_spline",
]:
plot_type = {
"distance_parameter": ("initial_normalized", None),
"angle_parameter": ("initial_normalized", None),
}
else:
raise ValueError(
'Type "{}" for the surface calculation in DetailedVoronoiContainer '
"is invalid".format(surface_calculation_options["type"])
)
max_dist = surface_calculation_options["distance_bounds"]["upper"] + 0.1
bounds_and_limits = self.voronoi_parameters_bounds_and_limits(
isite=isite, plot_type=plot_type, max_dist=max_dist
)
distance_bounds = bounds_and_limits["distance_bounds"]
angle_bounds = bounds_and_limits["angle_bounds"]
lower_and_upper_functions = get_lower_and_upper_f(surface_calculation_options=surface_calculation_options)
mindist = surface_calculation_options["distance_bounds"]["lower"]
maxdist = surface_calculation_options["distance_bounds"]["upper"]
minang = surface_calculation_options["angle_bounds"]["lower"]
maxang = surface_calculation_options["angle_bounds"]["upper"]
f_lower = lower_and_upper_functions["lower"]
f_upper = lower_and_upper_functions["upper"]
surfaces = np.zeros((len(distance_bounds), len(angle_bounds)), np.float_)
for idp in range(len(distance_bounds) - 1):
dp1 = distance_bounds[idp]
dp2 = distance_bounds[idp + 1]
if dp2 < mindist or dp1 > maxdist:
continue
if dp1 < mindist:
d1 = mindist
else:
d1 = dp1
if dp2 > maxdist:
d2 = maxdist
else:
d2 = dp2
for iap in range(len(angle_bounds) - 1):
ap1 = angle_bounds[iap]
ap2 = angle_bounds[iap + 1]
if ap1 > ap2:
ap1 = angle_bounds[iap + 1]
ap2 = angle_bounds[iap]
if ap2 < minang or ap1 > maxang:
continue
intersection, interror = rectangle_surface_intersection(
rectangle=((d1, d2), (ap1, ap2)),
f_lower=f_lower,
f_upper=f_upper,
bounds_lower=[mindist, maxdist],
bounds_upper=[mindist, maxdist],
check=False,
)
surfaces[idp][iap] = intersection
return surfaces
@staticmethod
def _get_vertices_dist_ang_indices(parameter_indices_list):
pp0 = [pp[0] for pp in parameter_indices_list]
pp1 = [pp[1] for pp in parameter_indices_list]
min_idist = min(pp0)
min_iang = min(pp1)
max_idist = max(pp0)
max_iang = max(pp1)
i_min_angs = np.argwhere(np.array(pp1) == min_iang)
i_max_dists = np.argwhere(np.array(pp0) == max_idist)
pp0_at_min_iang = [pp0[ii[0]] for ii in i_min_angs]
pp1_at_max_idist = [pp1[ii[0]] for ii in i_max_dists]
max_idist_at_min_iang = max(pp0_at_min_iang)
min_iang_at_max_idist = min(pp1_at_max_idist)
p1 = (min_idist, min_iang)
p2 = (max_idist_at_min_iang, min_iang)
p3 = (max_idist_at_min_iang, min_iang_at_max_idist)
p4 = (max_idist, min_iang_at_max_idist)
p5 = (max_idist, max_iang)
p6 = (min_idist, max_iang)
return [p1, p2, p3, p4, p5, p6]
def maps_and_surfaces(
self,
isite,
surface_calculation_type=None,
max_dist=2.0,
additional_conditions=None,
):
"""
Get the different surfaces and their cn_map corresponding to the different distance-angle cutoffs
for a given site.
Args:
isite: Index of the site
surface_calculation_type: How to compute the surface.
max_dist: The maximum distance factor to be considered.
additional_conditions: If additional conditions have to be considered.
Returns:
Surfaces and cn_map's for each distance-angle cutoff.
"""
if self.voronoi_list2[isite] is None:
return None
if additional_conditions is None:
additional_conditions = [self.AC.ONLY_ACB]
surfaces = self.neighbors_surfaces(
isite=isite,
surface_calculation_type=surface_calculation_type,
max_dist=max_dist,
)
maps_and_surfaces = []
for cn, value in self._unique_coordinated_neighbors_parameters_indices[isite].items(): # pylint: disable=E1101
for imap, list_parameters_indices in enumerate(value):
thissurf = 0.0
for (idp, iap, iacb) in list_parameters_indices:
if iacb in additional_conditions:
thissurf += surfaces[idp, iap]
maps_and_surfaces.append(
{
"map": (cn, imap),
"surface": thissurf,
"parameters_indices": list_parameters_indices,
}
)
return maps_and_surfaces
def maps_and_surfaces_bounded(self, isite, surface_calculation_options=None, additional_conditions=None):
"""
Get the different surfaces (using boundaries) and their cn_map corresponding to the different
distance-angle cutoffs for a given site.
Args:
isite: Index of the site
surface_calculation_options: Options for the boundaries.
additional_conditions: If additional conditions have to be considered.
Returns:
Surfaces and cn_map's for each distance-angle cutoff.
"""
if self.voronoi_list2[isite] is None:
return None
if additional_conditions is None:
additional_conditions = [self.AC.ONLY_ACB]
surfaces = self.neighbors_surfaces_bounded(isite=isite, surface_calculation_options=surface_calculation_options)
maps_and_surfaces = []
for cn, value in self._unique_coordinated_neighbors_parameters_indices[isite].items(): # pylint: disable=E1101
for imap, list_parameters_indices in enumerate(value):
thissurf = 0.0
for (idp, iap, iacb) in list_parameters_indices:
if iacb in additional_conditions:
thissurf += surfaces[idp, iap]
maps_and_surfaces.append(
{
"map": (cn, imap),
"surface": thissurf,
"parameters_indices": list_parameters_indices,
}
)
return maps_and_surfaces
def neighbors(self, isite, distfactor, angfactor, additional_condition=None):
"""
Get the neighbors of a given site corresponding to a given distance and angle factor.
Args:
isite: Index of the site.
distfactor: Distance factor.
angfactor: Angle factor.
additional_condition: Additional condition to be used (currently not implemented).
Returns:
List of neighbors of the given site for the given distance and angle factors.
"""
idist = None
dfact = None
for iwd, wd in enumerate(self.neighbors_normalized_distances[isite]):
if distfactor >= wd["min"]:
idist = iwd
dfact = wd["max"]
else:
break
iang = None
afact = None
for iwa, wa in enumerate(self.neighbors_normalized_angles[isite]):
if angfactor <= wa["max"]:
iang = iwa
afact = wa["min"]
else:
break
if idist is None or iang is None:
raise ValueError("Distance or angle parameter not found ...")
return [
nb
for nb in self.voronoi_list2[isite]
if nb["normalized_distance"] <= dfact and nb["normalized_angle"] >= afact
]
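    # Worked example for the method above (hypothetical numbers): if the normalized
    # distance groups of a site are [{"min": 1.0, "max": 1.0}, {"min": 1.28, "max": 1.32},
    # {"min": 1.61, ...}] and distfactor=1.4, the loop stops at the second group and
    # dfact=1.32, so every Voronoi neighbor with normalized_distance <= 1.32 and
    # normalized_angle >= afact (selected analogously) is returned.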
def voronoi_parameters_bounds_and_limits(self, isite, plot_type, max_dist):
"""
Get the different boundaries and limits of the distance and angle factors for the given site.
Args:
isite: Index of the site.
plot_type: Types of distance/angle parameters to get.
max_dist: Maximum distance factor.
Returns:
Distance and angle bounds and limits.
"""
# Initializes the distance and angle parameters
if self.voronoi_list2[isite] is None:
return None
if plot_type is None:
plot_type = {
"distance_parameter": ("initial_inverse_opposite", None),
"angle_parameter": ("initial_opposite", None),
}
dd = [dist["min"] for dist in self.neighbors_normalized_distances[isite]]
dd[0] = 1.0
if plot_type["distance_parameter"][0] == "initial_normalized":
dd.append(max_dist)
distance_bounds = np.array(dd)
dist_limits = [1.0, max_dist]
elif plot_type["distance_parameter"][0] == "initial_inverse_opposite":
ddinv = [1.0 / dist for dist in dd]
ddinv.append(0.0)
distance_bounds = np.array([1.0 - invdist for invdist in ddinv])
dist_limits = [0.0, 1.0]
elif plot_type["distance_parameter"][0] == "initial_inverse3_opposite":
ddinv = [1.0 / dist ** 3.0 for dist in dd]
ddinv.append(0.0)
distance_bounds = np.array([1.0 - invdist for invdist in ddinv])
dist_limits = [0.0, 1.0]
else:
raise NotImplementedError(
'Plotting type "{}" ' "for the distance is not implemented".format(plot_type["distance_parameter"])
)
if plot_type["angle_parameter"][0] == "initial_normalized":
aa = [0.0]
aa.extend([ang["max"] for ang in self.neighbors_normalized_angles[isite]])
angle_bounds = np.array(aa)
elif plot_type["angle_parameter"][0] == "initial_opposite":
aa = [0.0]
aa.extend([ang["max"] for ang in self.neighbors_normalized_angles[isite]])
aa = [1.0 - ang for ang in aa]
angle_bounds = np.array(aa)
else:
raise NotImplementedError(
'Plotting type "{}" ' "for the angle is not implemented".format(plot_type["angle_parameter"])
)
ang_limits = [0.0, 1.0]
return {
"distance_bounds": distance_bounds,
"distance_limits": dist_limits,
"angle_bounds": angle_bounds,
"angle_limits": ang_limits,
}
def is_close_to(self, other, rtol=0.0, atol=1e-8):
"""
Whether two DetailedVoronoiContainer objects are close to each other.
Args:
other: Another DetailedVoronoiContainer to be compared with.
rtol: Relative tolerance to compare values.
atol: Absolute tolerance to compare values.
Returns:
True if the two DetailedVoronoiContainer are close to each other.
"""
isclose = (
np.isclose(
self.normalized_angle_tolerance,
other.normalized_angle_tolerance,
rtol=rtol,
atol=atol,
)
and np.isclose(
self.normalized_distance_tolerance,
other.normalized_distance_tolerance,
rtol=rtol,
atol=atol,
)
and self.additional_conditions == other.additional_conditions
and self.valences == other.valences
)
if not isclose:
return isclose
for isite, site_voronoi in enumerate(self.voronoi_list2):
self_to_other_nbs = {}
for inb, nb in enumerate(site_voronoi):
if nb is None:
if other.voronoi_list2[isite] is None:
continue
return False
if other.voronoi_list2[isite] is None:
return False
nb_other = None
for inb2, nb2 in enumerate(other.voronoi_list2[isite]):
if nb["site"] == nb2["site"]:
self_to_other_nbs[inb] = inb2
nb_other = nb2
break
if nb_other is None:
return False
if not np.isclose(nb["distance"], nb_other["distance"], rtol=rtol, atol=atol):
return False
if not np.isclose(nb["angle"], nb_other["angle"], rtol=rtol, atol=atol):
return False
if not np.isclose(
nb["normalized_distance"],
nb_other["normalized_distance"],
rtol=rtol,
atol=atol,
):
return False
if not np.isclose(
nb["normalized_angle"],
nb_other["normalized_angle"],
rtol=rtol,
atol=atol,
):
return False
if nb["index"] != nb_other["index"]:
return False
if nb["site"] != nb_other["site"]:
return False
return True
def get_rdf_figure(self, isite, normalized=True, figsize=None, step_function=None):
"""
Get the Radial Distribution Figure for a given site.
Args:
isite: Index of the site.
normalized: Whether to normalize distances.
figsize: Size of the figure.
step_function: Type of step function to be used for the RDF.
Returns:
Matplotlib figure.
"""
def dp_func(dp):
return 1.0 - 1.0 / np.power(dp, 3.0)
import matplotlib.pyplot as plt
if step_function is None:
step_function = {"type": "normal_cdf", "scale": 0.0001}
# Initializes the figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
subplot = fig.add_subplot(111)
if normalized:
dists = self.neighbors_normalized_distances[isite]
else:
dists = self.neighbors_distances[isite]
if step_function["type"] == "step_function":
isorted = np.argsort([dd["min"] for dd in dists])
sorted_dists = [dists[ii]["min"] for ii in isorted]
dnb_dists = [len(dists[ii]["dnb_indices"]) for ii in isorted]
xx = [0.0]
yy = [0.0]
for idist, dist in enumerate(sorted_dists):
xx.append(dist)
xx.append(dist)
yy.append(yy[-1])
yy.append(yy[-1] + dnb_dists[idist])
xx.append(1.1 * xx[-1])
yy.append(yy[-1])
elif step_function["type"] == "normal_cdf":
scale = step_function["scale"]
mydists = [dp_func(dd["min"]) for dd in dists]
mydcns = [len(dd["dnb_indices"]) for dd in dists]
xx = np.linspace(0.0, 1.1 * max(mydists), num=500)
yy = np.zeros_like(xx)
for idist, dist in enumerate(mydists):
yy += mydcns[idist] * normal_cdf_step(xx, mean=dist, scale=scale)
else:
raise ValueError('Step function of type "{}" is not allowed'.format(step_function["type"]))
subplot.plot(xx, yy)
return fig
def get_sadf_figure(self, isite, normalized=True, figsize=None, step_function=None):
"""
Get the Solid Angle Distribution Figure for a given site.
Args:
isite: Index of the site.
normalized: Whether to normalize angles.
figsize: Size of the figure.
step_function: Type of step function to be used for the SADF.
Returns:
Matplotlib figure.
"""
def ap_func(ap):
return np.power(ap, -0.1)
import matplotlib.pyplot as plt
if step_function is None:
step_function = {"type": "step_function", "scale": 0.0001}
# Initializes the figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
subplot = fig.add_subplot(111)
if normalized:
angs = self.neighbors_normalized_angles[isite]
else:
angs = self.neighbors_angles[isite]
if step_function["type"] == "step_function":
isorted = np.argsort([ap_func(aa["min"]) for aa in angs])
sorted_angs = [ap_func(angs[ii]["min"]) for ii in isorted]
dnb_angs = [len(angs[ii]["dnb_indices"]) for ii in isorted]
xx = [0.0]
yy = [0.0]
for iang, ang in enumerate(sorted_angs):
xx.append(ang)
xx.append(ang)
yy.append(yy[-1])
yy.append(yy[-1] + dnb_angs[iang])
xx.append(1.1 * xx[-1])
yy.append(yy[-1])
elif step_function["type"] == "normal_cdf":
scale = step_function["scale"]
myangs = [ap_func(aa["min"]) for aa in angs]
mydcns = [len(dd["dnb_indices"]) for dd in angs]
xx = np.linspace(0.0, 1.1 * max(myangs), num=500)
yy = np.zeros_like(xx)
for iang, ang in enumerate(myangs):
yy += mydcns[iang] * normal_cdf_step(xx, mean=ang, scale=scale)
else:
raise ValueError('Step function of type "{}" is not allowed'.format(step_function["type"]))
subplot.plot(xx, yy)
return fig
def __eq__(self, other):
return (
self.normalized_angle_tolerance == other.normalized_angle_tolerance
and self.normalized_distance_tolerance == other.normalized_distance_tolerance
and self.additional_conditions == other.additional_conditions
and self.valences == other.valences
and self.voronoi_list2 == other.voronoi_list2
and self.structure == other.structure
)
def __ne__(self, other):
return not self == other
def to_bson_voronoi_list2(self):
"""
        Transforms voronoi_list2 into a bson_nb_voro_list2 that is BSON-encodable.
        Returns:
            bson_nb_voro_list2, to be used in the as_dict method.
"""
bson_nb_voro_list2 = [None] * len(self.voronoi_list2)
for ivoro, voro in enumerate(self.voronoi_list2):
if voro is None or voro == "None":
continue
site_voro = []
# {'site': neighbors[nn[1]],
# 'angle': sa,
# 'distance': distances[nn[1]],
# 'index': myindex}
for nb_dict in voro:
site = nb_dict["site"]
site_dict = {key: val for key, val in nb_dict.items() if key not in ["site"]}
# site_voro.append([ps.as_dict(), dd]) [float(c) for c in self.frac_coords]
diff = site.frac_coords - self.structure[nb_dict["index"]].frac_coords
site_voro.append([[nb_dict["index"], [float(c) for c in diff]], site_dict])
bson_nb_voro_list2[ivoro] = site_voro
return bson_nb_voro_list2
def as_dict(self):
"""
Bson-serializable dict representation of the VoronoiContainer.
Returns:
dictionary that is BSON-encodable.
"""
bson_nb_voro_list2 = self.to_bson_voronoi_list2()
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"bson_nb_voro_list2": bson_nb_voro_list2,
# "neighbors_lists": self.neighbors_lists,
"structure": self.structure.as_dict(),
"normalized_angle_tolerance": self.normalized_angle_tolerance,
"normalized_distance_tolerance": self.normalized_distance_tolerance,
"additional_conditions": self.additional_conditions,
"valences": self.valences,
"maximum_distance_factor": self.maximum_distance_factor,
"minimum_angle_factor": self.minimum_angle_factor,
}
@classmethod
def from_dict(cls, d):
"""
Reconstructs the VoronoiContainer object from a dict representation of the VoronoiContainer created using
the as_dict method.
Args:
d: dict representation of the VoronoiContainer object.
Returns:
VoronoiContainer object.
"""
structure = Structure.from_dict(d["structure"])
voronoi_list2 = from_bson_voronoi_list2(d["bson_nb_voro_list2"], structure)
maximum_distance_factor = d["maximum_distance_factor"] if "maximum_distance_factor" in d else None
minimum_angle_factor = d["minimum_angle_factor"] if "minimum_angle_factor" in d else None
return cls(
structure=structure,
voronoi_list2=voronoi_list2,
# neighbors_lists=neighbors_lists,
normalized_angle_tolerance=d["normalized_angle_tolerance"],
normalized_distance_tolerance=d["normalized_distance_tolerance"],
additional_conditions=d["additional_conditions"],
valences=d["valences"],
maximum_distance_factor=maximum_distance_factor,
minimum_angle_factor=minimum_angle_factor,
)
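# Illustrative usage sketch (the structure, lattice constant and factors below are
# assumptions chosen for the example, not values coming from this module): build a
# simple two-site cubic cell, compute its detailed Voronoi, query the neighbors of
# one site and round-trip the container through its dict representation.
if __name__ == "__main__":
    from pymatgen.core.lattice import Lattice
    cscl_like = Structure(
        Lattice.cubic(4.2),
        ["Cs", "Cl"],
        [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]],
    )
    dvc = DetailedVoronoiContainer(structure=cscl_like)
    # Neighbors of site 0 within a normalized distance factor of 1.4 and above a
    # normalized angle factor of 0.3.
    nbs = dvc.neighbors(isite=0, distfactor=1.4, angfactor=0.3)
    print("Number of neighbors found:", len(nbs))
    # Round trip through the BSON-encodable dict representation.
    dvc2 = DetailedVoronoiContainer.from_dict(dvc.as_dict())
    print("Round-tripped container close to original:", dvc2.is_close_to(dvc))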
| mit |
bzero/statsmodels | statsmodels/tsa/base/tests/test_base.py | 27 | 2106 | import numpy as np
from pandas import Series
from pandas import date_range
from statsmodels.tsa.base.tsa_model import TimeSeriesModel
import numpy.testing as npt
from statsmodels.tools.testing import assert_equal
def test_pandas_nodates_index():
from statsmodels.datasets import sunspots
y = sunspots.load_pandas().data.SUNACTIVITY
npt.assert_raises(ValueError, TimeSeriesModel, y)
def test_predict_freq():
# test that predicted dates have same frequency
x = np.arange(1,36.)
# there's a bug in pandas up to 0.10.2 for YearBegin
#dates = date_range("1972-4-1", "2007-4-1", freq="AS-APR")
dates = date_range("1972-4-30", "2006-4-30", freq="A-APR")
series = Series(x, index=dates)
model = TimeSeriesModel(series)
#npt.assert_(model.data.freq == "AS-APR")
npt.assert_(model.data.freq == "A-APR")
start = model._get_predict_start("2006-4-30")
end = model._get_predict_end("2016-4-30")
model._make_predict_dates()
predict_dates = model.data.predict_dates
#expected_dates = date_range("2006-12-31", "2016-12-31",
# freq="AS-APR")
expected_dates = date_range("2006-4-30", "2016-4-30", freq="A-APR")
assert_equal(predict_dates, expected_dates)
#ptesting.assert_series_equal(predict_dates, expected_dates)
def test_keyerror_start_date():
x = np.arange(1,36.)
from pandas import date_range
# there's a bug in pandas up to 0.10.2 for YearBegin
#dates = date_range("1972-4-1", "2007-4-1", freq="AS-APR")
dates = date_range("1972-4-30", "2006-4-30", freq="A-APR")
series = Series(x, index=dates)
model = TimeSeriesModel(series)
npt.assert_raises(ValueError, model._get_predict_start, "1970-4-30")
def test_period_index():
# test 1285
from pandas import PeriodIndex, TimeSeries
dates = PeriodIndex(start="1/1/1990", periods=20, freq="M")
x = np.arange(1, 21.)
model = TimeSeriesModel(Series(x, index=dates))
npt.assert_(model.data.freq == "M")
model = TimeSeriesModel(TimeSeries(x, index=dates))
npt.assert_(model.data.freq == "M")
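def test_datetime_monthly_freq():
    # Editor's sketch of an additional check mirroring the tests above; the month-end
    # DatetimeIndex below is an assumed input, not part of the original suite.
    x = np.arange(1, 21.)
    dates = date_range("1/31/1990", periods=20, freq="M")
    model = TimeSeriesModel(Series(x, index=dates))
    npt.assert_(model.data.freq == "M")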
| bsd-3-clause |
cainiaocome/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index size (full scan). The LSHForest
index has a sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundreds of dimensions. Higher dimensional datasets tends to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
# Initialize LSHForest for queries of a single neighbor
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
CompPhysics/ComputationalPhysicsMSU | doc/Programs/LecturePrograms/programs/StatPhys/python/ising2dim.py | 4 | 4976 | from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import math, sys
def periodic (i, limit, add):
"""
Choose correct matrix index with periodic
boundary conditions
Input:
- i: Base index
    - limit: Lattice dimension; legal indices run from 0 to limit-1
- add: Number to add or subtract from i
"""
return (i+limit+add) % limit
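# Quick worked examples for periodic() (sanity checks, not part of the original
# script): indices wrap around at both ends of the lattice.
assert periodic(0, 10, -1) == 9   # left neighbour of the first site is the last site
assert periodic(9, 10, 1) == 0    # right neighbour of the last site is the first site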
def monteCarlo(temp, NSpins, MCcycles):
"""
Calculate the energy and magnetization
(\"straight\" and squared) for a given temperature
Input:
- temp: Temperature to calculate for
- NSpins: dimension of square matrix
    - MCcycles: Number of Monte Carlo cycles (how many sweeps of
              the spin matrix to perform)
Output:
- E_av: Energy of matrix averaged over MCcycles, normalized to spins**2
- E_variance: Variance of energy, same normalization * temp**2
- M_av: Magnetic field of matrix, averaged over MCcycles, normalized to spins**2
- M_variance: Variance of magnetic field, same normalization * temp
- Mabs: Absolute value of magnetic field, averaged over MCcycles
"""
#Setup spin matrix, initialize to ground state
spin_matrix = np.zeros( (NSpins,NSpins), np.int8) + 1
#Create and initialize variables
E = M = 0
E_av = E2_av = M_av = M2_av = Mabs_av = 0
#Setup array for possible energy changes
w = np.zeros(17,np.float64)
for de in range(-8,9,4): #include +8
w[de+8] = math.exp(-de/temp)
#Calculate initial magnetization:
M = spin_matrix.sum()
#Calculate initial energy
for j in range(NSpins):
for i in range(NSpins):
E -= spin_matrix.item(i,j)*\
(spin_matrix.item(periodic(i,NSpins,-1),j) + spin_matrix.item(i,periodic(j,NSpins,1)))
#Start metropolis MonteCarlo computation
for i in range(MCcycles):
#Metropolis
#Loop over all spins, pick a random spin each time
for s in range(NSpins**2):
x = int(np.random.random()*NSpins)
y = int(np.random.random()*NSpins)
deltaE = 2*spin_matrix.item(x,y)*\
(spin_matrix.item(periodic(x,NSpins,-1), y) +\
spin_matrix.item(periodic(x,NSpins,1), y) +\
spin_matrix.item(x, periodic(y,NSpins,-1)) +\
spin_matrix.item(x, periodic(y,NSpins,1)))
if np.random.random() <= w[deltaE+8]:
#Accept!
spin_matrix[x,y] *= -1
M += 2*spin_matrix[x,y]
E += deltaE
#Update expectation values
E_av += E
E2_av += E**2
M_av += M
M2_av += M**2
Mabs_av += int(math.fabs(M))
#Normalize average values
E_av /= float(MCcycles);
E2_av /= float(MCcycles);
M_av /= float(MCcycles);
M2_av /= float(MCcycles);
Mabs_av /= float(MCcycles);
#Calculate variance and normalize to per-point and temp
E_variance = (E2_av-E_av*E_av)/float(NSpins*NSpins*temp*temp);
M_variance = (M2_av-M_av*M_av)/float(NSpins*NSpins*temp);
#Normalize returned averages to per-point
E_av /= float(NSpins*NSpins);
M_av /= float(NSpins*NSpins);
Mabs_av /= float(NSpins*NSpins);
return (E_av, E_variance, M_av, M_variance, Mabs_av)
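# Worked example (kept as a comment so the run below is unchanged): for a small 2x2
# lattice at temp=1.0, e.g. monteCarlo(1.0, 2, 100000), the averages should come out
# close to the exact low-temperature values E per spin ~ -2 and |M| per spin ~ 1.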
# Main program
# temperature steps, initial temperature, final temperature
NumberTsteps = 20
InitialT = 1.5
FinalT = 2.5
Tsteps = (FinalT-InitialT)/NumberTsteps
Temp = np.zeros(NumberTsteps)
for T in range(NumberTsteps):
Temp[T] = InitialT+T*Tsteps
# Declare arrays that hold averages
Energy = np.zeros(NumberTsteps); Magnetization = np.zeros(NumberTsteps)
SpecificHeat = np.zeros(NumberTsteps); Susceptibility = np.zeros(NumberTsteps)
MagnetizationAbs = np.zeros(NumberTsteps)
# Define number of spins
NSpins = 20
# Define number of Monte Carlo cycles
MCcycles = 100000
# Perform the simulations over a range of temperatures
for T in range(NumberTsteps):
(Energy[T], SpecificHeat[T], Magnetization[T], Susceptibility[T], MagnetizationAbs[T]) = monteCarlo(Temp[T],NSpins,MCcycles)
# And finally plot
f = plt.figure(figsize=(18, 10)); # plot the calculated values
sp = f.add_subplot(2, 2, 1 );
plt.plot(Temp, Energy, 'o', color="green");
plt.xlabel("Temperature (T)", fontsize=20);
plt.ylabel("Energy ", fontsize=20);
sp = f.add_subplot(2, 2, 2 );
plt.plot(Temp, abs(Magnetization), 'o', color="red");
plt.xlabel("Temperature (T)", fontsize=20);
plt.ylabel("Magnetization ", fontsize=20);
sp = f.add_subplot(2, 2, 3 );
plt.plot(Temp, SpecificHeat, 'o', color="blue");
plt.xlabel("Temperature (T)", fontsize=20);
plt.ylabel("Specific Heat ", fontsize=20);
sp = f.add_subplot(2, 2, 4 );
plt.plot(Temp, Susceptibility, 'o', color="black");
plt.xlabel("Temperature (T)", fontsize=20);
plt.ylabel("Susceptibility", fontsize=20);
plt.show()
| cc0-1.0 |
jiminliang/cuda-convnet2 | convdata.py | 174 | 14675 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.data import *
import numpy.random as nr
import numpy as n
import random as r
from time import time
from threading import Thread
from math import sqrt
import sys
#from matplotlib import pylab as pl
from PIL import Image
from StringIO import StringIO
from time import time
import itertools as it
class JPEGBatchLoaderThread(Thread):
def __init__(self, dp, batch_num, label_offset, list_out):
Thread.__init__(self)
self.list_out = list_out
self.label_offset = label_offset
self.dp = dp
self.batch_num = batch_num
@staticmethod
def load_jpeg_batch(rawdics, dp, label_offset):
if type(rawdics) != list:
rawdics = [rawdics]
nc_total = sum(len(r['data']) for r in rawdics)
jpeg_strs = list(it.chain.from_iterable(rd['data'] for rd in rawdics))
labels = list(it.chain.from_iterable(rd['labels'] for rd in rawdics))
img_mat = n.empty((nc_total * dp.data_mult, dp.inner_pixels * dp.num_colors), dtype=n.float32)
lab_mat = n.zeros((nc_total, dp.get_num_classes()), dtype=n.float32)
dp.convnet.libmodel.decodeJpeg(jpeg_strs, img_mat, dp.img_size, dp.inner_size, dp.test, dp.multiview)
lab_vec = n.tile(n.asarray([(l[nr.randint(len(l))] if len(l) > 0 else -1) + label_offset for l in labels], dtype=n.single).reshape((nc_total, 1)), (dp.data_mult,1))
for c in xrange(nc_total):
lab_mat[c, [z + label_offset for z in labels[c]]] = 1
lab_mat = n.tile(lab_mat, (dp.data_mult, 1))
return {'data': img_mat[:nc_total * dp.data_mult,:],
'labvec': lab_vec[:nc_total * dp.data_mult,:],
'labmat': lab_mat[:nc_total * dp.data_mult,:]}
def run(self):
rawdics = self.dp.get_batch(self.batch_num)
p = JPEGBatchLoaderThread.load_jpeg_batch(rawdics,
self.dp,
self.label_offset)
self.list_out.append(p)
class ColorNoiseMakerThread(Thread):
def __init__(self, pca_stdevs, pca_vecs, num_noise, list_out):
Thread.__init__(self)
self.pca_stdevs, self.pca_vecs = pca_stdevs, pca_vecs
self.num_noise = num_noise
self.list_out = list_out
def run(self):
noise = n.dot(nr.randn(self.num_noise, 3).astype(n.single) * self.pca_stdevs.T, self.pca_vecs.T)
self.list_out.append(noise)
class ImageDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.data_mean = self.batch_meta['data_mean'].astype(n.single)
self.color_eig = self.batch_meta['color_pca'][1].astype(n.single)
self.color_stdevs = n.c_[self.batch_meta['color_pca'][0].astype(n.single)]
self.color_noise_coeff = dp_params['color_noise']
self.num_colors = 3
self.img_size = int(sqrt(self.batch_meta['num_vis'] / self.num_colors))
self.mini = dp_params['minibatch_size']
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.img_size
self.inner_pixels = self.inner_size **2
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 5*2
self.data_mult = self.num_views if self.multiview else 1
self.batch_size = self.batch_meta['batch_size']
self.label_offset = 0 if 'label_offset' not in self.batch_meta else self.batch_meta['label_offset']
self.scalar_mean = dp_params['scalar_mean']
# Maintain pointers to previously-returned data matrices so they don't get garbage collected.
self.data = [None, None] # These are pointers to previously-returned data matrices
self.loader_thread, self.color_noise_thread = None, None
self.convnet = dp_params['convnet']
self.num_noise = self.batch_size
self.batches_generated, self.loaders_started = 0, 0
self.data_mean_crop = self.data_mean.reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((1,3*self.inner_size**2))
if self.scalar_mean >= 0:
self.data_mean_crop = self.scalar_mean
def showimg(self, img):
from matplotlib import pylab as pl
pixels = img.shape[0] / 3
size = int(sqrt(pixels))
img = img.reshape((3,size,size)).swapaxes(0,2).swapaxes(0,1)
pl.imshow(img, interpolation='nearest')
pl.show()
def get_data_dims(self, idx=0):
if idx == 0:
return self.inner_size**2 * 3
if idx == 2:
return self.get_num_classes()
return 1
def start_loader(self, batch_idx):
self.load_data = []
self.loader_thread = JPEGBatchLoaderThread(self,
self.batch_range[batch_idx],
self.label_offset,
self.load_data)
self.loader_thread.start()
def start_color_noise_maker(self):
color_noise_list = []
self.color_noise_thread = ColorNoiseMakerThread(self.color_stdevs, self.color_eig, self.num_noise, color_noise_list)
self.color_noise_thread.start()
return color_noise_list
def set_labels(self, datadic):
pass
def get_data_from_loader(self):
if self.loader_thread is None:
self.start_loader(self.batch_idx)
self.loader_thread.join()
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
else:
# Set the argument to join to 0 to re-enable batch reuse
self.loader_thread.join()
if not self.loader_thread.is_alive():
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
#else:
# print "Re-using batch"
self.advance_batch()
def add_color_noise(self):
# At this point the data already has 0 mean.
# So I'm going to add noise to it, but I'm also going to scale down
# the original data. This is so that the overall scale of the training
# data doesn't become too different from the test data.
s = self.data[self.d_idx]['data'].shape
cropped_size = self.get_data_dims(0) / 3
ncases = s[0]
if self.color_noise_thread is None:
self.color_noise_list = self.start_color_noise_maker()
self.color_noise_thread.join()
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
else:
self.color_noise_thread.join(0)
if not self.color_noise_thread.is_alive():
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases*3, cropped_size))
self.color_noise = self.color_noise[:ncases,:].reshape((3*ncases, 1))
self.data[self.d_idx]['data'] += self.color_noise * self.color_noise_coeff
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases, 3* cropped_size))
self.data[self.d_idx]['data'] *= 1.0 / (1.0 + self.color_noise_coeff) # <--- NOTE: This is the slow line, 0.25sec. Down from 0.75sec when I used division.
def get_next_batch(self):
self.d_idx = self.batches_generated % 2
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.get_data_from_loader()
# Subtract mean
self.data[self.d_idx]['data'] -= self.data_mean_crop
if self.color_noise_coeff > 0 and not self.test:
self.add_color_noise()
self.batches_generated += 1
return epoch, batchnum, [self.data[self.d_idx]['data'].T, self.data[self.d_idx]['labvec'].T, self.data[self.d_idx]['labmat'].T]
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data, add_mean=True):
mean = self.data_mean_crop.reshape((data.shape[0],1)) if data.flags.f_contiguous or self.scalar_mean else self.data_mean_crop.reshape((data.shape[0],1))
return n.require((data + (mean if add_mean else 0)).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
class CIFARDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.img_size = 32
self.num_colors = 3
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.batch_meta['img_size']
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 9
self.scalar_mean = dp_params['scalar_mean']
self.data_mult = self.num_views if self.multiview else 1
self.data_dic = []
for i in batch_range:
self.data_dic += [unpickle(self.get_data_file_name(i))]
self.data_dic[-1]["labels"] = n.require(self.data_dic[-1]['labels'], dtype=n.single)
self.data_dic[-1]["labels"] = n.require(n.tile(self.data_dic[-1]["labels"].reshape((1, n.prod(self.data_dic[-1]["labels"].shape))), (1, self.data_mult)), requirements='C')
self.data_dic[-1]['data'] = n.require(self.data_dic[-1]['data'] - self.scalar_mean, dtype=n.single, requirements='C')
self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1]*self.data_mult), dtype=n.single) for x in xrange(2)]
self.batches_generated = 0
self.data_mean = self.batch_meta['data_mean'].reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((self.get_data_dims(), 1))
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
bidx = batchnum - self.batch_range[0]
cropped = self.cropped_data[self.batches_generated % 2]
self.__trim_borders(self.data_dic[bidx]['data'], cropped)
cropped -= self.data_mean
self.batches_generated += 1
return epoch, batchnum, [cropped, self.data_dic[bidx]['labels']]
def get_data_dims(self, idx=0):
return self.inner_size**2 * self.num_colors if idx == 0 else 1
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data):
return n.require((data + self.data_mean).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
def __trim_borders(self, x, target):
y = x.reshape(self.num_colors, self.img_size, self.img_size, x.shape[1])
if self.test: # don't need to loop over cases
if self.multiview:
start_positions = [(0,0), (0, self.border_size), (0, self.border_size*2),
(self.border_size, 0), (self.border_size, self.border_size), (self.border_size, self.border_size*2),
(self.border_size*2, 0), (self.border_size*2, self.border_size), (self.border_size*2, self.border_size*2)]
end_positions = [(sy+self.inner_size, sx+self.inner_size) for (sy,sx) in start_positions]
for i in xrange(self.num_views):
target[:,i * x.shape[1]:(i+1)* x.shape[1]] = y[:,start_positions[i][0]:end_positions[i][0],start_positions[i][1]:end_positions[i][1],:].reshape((self.get_data_dims(),x.shape[1]))
else:
pic = y[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size, :] # just take the center for now
target[:,:] = pic.reshape((self.get_data_dims(), x.shape[1]))
else:
for c in xrange(x.shape[1]): # loop over cases
startY, startX = nr.randint(0,self.border_size*2 + 1), nr.randint(0,self.border_size*2 + 1)
endY, endX = startY + self.inner_size, startX + self.inner_size
pic = y[:,startY:endY,startX:endX, c]
if nr.randint(2) == 0: # also flip the image with 50% probability
pic = pic[:,:,::-1]
target[:,c] = pic.reshape((self.get_data_dims(),))
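# Hypothetical illustration (not part of the original provider): the nine
# multiview crops taken in __trim_borders above are simply the 3x3 grid of
# offsets {0, border_size, 2*border_size} in y and x. The helper below, with a
# made-up border size, reproduces that list of start positions.
def _demo_multiview_offsets(border_size=4):
    offsets = [0, border_size, 2 * border_size]
    return [(sy, sx) for sy in offsets for sx in offsets]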
class DummyConvNetLogRegDataProvider(LabeledDummyDataProvider):
def __init__(self, data_dim):
LabeledDummyDataProvider.__init__(self, data_dim)
self.img_size = int(sqrt(data_dim/3))
def get_next_batch(self):
epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)
dic = {'data': dic[0], 'labels': dic[1]}
print dic['data'].shape, dic['labels'].shape
return epoch, batchnum, [dic['data'], dic['labels']]
# Returns the dimensionality of the two data matrices returned by get_next_batch
def get_data_dims(self, idx=0):
return self.batch_meta['num_vis'] if idx == 0 else 1
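# Hypothetical usage sketch (not from the original code, sizes invented for
# illustration): get_plottable_data reshapes a (inner_size*inner_size*3, numCases)
# batch into (numCases, inner_size, inner_size, 3) so each row can be handed to
# pylab.imshow; the helper below reproduces that reshaping with plain numpy.
def _demo_plottable_shape(inner_size=24, num_cases=2):
    import numpy
    batch = numpy.random.rand(inner_size * inner_size * 3, num_cases)
    plottable = batch.T.reshape(num_cases, 3, inner_size, inner_size).swapaxes(1, 3).swapaxes(1, 2)
    assert plottable.shape == (num_cases, inner_size, inner_size, 3)
    return plottable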
| apache-2.0 |
sumspr/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm's convergence robust, as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (with randomly generated datasets) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
DSLituiev/scikit-learn | sklearn/tests/test_multioutput.py | 4 | 2906 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn import datasets
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import Lasso
from sklearn.multioutput import MultiOutputRegressor
def test_multi_target_regression():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test, y_test = X[50:], y[50:]
references = np.zeros_like(y_test)
for n in range(3):
rgr = GradientBoostingRegressor(random_state=0)
rgr.fit(X_train, y_train[:, n])
references[:,n] = rgr.predict(X_test)
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X_train, y_train)
y_pred = rgr.predict(X_test)
assert_almost_equal(references, y_pred)
def test_multi_target_regression_one_target():
# Test multi target regression raises
X, y = datasets.make_regression(n_targets=1)
X_train, y_train = X[:50], y[:50]
X_test, y_test = X[50:], y[50:]
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
assert_raises(ValueError, rgr.fit, X_train, y_train)
def test_multi_target_sparse_regression():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test, y_test = X[50:], y[50:]
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
rgr = MultiOutputRegressor(Lasso(random_state=0))
rgr_sparse = MultiOutputRegressor(Lasso(random_state=0))
rgr.fit(X_train, y_train)
rgr_sparse.fit(sparse(X_train), y_train)
assert_almost_equal(rgr.predict(X_test), rgr_sparse.predict(sparse(X_test)))
def test_multi_target_sample_weights_api():
X = [[1,2,3], [4,5,6]]
y = [[3.141, 2.718], [2.718, 3.141]]
w = [0.8, 0.6]
rgr = MultiOutputRegressor(Lasso())
assert_raises_regex(ValueError, "does not support sample weights",
rgr.fit, X, y, w)
# no exception should be raised if the base estimator supports weights
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X, y, w)
def test_multi_target_sample_weights():
# weighted regressor
Xw = [[1,2,3], [4,5,6]]
yw = [[3.141, 2.718], [2.718, 3.141]]
w = [2., 1.]
rgr_w = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1,2,3], [1,2,3], [4,5,6]]
y = [[3.141, 2.718], [3.141, 2.718], [2.718, 3.141]]
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X, y)
X_test = [[1.5,2.5,3.5], [3.5,4.5,5.5]]
assert_almost_equal(rgr.predict(X_test), rgr_w.predict(X_test))
| bsd-3-clause |
pearsonlab/thunder | thunder/utils/common.py | 7 | 10192 | """ Common operations and utilities """
def pinv(mat):
""" Compute pseudoinverse of a matrix """
from scipy.linalg import inv
from numpy import dot, transpose
return dot(inv(dot(mat, transpose(mat))), mat)
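def _demo_pinv():
    """ Hypothetical sanity check (illustration only, not part of thunder):
    since pinv(A) equals inv(A A^T) A, multiplying it on the right by A^T
    recovers the identity whenever A has full row rank. Sizes are invented. """
    from numpy import dot, allclose, eye
    from numpy.random import rand
    A = rand(3, 10)  # full row rank with probability 1
    assert allclose(dot(pinv(A), A.transpose()), eye(3))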
def loadMatVar(filename, var):
""" Load a variable from a MAT file"""
from scipy.io import loadmat
return loadmat(filename)[var]
def isRdd(data):
""" Check whether data is an RDD or not"""
dtype = type(data)
import pyspark
if (dtype == pyspark.rdd.RDD) | (dtype == pyspark.rdd.PipelinedRDD):
return True
else:
return False
def aslist(x):
""" Convert numpy arrays to lists, keep lists as lists """
from numpy import ndarray
if isinstance(x, ndarray):
return x.tolist()
elif isinstance(x, list):
return x
else:
raise TypeError("Expected list or numpy array, got %s" % type(x))
def checkParams(param, opts):
""" Check whether param is contained in opts (including lowercase), otherwise error """
if not param.lower() in opts:
raise ValueError("Option must be one of %s, got %s" % (str(opts)[1:-1], param))
def selectByMatchingPrefix(param, opts):
"""
Given a string parameter and a sequence of possible options, returns an option that is uniquely
specified by matching its prefix to the passed parameter.
The match is checked without sensitivity to case.
Throws IndexError if none of opts starts with param, or if multiple opts start with param.
>> selectByMatchingPrefix("a", ["aardvark", "mongoose"])
"aardvark"
"""
lparam = param.lower()
hits = [opt for opt in opts if opt.lower().startswith(lparam)]
nhits = len(hits)
if nhits == 1:
return hits[0]
if nhits:
raise IndexError("Multiple matches for for prefix '%s': %s" % (param, hits))
else:
raise IndexError("No matches for prefix '%s' found in options %s" % (param, opts))
def smallestFloatType(dtype):
"""
Returns the smallest floating point dtype to which the passed dtype can be safely cast.
For integers and unsigned ints, this will generally be the next floating point type larger than the integer type. So
for instance, smallestFloatType('uint8') -> dtype('float16'), smallestFloatType('int16') -> dtype('float32'),
smallestFloatType('uint32') -> dtype('float64').
This function relies on numpy's promote_types function.
"""
from numpy import dtype as dtypeFunc
from numpy import promote_types
inType = dtypeFunc(dtype)
compSize = max(2, inType.itemsize) # smallest float is at least 16 bits
compType = dtypeFunc('=f'+str(compSize)) # compare to a float of the same size
return promote_types(inType, compType)
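def _demo_smallestFloatType():
    """ Hypothetical check (illustration only) of the promotions described in
    the docstring above. """
    from numpy import dtype as dtypeFunc
    assert smallestFloatType('uint8') == dtypeFunc('float16')
    assert smallestFloatType('int16') == dtypeFunc('float32')
    assert smallestFloatType('uint32') == dtypeFunc('float64')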
def pil_to_array(pilImage):
"""
Load a PIL image and return it as a numpy array. Only supports greyscale images;
the return value will be an M x N array.
Adapted from matplotlib's pil_to_array, copyright 2009-2012 by John D Hunter
"""
# This is intended to be used only with older versions of PIL, for which the new-style
# way of getting a numpy array (np.array(pilimg)) does not appear to work in all cases.
# np.array(pilimg) appears to work with Pillow 2.3.0; with PIL 1.1.7 it leads to
# errors similar to the following:
# In [15]: data = tsc.loadImages('/path/to/tifs/', inputformat='tif-stack')
# In [16]: data.first()[1].shape
# Out[16]: (1, 1, 1)
# In [17]: data.first()[1]
# Out[17]: array([[[ <PIL.TiffImagePlugin.TiffImageFile image mode=I;16 size=512x512 at 0x3B02B00>]]],
# dtype=object)
def toarray(im_, dtype):
"""Return a 1D array of dtype."""
from numpy import fromstring
# Pillow wants us to use "tobytes"
if hasattr(im_, 'tobytes'):
x_str = im_.tobytes('raw', im_.mode)
else:
x_str = im_.tostring('raw', im_.mode)
x_ = fromstring(x_str, dtype)
return x_
if pilImage.mode in ('RGBA', 'RGBX', 'RGB'):
raise ValueError("Thunder only supports luminance / greyscale images in pil_to_array; got image mode: '%s'" %
pilImage.mode)
if pilImage.mode == 'L':
im = pilImage # no conversion needed for luminance images
# return MxN luminance array
x = toarray(im, 'uint8')
x.shape = im.size[1], im.size[0]
return x
elif pilImage.mode.startswith('I;16'):
# return MxN luminance array of uint16
im = pilImage
if im.mode.endswith('B'):
x = toarray(im, '>u2')
else:
x = toarray(im, '<u2')
x.shape = im.size[1], im.size[0]
return x.astype('=u2')
elif pilImage.mode.startswith('I;32') or pilImage.mode == 'I':
# default 'I' mode is 32 bit; see http://svn.effbot.org/public/tags/pil-1.1.7/libImaging/Unpack.c (at bottom)
# return MxN luminance array of uint32
im = pilImage
if im.mode.endswith('B'):
x = toarray(im, '>u4')
else:
x = toarray(im, '<u4')
x.shape = im.size[1], im.size[0]
return x.astype('=u4')
elif pilImage.mode.startswith('F;16'):
# return MxN luminance array of float16
im = pilImage
if im.mode.endswith('B'):
x = toarray(im, '>f2')
else:
x = toarray(im, '<f2')
x.shape = im.size[1], im.size[0]
return x.astype('=f2')
elif pilImage.mode.startswith('F;32') or pilImage.mode == 'F':
# default 'F' mode is 32 bit; see http://svn.effbot.org/public/tags/pil-1.1.7/libImaging/Unpack.c (at bottom)
# return MxN luminance array of float32
im = pilImage
if im.mode.endswith('B'):
x = toarray(im, '>f4')
else:
x = toarray(im, '<f4')
x.shape = im.size[1], im.size[0]
return x.astype('=f4')
else: # unsupported image mode; raise instead of guessing a conversion
raise ValueError("Thunder only supports luminance / greyscale images in pil_to_array; got unknown image " +
"mode: '%s'" % pilImage.mode)
def parseMemoryString(memStr):
"""
Returns the size in bytes of memory represented by a Java-style 'memory string'
parseMemoryString("150k") -> 150000
parseMemoryString("2M") -> 2000000
parseMemoryString("5G") -> 5000000000
parseMemoryString("128") -> 128
Recognized suffixes are k, m, and g. Parsing is case-insensitive.
"""
if isinstance(memStr, basestring):
import re
regPat = r"""(\d+)([bBkKmMgG])?"""
m = re.match(regPat, memStr)
if not m:
raise ValueError("Could not parse '%s' as memory specification; should be NUMBER[k|m|g]" % memStr)
quant = int(m.group(1))
units = m.group(2).lower() if m.group(2) else ""
if units == "g":
return int(quant * 1e9)
elif units == 'm':
return int(quant * 1e6)
elif units == 'k':
return int(quant * 1e3)
return quant
else:
return int(memStr)
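def _demo_parseMemoryString():
    """ Hypothetical check (illustration only) of the examples documented in
    the docstring above. """
    assert parseMemoryString("150k") == 150000
    assert parseMemoryString("2M") == 2000000
    assert parseMemoryString("128") == 128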
def handleFormat(filename, format):
"""
Given a string with filename, either:
(1) obtain format from the filename's extension or
(2) use the specified format to append an extension to filename
Returns the path to the file, the filename, and the inferred format
"""
import os
from thunder.utils.context import DEFAULT_EXTENSIONS
file = os.path.basename(filename)
path = os.path.dirname(filename)
parts = os.path.splitext(file)
ext = parts[1][1:]
if format is None:
if len(ext) == 0:
raise Exception("Cannot infer file type from name %s" % filename)
else:
format = ext
else:
if len(ext) == 0:
file += "." + DEFAULT_EXTENSIONS[format]
return path, file, format
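def _demo_handleFormat():
    """ Hypothetical illustration only (POSIX-style path invented for the
    example): the format is inferred from the extension when none is given. """
    assert handleFormat('/tmp/out.png', None) == ('/tmp', 'out.png', 'png')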
def raiseErrorIfPathExists(path, awsCredentialsOverride=None):
"""
The ValueError message will suggest calling with overwrite=True; this function is expected to be
called from the various output methods that accept an 'overwrite' keyword argument.
"""
# check that specified output path does not already exist
from thunder.rdds.fileio.readers import getFileReaderForPath
reader = getFileReaderForPath(path)(awsCredentialsOverride=awsCredentialsOverride)
existing = reader.list(path, includeDirectories=True)
if existing:
raise ValueError("Path %s appears to already exist. Specify a new directory, or call " % path +
"with overwrite=True to overwrite.")
class AWSCredentials(object):
__slots__ = ('awsAccessKeyId', 'awsSecretAccessKey')
def __init__(self, awsAccessKeyId=None, awsSecretAccessKey=None):
self.awsAccessKeyId = awsAccessKeyId if awsAccessKeyId else None
self.awsSecretAccessKey = awsSecretAccessKey if awsSecretAccessKey else None
def __repr__(self):
def obfuscate(s):
return "None" if s is None else "<%d-char string>" % len(s)
return "AWSCredentials(accessKeyId: %s, secretAccessKey: %s)" % \
(obfuscate(self.awsAccessKeyId), obfuscate(self.awsSecretAccessKey))
def setOnContext(self, sparkContext):
sparkContext._jsc.hadoopConfiguration().set("fs.s3n.awsAccessKeyId", self.awsAccessKeyId)
sparkContext._jsc.hadoopConfiguration().set("fs.s3n.awsSecretAccessKey", self.awsSecretAccessKey)
@classmethod
def fromContext(cls, sparkContext):
if sparkContext:
awsAccessKeyId = sparkContext._jsc.hadoopConfiguration().get("fs.s3n.awsAccessKeyId", "")
awsSecretAccessKey = sparkContext._jsc.hadoopConfiguration().get("fs.s3n.awsSecretAccessKey", "")
return AWSCredentials(awsAccessKeyId, awsSecretAccessKey)
else:
return AWSCredentials()
@property
def credentials(self):
if self.awsAccessKeyId and self.awsSecretAccessKey:
return self.awsAccessKeyId, self.awsSecretAccessKey
else:
return None, None
@property
def credentialsAsDict(self):
access, secret = self.credentials
return {"aws_access_key_id": access, "aws_secret_access_key": secret}
| apache-2.0 |
BinWang0213/Nearwellbore_Streamline | Lib/embedded.py | 1 | 25260 | #########################################################################
# (C) 2017 Department of Petroleum Engineering, #
# Univeristy of Louisiana at Lafayette, Lafayette, US. #
# #
# This code is released under the terms of the BSD license, and thus #
# free for commercial and research use. Feel free to use the code into #
# your own project with a PROPER REFERENCE. #
# #
# A near wellbore streamline tracking code #
# Author: Bin Wang #
# Email: [email protected] #
# Reference: Wang, B., Feng, Y., Du, J., et al. (2017) An Embedded #
# Grid-Free Approach for Near Wellbore Streamline Simulation. #
# doi:10.2118/SPE-182614-MS #
#########################################################################
import numpy as np
import matplotlib.pyplot as plt
import Lib.smallestenclosingcircle as enclose #smallestenclosingcircle library from Nayuki https://www.nayuki.io/page/smallest-enclosing-circle
from .geometry import *
# Embedded Method #
######################
#
# Basic Element Class
#
######################
class Panel:
"""Contains information related to a panel(Virtual Boundary elements)."""
def __init__(self, xa, ya, xb, yb,Q_bd,P_bd,marker):
"""Creates a panel from A-B. It also can be used for real boundary with ghost pairs
Arguments
---------
xa, ya -- Cartesian coordinates of the first end-point A.
xb, yb -- Cartesian coordinates of the second end-point B.
xc, yc -- Cartesian coordinates of the center point.
length -- length of this BE.
sinalpha -- sin(aj) in Fig. 1
cosalpha -- cos(aj) in Fig. 1
Q_bd -- Neumann BC, the normal pressure gradient
P_bd -- Dirichlet BC, the pressure
marker -- boundary marker. e.g bd1,bd2,bd3...
rab -- half distance between two ghost pairs
"""
self.xa, self.ya = xa, ya
self.xb, self.yb = xb, yb
self.xc, self.yc = (xa+xb)/2, (ya+yb)/2 # control-point (center-point)
self.length = np.sqrt((xb-xa)**2+(yb-ya)**2) # length of the panel
#unit vector (x2-x1)/length (y2-y1)/length
#normal unit vector (y1-y2)/length (x2-x1)/length
#point with a given distance along a line: x?=x_Start+unit_vector_x*distance y?=y_Start+unit_vector_y*distance
self.rab=0.00003
self.x_a=self.xc-self.rab/2*(ya-yb)/self.length
self.y_a=self.yc-self.rab/2*(xb-xa)/self.length
self.x_b=self.xc+self.rab/2*(ya-yb)/self.length
self.y_b=self.yc+self.rab/2*(xb-xa)/self.length
# orientation of the panel (angle between x-axis and panel)
self.sinalpha=(yb-ya)/self.length
self.cosalpha=(xb-xa)/self.length
self.Qbd=Q_bd # Neumann boundary condition
self.Pbd=P_bd # Dirichlet boundary condition
self.Q = 0. # source strength
self.P = 0. # source strength
self.marker=marker # boundary marker
class Well:
"""Contains information related to a Well."""
def __init__(self, xw, yw,rw,Q,P):
"""Creates a panel.
Arguments
---------
xw, yw -- Cartesian coordinates of well source.
Q -- Flow rate of well source/sink
P -- Bottom hole pressure of well source/sink
rw -- radius of well source.
"""
self.xw, self.yw = xw, yw
self.Q = Q # Neumann boundary condition
self.P = P # Dirichlet boundary condition
self.rw = rw # raidus
######################
#
# Mesh Tools
#
######################
def Add_Line(Pts_a=(0,0),Pts_b=(0,1),Nbd=5,Qbd=10.0,Pbd=10.0,bd_marker=-1,panels=[]):
"""Creates a BE along a line boundary. Anticlock wise, it decides the outward normal direction
Arguments
---------
xa, ya -- Cartesian coordinates of the first start-point.
xb, yb -- Cartesian coordinates of the second end-point
Nbd -- Number of elements in this line
Qbd -- Neumann B.C
Pbd -- Dirichlet B.C
Panels -- BE container, return value
"""
Pts=EndPointOnLine(Pts_a,Pts_b,Nseg=Nbd,refinement="cosspace")
for i in range(Nbd):
panels.append(Panel(Pts[i][0],Pts[i][1], Pts[i+1][0], Pts[i+1][1],Qbd,Pbd,bd_marker))#Neumann, Dirichlet
def Add_Circle(Pts_c=(0,0),R=1,Nbd=5,Qbd=10.0,Pbd=10.0,bd_marker=-1,panels=[]):
"""Creates a BE along a circle boundary. Clockwise for well discretization, it decides the outward normal direction
Arguments
---------
xc, yc -- Cartesian coordinates of the circle center.
R -- The radius of the circle
Nbd -- Number of elements in this line
Qbd -- Neumann B.C
Pbd -- Dirichlet B.C
Panels -- BE container, return value
"""
# Generalize the image well position/Define the image panel around the computation domain
Pts=EndPointOnCircle(Pts_c,R=R,Nseg=Nbd)
for i in range(Nbd):
panels.append(Panel(Pts[i][0],Pts[i][1], Pts[i+1][0], Pts[i+1][1],Qbd,Pbd,bd_marker))
def Add_Well(loc=(0,0),rw=1,Qwell=0,Pwell=100,wells=[]):
"""Creates a BE along a circle boundary. Clockwise for well discretization, it decides the outward normal direction
Arguments
---------
loc -- location of the well.
rw -- The radius of wellbore
Qwell -- Wellbore flow rate
Pwell -- Bottomhole pressure
wells -- Well container, return value
"""
wells.append(Well(loc[0],loc[1],rw,Qwell,Pwell))
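# Hypothetical usage sketch of the mesh tools above (all numbers are invented
# for illustration and it is assumed that the EndPointOnLine/EndPointOnCircle
# helpers from the geometry module are importable): discretize a unit-square
# boundary into ghost-node pairs, surround it with a circle of virtual boundary
# elements and place a single well at the centre.
def _demo_mesh_tools():
    BEs, Ghos, Wells = [], [], []
    corners = [(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)]
    for k in range(4):  # one boundary edge at a time
        Add_Line(corners[k], corners[k + 1], Nbd=5, Qbd=0.0, bd_marker=k, panels=Ghos)
    Add_Circle(Pts_c=(0.5, 0.5), R=1.5, Nbd=20, panels=BEs)  # enclosing circle of VBEM sources
    Add_Well(loc=(0.5, 0.5), rw=0.25, Qwell=100.0, wells=Wells)
    return BEs, Ghos, Wells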
######################
#
# Core Code
#
######################
class WellGrid:
"""Contains information and functions related to a wellgrid."""
def __init__(self, Pts_e=[],Pts_w=[],Qw=[],Qe=[],Nbd=10,rw=0.25,h=26.25,phi=0.2,miu=1,kxy=(100,100)):
"""Creates a WellGrid with abrituary shape and multiple wells using VBEM
Arguments
---------
Ne -- Number of wellblock edges
Nw -- Number of wells
Nbd -- Number of virtual boundary elements per edge (ghost point pair).
Nbe -- Total number of boundary elements (unknowns)
Qw -- Wellbore flow rate, stb/day
Qe -- Outflow edge flow rate, stb/day
h -- Wellblock thickness
phi -- Wellblock porosity
miu -- Fluid viscosity
kxy -- Principle permeability of the wellblock. e.g. [kxx,kyy]
Pts_e -- Vertices of boundary edge e.g. [(0,0),(0,1),(1,1),(1,0)]
Pts_w -- Location of well e.g. [(0.5,0.5),(0.75,0.75)]
domain_min -- minimum (x,y) for a polygon domain
domain_max -- maximum (x,y) for a polygon domain
Pts Location Scheme example
1-----2
| |
| 1.. |
| |
0-----3
Pts OutFlow Edge
0 0(0,1)
1 1(1,2)
2 2(2,3)
3 3(3,0)
Well
1
2
..
Default Sequence in wellgrid: bottom(0)-left(1)-top(2)-right(3), clockwise
Derived&Output Arguments
---------
BEs -- List of circle virtual boundary elements [virtual boundary]
Ghos -- List of boundary ghost node pairs [real boundary]
Wells -- List of wells
SL -- Array of streamlines
TOF -- Array of time-of-flight
"""
self.Pts_e=Pts_e
self.Pts_w=Pts_w
self.Ne=len(Qe)
self.Nw=len(Qw)
self.Nbd=Nbd
self.Nbe=self.Nbd*self.Ne
self.Qw=Qw
self.Qe=Qe
self.rw=rw
self.h=h
self.phi=phi
self.miu=miu
self.kx=kxy[0]
self.ky=kxy[1]
#Boundary elements
self.BEs=[]
self.Ghos=[]
self.Wells=[]
#Streamlines
self.SL=[]
self.TOF=[]
#Additional Geometory variable
self.domain_min=(min(np.asarray(self.Pts_e)[:,0]),min(np.asarray(self.Pts_e)[:,1]))
self.domain_max=(max(np.asarray(self.Pts_e)[:,0]),max(np.asarray(self.Pts_e)[:,1]))
def Meshing(self):
"""Genetrating meshing for VBEM (virtual boundary elements, ghost node pairs and wells)
Fig. 2 in in SPE-182614-MS
Arguments
---------
Origin -- Centroid of a polygon domain
R -- Minimum radius of circle which enclose the polygon domain
error -- empty space for plotting the Mesh
"""
#Circle virtual boundary elements
Origin = centroid2D(self.Pts_e)
R = 1.5*enclose.make_circle(self.Pts_e)[1]
#R=50
#print(R)
Add_Circle(Origin,R,self.Nbe,panels=self.BEs)
#Ghost node pairs and Boundary conditions
self.Pts_e.append(self.Pts_e[0])
for i in range(self.Ne):
Add_Line(self.Pts_e[i],self.Pts_e[i+1],self.Nbd,Qbd=self.Qe[i],panels=self.Ghos,bd_marker=i)
#Wells
for i in range(self.Nw):
Add_Well(self.Pts_w[i],self.rw,Qwell=self.Qw[i],wells=self.Wells)
#Mesh Plot
error=R*0.2 #empty space around the circle VBEM
plt.figure(figsize=(3, 3))
plt.axes().set(xlim=[Origin[0]-R-error, Origin[0]+R+error],
ylim=[Origin[1]-R-error, Origin[1]+R+error],aspect='equal')
#Domain boundary
plt.plot(*np.asarray(self.Pts_e).T,lw=1,color='black')
plt.scatter(*np.asarray(self.Pts_w).T,s=20,color='red')
#Virtual Boundary elements
plt.plot(np.append([BE.xa for BE in self.BEs], self.BEs[0].xa),
np.append([BE.ya for BE in self.BEs], self.BEs[0].ya),
'bo--',markersize=5);
#Ghost node pair
plt.scatter([Gho.x_a for Gho in self.Ghos], [Gho.y_a for Gho in self.Ghos], color='r',facecolors='none', s=5)
plt.scatter([Gho.x_b for Gho in self.Ghos], [Gho.y_b for Gho in self.Ghos], color='r',facecolors='none', s=5)
plt.axes().set_title('VBEM Mesh')
plt.show()
def GH_analytical(self,Pts=(0,0),BE=[]):
'''Calculate the BE influence coefficients for the pressure and normal flux
Eq. (5) (6),(7),(8) in SPE-182614-MS
'''
#Transfer global coordinate point(x,y) to local coordinate
x,y=Pts[0]-BE.xa,Pts[1]-BE.ya #Eq. A-1 in SPE-182614-MS
L=BE.length
kr=self.kx/self.ky
unit_v=0.4468 #unit converstion factor
a=BE.cosalpha**2+kr*BE.sinalpha**2
b=x*BE.cosalpha+kr*BE.sinalpha*y
c=y*BE.cosalpha-x*BE.sinalpha
#dp=-70.6*self.miu/self.h/np.sqrt(self.kx*self.ky)
dv=-unit_v/self.h/self.phi*np.sqrt(kr)
#print('xy',x,y)
#print('abc',a,b,c)
#print('kr,sin,cos',kr,BE.cosalpha,BE.sinalpha)
Gij = -1/a*(
(
b*np.log(x**2-2*b*L+a*L**2+kr*y**2)
-L*a*np.log((x-L*BE.cosalpha)**2+kr*(y-L*BE.sinalpha)**2)
+2*np.sqrt(kr)*c*np.arctan((b-a*L)/np.sqrt(kr)/c)
)
-
(
b*np.log(x**2+kr*y**2)
+2*np.sqrt(kr)*c*np.arctan((b)/np.sqrt(kr)/c)
)
)
Hij_x = dv/a*(
(
BE.cosalpha*np.log(x**2-2*b*L+a*L**2+kr*y**2)
+ 2*np.sqrt(kr)*BE.sinalpha*np.arctan((a*L-b)/np.sqrt(kr)/c)
)
-
(
BE.cosalpha*np.log(x**2+kr*y**2)+2*np.sqrt(kr)*BE.sinalpha*np.arctan((-b)/np.sqrt(kr)/c)
)
)
Hij_y = dv/a*(
(
BE.sinalpha*np.log(x**2-2*b*L+a*L**2+kr*y**2)
+ 2*np.sqrt(1/kr)*BE.cosalpha*np.arctan((b-a*L)/np.sqrt(kr)/c)
)
-
(
BE.sinalpha*np.log(x**2+kr*y**2)+2*np.sqrt(1/kr)*BE.cosalpha*np.arctan((b)/np.sqrt(kr)/c)
)
)
return Gij,Hij_x,Hij_y
def GHw_analytical(self,Pts=(0,0),well=[]):
'''Calculate the well influence coefficients for the pressure and normal flux
Eq. (9),(10),(11) in SPE-182614-MS
'''
unit_v=0.8936 #0.8936 is a unit conversion factor
#dp=-70.6*self.miu/self.h/np.sqrt(self.kx*self.ky)
dv=unit_v/self.h/self.phi*np.sqrt(self.kx/self.ky)
Gij=np.log((Pts[0]-well.xw)**2+(self.kx/self.ky)*(Pts[1]-well.yw)**2)
Hij_x=dv*(Pts[0]-well.xw)/((Pts[0]-well.xw)**2+(self.kx/self.ky)*(Pts[1]-well.yw)**2)
Hij_y=dv*(Pts[1]-well.yw)/((Pts[0]-well.xw)**2+(self.kx/self.ky)*(Pts[1]-well.yw)**2)
return Gij,Hij_x,Hij_y
def FlowSol(self):
'''Solve the flow field (build matrix) using VBEM
Eq. (13) in SPE-182614-MS
'''
#1. Build matrix A
MatA=np.zeros((self.Nbe,self.Nbe))
for i, Gho in enumerate(self.Ghos): #target ghost pairs nodes
for j, BE in enumerate(self.BEs): #BE source
Pts_a=Gho.x_a,Gho.y_a
Pts_b=Gho.x_b,Gho.y_b
MatA[i,j]=self.GH_analytical(Pts_a,BE)[0]-self.GH_analytical(Pts_b,BE)[0]
#2. Build matrix RHS
RHS_well=np.zeros((self.Nbe))
RHS_Neumann=np.zeros((self.Nbe))
for i, Gho in enumerate(self.Ghos): #target ghost pairs nodes
tempRHS=0.0
for j, Well in enumerate(self.Wells): #well source
Pts_a=Gho.x_a,Gho.y_a
Pts_b=Gho.x_b,Gho.y_b
tempRHS=tempRHS+(self.GHw_analytical(Pts_a,Well)[0]-self.GHw_analytical(Pts_b,Well)[0])*Well.Q
RHS_well[i]=tempRHS
for i in range(self.Ne):#Boundary conditions
for j, Gho in enumerate(self.Ghos):#Corresponding elements
if(Gho.marker==i):
#print(i,Gho.Qbd)
Lbd=CalcDist(self.Pts_e[i],self.Pts_e[i+1])
kbd=self.kx*abs(Gho.sinalpha)+self.ky*abs(Gho.cosalpha)
if (self.kx==self.ky): kbd=self.kx
RHS_Neumann[j]=4*np.pi*Gho.Qbd*Gho.rab*np.sqrt(self.kx*self.ky)/Lbd/kbd
RHS=-RHS_well-RHS_Neumann
#4. Solve the matrix
Q_BEs=np.linalg.lstsq(MatA,RHS)[0] #Solve Equation
for i, BE in enumerate(self.BEs):
BE.Q=Q_BEs[i]
#np.savetxt('MatA.csv',MatA)
#np.savetxt('Matb.csv',RHS)
#print(Q_BEs)
def FieldSol(self,Pts=(0,0)):
'''Calculate the pressure and velocity at any point (x,y)
Eq. (2) (3) and (4) in SPE-182614-MS
'''
unit_p=70.6 #unit conversion factor
dp=-unit_p*self.miu/self.h/np.sqrt(self.kx*self.ky)
p=u=v=0.0
for i,BE in enumerate(self.BEs):
puv=self.GH_analytical(Pts,BE)
p=p+BE.Q*dp*puv[0]
u=u+BE.Q*puv[1]
v=v+BE.Q*puv[2]
for i,Well in enumerate(self.Wells):
puv_w=self.GHw_analytical(Pts,Well)
p=p+Well.Q*dp*puv_w[0]
u=u+Well.Q*puv_w[1]
v=v+Well.Q*puv_w[2]
return p,u,v
def FieldPlot(self,vmax=100.01):
'''Plot pressure&velocity field and Preview the streamline
'''
#Calculate pressure and velocity field
from matplotlib import path
Polygon = path.Path(self.Pts_e)
N = 30 # number of points in the x and y directions
error=1e-6
xmin,ymin=self.domain_min[0]+error,self.domain_min[1]+error
xmax,ymax=self.domain_max[0]-error,self.domain_max[1]-error
X, Y = np.meshgrid(np.linspace(xmin, xmax, N), np.linspace(ymin, ymax, N)) # generates a mesh grid
#Calculate the velocity and pressure field
p = np.empty((N, N), dtype=float)
u = np.empty((N, N), dtype=float)
v = np.empty((N, N), dtype=float)
for i in range(N):
for j in range(N):
Pts=(X[i,j], Y[i,j])
flag=Polygon.contains_points([Pts])
#print(Pts,flag)
#flag=True
if (flag==True):
puv=self.FieldSol(Pts)
p[i,j],u[i,j],v[i,j]=puv[0],puv[1],puv[2]
else:#point is not within the domain
p[i,j]=u[i,j]=v[i,j]= "nan"
fig, axes = plt.subplots(ncols=3,figsize=(10, 10))
Vtotal= np.sqrt(u**2+v**2)
if (vmax==100.01):
Vtotal_max=np.nanmax(Vtotal.flatten())
else:
Vtotal_max=vmax
from mpl_toolkits.axes_grid1 import make_axes_locatable
for i, ax in enumerate(axes.flat):
ax.set(xlim=[xmin-error, xmax+error],ylim=[ymin-error, ymax+error],aspect='equal')
ax.plot(*np.asarray(self.Pts_e).T,lw=1,color='black')
ax.scatter(*np.asarray(self.Pts_w).T,s=20,color='red')
if i==0:
ax.set_title(r'Velocity Contour')
level = np.linspace(0, Vtotal_max, 15, endpoint=True)
im=ax.contour(X, Y, Vtotal,level,linewidths=1.2)
if i==1:
ax.set_title(r'Velocity Field')
#im=ax.pcolormesh(X,Y,Vtotal,vmax=Vtotal_max)
extent=(xmin,xmax,ymin,ymax)
im=ax.imshow(Vtotal,vmin=0,vmax=Vtotal_max,extent=extent,origin='lower',interpolation='nearest')
if i==2:
import matplotlib.colors as colors
import warnings
warnings.filterwarnings('ignore') #hide the warnings raised when "nan" values are involved
ax.set_title(r'Streamline Preview')
level=colors.Normalize(vmin=0,vmax=Vtotal_max)
strm=ax.streamplot(X, Y, u, v,color=Vtotal,norm=level)
im=strm.lines
#Add colorbar
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", "10%", pad=0.15)
fig.colorbar(im,cax=cax) # draw colorbar
fig.tight_layout()
plt.savefig('Field Plot.png',dpi=300)
plt.show()
return p,u,v
def RungeKutta(self,Pts1=(0,0),TOF1=0.0,dt=0.0003,method="RK2",tol=0.05,debug=0):
"""Runge-kutta method for tracing streamline
Method list:
1. 2th order Runge-kutta method
2. 4th order Runge-kutta method
3. Adaptive method with given maximum distance for a specific dt
maximum travel distance=tol*the minimum radius of enclose circle
"""
if (method=="RK2"):
#Step1
puv1=self.FieldSol(Pts1)
Vx1=[puv1[1],puv1[2]]
Pts2=Pts1+np.multiply(Vx1,dt)
#print(Pts1,Vx1,dt,Pts2)
#Step2
puv2=self.FieldSol(Pts2)
Vx2=[puv2[1],puv2[2]]
Pts_RK2=Pts1+np.multiply(np.add(Vx1,Vx2),0.5*dt)
#print(Pts1,Vx2,dt,Pts_RK2)
Pts=Pts_RK2
if (method=="RK4"):#Eq. C1-C6
#Step1
puv1=self.FieldSol(Pts1)
Vx1=[puv1[1],puv1[2]]
kuv1=Vx1*dt
Pts2=Pts1+0.5*kuv1
#Step2
puv2=self.FieldSol(Pts2)
Vx2=[puv2[1],puv2[2]]
kuv2=Vx2*dt
Pts3=Pts1+0.5*kuv2
#Step3
puv3=self.FieldSol(Pts3)
Vx3=[puv3[1],puv3[2]]
kuv3=Vx3*dt
Pts4=Pts1+0.5*kuv3
#Step4
puv4=self.FieldSol(Pts4)
Vx4=[puv4[1],puv4[2]]
kuv4=Vx4*dt
Pts_RK4=Pts1+(kuv1+2*kuv2+2*kuv3+kuv4)/6
Pts=Pts_RK4
if (method=='Adaptive'):
maxdist=enclose.make_circle(self.Pts_e)[1]*tol
t=0.0
count=0
dt_sub=0.0
#print('Start Point',Pts1)
while (t<dt): #tracing streamline until it hit the boundary
puv=self.FieldSol(Pts1)
Vx1=[puv[1],puv[2]]
V_Pts=np.sqrt(puv[1]**2+puv[2]**2)
dt_sub=maxdist/V_Pts
Pts_adaptive=Pts1+np.multiply(Vx1,dt_sub)
#print('from',Pts1,'to',Pts_adaptive,'V',V_Pts,'dt',dt_sub,'t',t)
t=t+dt_sub
count=count+1
Pts1=Pts_adaptive
if (count>30):
#print('Diverging!')
break
#print('Velocity',V_Pts,'MiniR',maxdist/tol,'MaximumDist',maxdist)
if (debug): print('EndPoint',Pts_adaptive,'sub-dt',dt_sub,'sub-steps',count)
Pts=Pts_adaptive
dt=t
TOF=np.add(TOF1,dt)
return Pts,TOF
def SLtrace(self,NSL=10,deltaT=0.1,method='adaptive',tol=0.05,debug=0):
"""Trace Streamlines in the wellgrid using runge-kutta method
Arguments
---------
NSL -- Total Number of Streamline
NSL_w -- Streamline number for each well
method -- Numerical intergation method. RK2, RK4 and Adaptive are provided
Output
---------
TOF -- TOF array of of the [NSL] streamlines
SL -- nodes array of of the [NSL] streamlines
TOF_end -- a boundary TOF list of the [NSL] streamlines
SL_end -- a boundary node list of the [NSL] streamlines
"""
self.SL=[]
self.TOF=[]
TOF_end=[]
SL_end=[]
#Generate the starting points of the streamlines
NSL_w=np.zeros((self.Nw))
SL_count=0
for i in range(self.Nw):
NSL_w[i]=int(self.Qw[i]/sum(self.Qw)*NSL)
for j in range(int(NSL_w[i])):
Pts0=[[self.Wells[i].xw+self.Wells[i].rw*np.cos(j*2*np.pi/NSL_w[i]),
self.Wells[i].yw+self.Wells[i].rw*np.sin(j*2*np.pi/NSL_w[i])]]
TOF0=[[0.0]]
self.SL.append(Pts0)
self.TOF.append(TOF0)
NSL=sum(NSL_w) #update the streamline number
#Tracing SL using RK algorithm
for i in range(int(NSL)):
j=0
flag=False
Pts0=(0,0)
TOF0=0.0
while (flag==False): #tracing streamline until it hit the boundary
Pts0=self.SL[i][j]
TOF0=self.TOF[i][j]
Pts1,TOF1=self.RungeKutta(Pts0,TOF0,dt=deltaT,method=method,tol=tol,debug=debug)
self.SL[i].append(Pts1)
self.TOF[i].append(TOF1)
j=j+1
#print('j',j,self.SL)
#print(self.TOF)
#Check boundary hit
for k in range(self.Ne):
SLseg=(Pts0,Pts1)
BD=(self.Pts_e[k],self.Pts_e[k+1])
flag=LineSegIntersect(SLseg,BD)
if (flag==True): break
if (flag==True):
TOF_end.append(TOF0)
SL_end.append(Pts0)
#if j==10: break
#Plot Streamline
plt.figure(figsize=(3, 3))
plt.axes().set(xlim=[self.domain_min[0], self.domain_max[0]],ylim=[self.domain_min[1], self.domain_max[1]],aspect='equal')
plt.title('Streamlines')
#Domain boundary
plt.plot(*np.asarray(self.Pts_e).T,lw=1,color='black')
plt.scatter(*np.asarray(self.Pts_w).T,s=20,color='red')
#Streamline
for i in range(len(self.SL)):
plt.plot(*np.asarray(self.SL[i]).T,lw=1,marker='o',markersize=0,color='blue')
plt.show()
return self.SL,self.TOF,SL_end,TOF_end
| bsd-3-clause |
datapythonista/pandas | pandas/core/indexers.py | 1 | 16639 | """
Low-dependency indexing utilities.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
import warnings
import numpy as np
from pandas._typing import (
Any,
AnyArrayLike,
ArrayLike,
)
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
is_integer_dtype,
is_list_like,
)
from pandas.core.dtypes.generic import (
ABCIndex,
ABCSeries,
)
if TYPE_CHECKING:
from pandas.core.frame import DataFrame
from pandas.core.indexes.base import Index
# -----------------------------------------------------------
# Indexer Identification
def is_valid_positional_slice(slc: slice) -> bool:
"""
Check if a slice object can be interpreted as a positional indexer.
Parameters
----------
slc : slice
Returns
-------
bool
Notes
-----
A valid positional slice may also be interpreted as a label-based slice
depending on the index being sliced.
"""
def is_int_or_none(val):
return val is None or is_integer(val)
return (
is_int_or_none(slc.start)
and is_int_or_none(slc.stop)
and is_int_or_none(slc.step)
)
def is_list_like_indexer(key) -> bool:
"""
Check if we have a list-like indexer that is *not* a NamedTuple.
Parameters
----------
key : object
Returns
-------
bool
"""
# allow a list_like, but exclude NamedTuples which can be indexers
return is_list_like(key) and not (isinstance(key, tuple) and type(key) is not tuple)
def is_scalar_indexer(indexer, ndim: int) -> bool:
"""
Return True if we are all scalar indexers.
Parameters
----------
indexer : object
ndim : int
Number of dimensions in the object being indexed.
Returns
-------
bool
"""
if ndim == 1 and is_integer(indexer):
# GH37748: allow indexer to be an integer for Series
return True
if isinstance(indexer, tuple) and len(indexer) == ndim:
return all(
is_integer(x) or (isinstance(x, np.ndarray) and x.ndim == len(x) == 1)
for x in indexer
)
return False
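def _demo_is_scalar_indexer():
    # Hypothetical illustration (not part of pandas): a bare integer is a
    # scalar indexer for a 1-D object, a tuple of integers for an n-D one,
    # while a list is not.
    assert is_scalar_indexer(5, ndim=1)
    assert is_scalar_indexer((1, 2), ndim=2)
    assert not is_scalar_indexer([1, 2], ndim=2)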
def is_empty_indexer(indexer, arr_value: np.ndarray) -> bool:
"""
Check if we have an empty indexer.
Parameters
----------
indexer : object
arr_value : np.ndarray
Returns
-------
bool
"""
if is_list_like(indexer) and not len(indexer):
return True
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = (indexer,)
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
# -----------------------------------------------------------
# Indexer Validation
def check_setitem_lengths(indexer, value, values) -> bool:
"""
Validate that value and indexer are the same length.
A special case is allowed when the indexer is a boolean array
and the number of True values equals the length of ``value``. In
this case, no exception is raised.
Parameters
----------
indexer : sequence
Key for the setitem.
value : array-like
Value for the setitem.
values : array-like
Values being set into.
Returns
-------
bool
Whether this is an empty listlike setting which is a no-op.
Raises
------
ValueError
When the indexer is an ndarray or list and the lengths don't match.
"""
no_op = False
if isinstance(indexer, (np.ndarray, list)):
# We can ignore other listlikes because they are either
# a) not necessarily 1-D indexers, e.g. tuple
# b) boolean indexers e.g. BoolArray
if is_list_like(value):
if len(indexer) != len(value) and values.ndim == 1:
# boolean with truth values == len of the value is ok too
if isinstance(indexer, list):
indexer = np.array(indexer)
if not (
isinstance(indexer, np.ndarray)
and indexer.dtype == np.bool_
and len(indexer[indexer]) == len(value)
):
raise ValueError(
"cannot set using a list-like indexer "
"with a different length than the value"
)
if not len(indexer):
no_op = True
elif isinstance(indexer, slice):
if is_list_like(value):
if len(value) != length_of_indexer(indexer, values) and values.ndim == 1:
# In the two-dimensional case the value is applied row-wise and broadcast
raise ValueError(
"cannot set using a slice indexer with a "
"different length than the value"
)
if not len(value):
no_op = True
return no_op
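def _demo_check_setitem_lengths():
    # Hypothetical illustration (not part of pandas): a boolean mask whose
    # number of True values matches the value length passes, and an empty
    # list-like key is reported back as a no-op.
    values = np.arange(4)
    assert not check_setitem_lengths(np.array([True, False, True, False]), [10, 20], values)
    assert check_setitem_lengths(np.array([], dtype=np.intp), [], values)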
def validate_indices(indices: np.ndarray, n: int) -> None:
"""
Perform bounds-checking for an indexer.
-1 is allowed for indicating missing values.
Parameters
----------
indices : ndarray
n : int
Length of the array being indexed.
Raises
------
ValueError
Examples
--------
>>> validate_indices(np.array([1, 2]), 3) # OK
>>> validate_indices(np.array([1, -2]), 3)
Traceback (most recent call last):
...
ValueError: 'indices' contains values less than allowed (-2 < -1)
>>> validate_indices(np.array([1, 2, 3]), 3)
Traceback (most recent call last):
...
IndexError: indices are out-of-bounds
>>> validate_indices(np.array([-1, -1]), 0) # OK
>>> validate_indices(np.array([0, 1]), 0)
Traceback (most recent call last):
...
IndexError: indices are out-of-bounds
"""
if len(indices):
min_idx = indices.min()
if min_idx < -1:
msg = f"'indices' contains values less than allowed ({min_idx} < -1)"
raise ValueError(msg)
max_idx = indices.max()
if max_idx >= n:
raise IndexError("indices are out-of-bounds")
# -----------------------------------------------------------
# Indexer Conversion
def maybe_convert_indices(indices, n: int, verify: bool = True):
"""
Attempt to convert indices into valid, positive indices.
If we have negative indices, translate to positive here.
If we have indices that are out-of-bounds, raise an IndexError.
Parameters
----------
indices : array-like
Array of indices that we are to convert.
n : int
Number of elements in the array that we are indexing.
verify : bool, default True
Check that all entries are between 0 and n - 1, inclusive.
Returns
-------
array-like
An array-like of positive indices that correspond to the ones
that were passed in initially to this function.
Raises
------
IndexError
One of the converted indices either exceeded the number of
elements (specified by `n`), or was still negative.
"""
if isinstance(indices, list):
indices = np.array(indices)
if len(indices) == 0:
# If `indices` is empty, np.array will return a float,
# and will cause indexing errors.
return np.empty(0, dtype=np.intp)
mask = indices < 0
if mask.any():
indices = indices.copy()
indices[mask] += n
if verify:
mask = (indices >= n) | (indices < 0)
if mask.any():
raise IndexError("indices are out-of-bounds")
return indices
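def _demo_maybe_convert_indices():
    # Hypothetical illustration (not part of pandas): negative entries are
    # translated against the length n, out-of-bounds entries raise IndexError.
    converted = maybe_convert_indices(np.array([-1, 0, 1]), n=3)
    assert list(converted) == [2, 0, 1]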
# -----------------------------------------------------------
# Unsorted
def is_exact_shape_match(target: ArrayLike, value: ArrayLike) -> bool:
"""
Is setting this value into this target overwriting the entire column?
Parameters
----------
target : np.ndarray or ExtensionArray
value : np.ndarray or ExtensionArray
Returns
-------
bool
"""
return (
len(value.shape) > 0
and len(target.shape) > 0
and value.shape[0] == target.shape[0]
and value.size == target.size
)
def length_of_indexer(indexer, target=None) -> int:
"""
Return the expected length of target[indexer]
Returns
-------
int
"""
if target is not None and isinstance(indexer, slice):
target_len = len(target)
start = indexer.start
stop = indexer.stop
step = indexer.step
if start is None:
start = 0
elif start < 0:
start += target_len
if stop is None or stop > target_len:
stop = target_len
elif stop < 0:
stop += target_len
if step is None:
step = 1
elif step < 0:
start, stop = stop + 1, start + 1
step = -step
return (stop - start + step - 1) // step
elif isinstance(indexer, (ABCSeries, ABCIndex, np.ndarray, list)):
if isinstance(indexer, list):
indexer = np.array(indexer)
if indexer.dtype == bool:
# GH#25774
return indexer.sum()
return len(indexer)
elif isinstance(indexer, range):
return (indexer.stop - indexer.start) // indexer.step
elif not is_list_like_indexer(indexer):
return 1
raise AssertionError("cannot find the length of the indexer")
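def _demo_length_of_indexer():
    # Hypothetical illustration (not part of pandas): slices are measured
    # against the target, boolean masks by their number of True values.
    target = np.arange(10)
    assert length_of_indexer(slice(2, 8, 2), target) == 3
    assert length_of_indexer(np.array([True, False, True])) == 2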
def deprecate_ndim_indexing(result, stacklevel: int = 3):
"""
Helper function to raise the deprecation warning for multi-dimensional
indexing on 1D Series/Index.
GH#27125 indexer like idx[:, None] expands dim, but we cannot do that
and keep an index, so we currently return ndarray, which is deprecated
(Deprecation GH#30588).
"""
if np.ndim(result) > 1:
warnings.warn(
"Support for multi-dimensional indexing (e.g. `obj[:, None]`) "
"is deprecated and will be removed in a future "
"version. Convert to a numpy array before indexing instead.",
FutureWarning,
stacklevel=stacklevel,
)
def unpack_1tuple(tup):
"""
If we have a length-1 tuple/list that contains a slice, unpack to just
the slice.
Notes
-----
The list case is deprecated.
"""
if len(tup) == 1 and isinstance(tup[0], slice):
# if we don't have a MultiIndex, we may still be able to handle
# a 1-tuple. see test_1tuple_without_multiindex
if isinstance(tup, list):
# GH#31299
warnings.warn(
"Indexing with a single-item list containing a "
"slice is deprecated and will raise in a future "
"version. Pass a tuple instead.",
FutureWarning,
stacklevel=3,
)
return tup[0]
return tup
def check_key_length(columns: Index, key, value: DataFrame):
"""
Checks if a key used as indexer has the same length as the columns it is
associated with.
Parameters
----------
columns : Index The columns of the DataFrame to index.
key : A list-like of keys to index with.
value : DataFrame The value to set for the keys.
Raises
------
ValueError: If the length of key is not equal to the number of columns in value
or if the number of columns referenced by key is not equal to number
of columns.
"""
if columns.is_unique:
if len(value.columns) != len(key):
raise ValueError("Columns must be same length as key")
else:
# Missing keys in columns are represented as -1
if len(columns.get_indexer_non_unique(key)[0]) != len(value.columns):
raise ValueError("Columns must be same length as key")
# -----------------------------------------------------------
# Public indexer validation
def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any:
"""
Check if `indexer` is a valid array indexer for `array`.
For a boolean mask, `array` and `indexer` are checked to have the same
length. The dtype is validated, and if it is an integer or boolean
ExtensionArray, it is checked if there are missing values present, and
it is converted to the appropriate numpy array. Other dtypes will raise
an error.
Non-array indexers (integer, slice, Ellipsis, tuples, ..) are passed
through as is.
.. versionadded:: 1.0.0
Parameters
----------
array : array-like
The array that is being indexed (only used for the length).
indexer : array-like or list-like
The array-like that's used to index. List-like input that is not yet
a numpy array or an ExtensionArray is converted to one. Other input
types are passed through as is.
Returns
-------
numpy.ndarray
The validated indexer as a numpy array that can be used to index.
Raises
------
IndexError
When the lengths don't match.
ValueError
When `indexer` cannot be converted to a numpy ndarray to index
(e.g. presence of missing values).
See Also
--------
api.types.is_bool_dtype : Check if `key` is of boolean dtype.
Examples
--------
When checking a boolean mask, a boolean ndarray is returned when the
arguments are all valid.
>>> mask = pd.array([True, False])
>>> arr = pd.array([1, 2])
>>> pd.api.indexers.check_array_indexer(arr, mask)
array([ True, False])
An IndexError is raised when the lengths don't match.
>>> mask = pd.array([True, False, True])
>>> pd.api.indexers.check_array_indexer(arr, mask)
Traceback (most recent call last):
...
IndexError: Boolean index has wrong length: 3 instead of 2.
NA values in a boolean array are treated as False.
>>> mask = pd.array([True, pd.NA])
>>> pd.api.indexers.check_array_indexer(arr, mask)
array([ True, False])
A numpy boolean mask will get passed through (if the length is correct):
>>> mask = np.array([True, False])
>>> pd.api.indexers.check_array_indexer(arr, mask)
array([ True, False])
Similarly for integer indexers, an integer ndarray is returned when it is
a valid indexer, otherwise an error is raised (for integer indexers, a matching
length is not required):
>>> indexer = pd.array([0, 2], dtype="Int64")
>>> arr = pd.array([1, 2, 3])
>>> pd.api.indexers.check_array_indexer(arr, indexer)
array([0, 2])
>>> indexer = pd.array([0, pd.NA], dtype="Int64")
>>> pd.api.indexers.check_array_indexer(arr, indexer)
Traceback (most recent call last):
...
ValueError: Cannot index with an integer indexer containing NA values
For non-integer/boolean dtypes, an appropriate error is raised:
>>> indexer = np.array([0., 2.], dtype="float64")
>>> pd.api.indexers.check_array_indexer(arr, indexer)
Traceback (most recent call last):
...
IndexError: arrays used as indices must be of integer or boolean type
"""
from pandas.core.construction import array as pd_array
# whatever is not an array-like is returned as-is (possible valid array
# indexers that are not array-like: integer, slice, Ellipsis, None)
# In this context, tuples are not considered as array-like, as they have
# a specific meaning in indexing (multi-dimensional indexing)
if is_list_like(indexer):
if isinstance(indexer, tuple):
return indexer
else:
return indexer
# convert list-likes to array
if not is_array_like(indexer):
indexer = pd_array(indexer)
if len(indexer) == 0:
# empty list is converted to float array by pd.array
indexer = np.array([], dtype=np.intp)
dtype = indexer.dtype
if is_bool_dtype(dtype):
if is_extension_array_dtype(dtype):
indexer = indexer.to_numpy(dtype=bool, na_value=False)
else:
indexer = np.asarray(indexer, dtype=bool)
# GH26658
if len(indexer) != len(array):
raise IndexError(
f"Boolean index has wrong length: "
f"{len(indexer)} instead of {len(array)}"
)
elif is_integer_dtype(dtype):
try:
indexer = np.asarray(indexer, dtype=np.intp)
except ValueError as err:
raise ValueError(
"Cannot index with an integer indexer containing NA values"
) from err
else:
raise IndexError("arrays used as indices must be of integer or boolean type")
return indexer
| bsd-3-clause |
calvingit21/h2o-2 | py/testdir_multi_jvm/test_GLM2grid_hastie.py | 9 | 2649 | import unittest, time, sys, copy
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_util, h2o_import as h2i
## Dataset created from this:
#
# from sklearn.datasets import make_hastie_10_2
# import numpy as np
# i = 1000000
# f = 10
# (X,y) = make_hastie_10_2(n_samples=i,random_state=None)
# y.shape = (i,1)
# Y = np.hstack((X,y))
# np.savetxt('./1mx' + str(f) + '_hastie_10_2.data', Y, delimiter=',', fmt='%.2f');
def glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs=30):
print "\nStarting parse of", csvFilename
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='put', hex_key=csvFilename + ".hex", timeoutSecs=20)
y = "10"
# NOTE: hastie has two values, -1 and 1. To make H2O work when the response is two-valued and not 0/1, the response may need special handling.
kwargs = {
'response': y,
'max_iter': 10,
'n_folds': 2,
'lambda': '1e-8,1e-4,1e-3',
'alpha': '0,0.25,0.8',
}
start = time.time()
print "\nStarting GLMGrid of", csvFilename
glmGridResult = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "GLMGrid in", (time.time() - start), "secs (python)"
# still get zero coeffs..best model is AUC = 0.5 with intercept only.
h2o_glm.simpleCheckGLMGrid(self,glmGridResult, allowZeroCoeff=True,**kwargs)
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(2,java_heap_GB=5)
global SYNDATASETS_DIR
SYNDATASETS_DIR = h2o.make_syn_dir()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2grid_hastie(self):
# gunzip it and cat it to create 2x and 4x replications in SYNDATASETS_DIR
# FIX! eventually we'll compare the 1x, 2x and 4x results like we do
# in other tests. (catdata?)
bucket = 'home-0xdiag-datasets'
csvFilename = "1mx10_hastie_10_2.data.gz"
csvPathname = 'standard' + '/' + csvFilename
glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs=300)
fullPathname = h2i.find_folder_and_filename('home-0xdiag-datasets', csvPathname, returnFullPath=True)
filename1x = "hastie_1x.data"
pathname1x = SYNDATASETS_DIR + '/' + filename1x
h2o_util.file_gunzip(fullPathname, pathname1x)
filename2x = "hastie_2x.data"
pathname2x = SYNDATASETS_DIR + '/' + filename2x
h2o_util.file_cat(pathname1x,pathname1x,pathname2x)
glm_doit(self, filename2x, None, pathname2x, timeoutSecs=300)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
sseyler/PSAnalysisTutorial | pair_id.py | 2 | 6738 | import pandas as pd
import numpy as np
import itertools as it
class PairID(object):
"""Identify simulations in :class:`PSA` by method names and run numbers.
This is a rough convenience class that facilitates intuitive access to the
data generated by :class:`PSA`. PSA is based on performing all-pairs
comparisons, so that the data generated for a comparison between a pair of
simulations can be identified by (distance) matrix indices. Since the data
that :class:`PSA` can generate between a pair of simulations can be very
heterogeneous (i.e., data from Hausdorff pairs analyses), the data for all
the pairs is stored sequentially in a one-dimensional list whose order is
identical to the corresponding distance vector (of the distance matrix).
Simulations are added with :meth:`PairID.add_sim`, which takes the
name of the method and a list-like sequence of numbers corresponding the
runs that were performed. Once all simulations are added, the data
generated for a given pair of simulations can be accessed by first using
:meth:`PairID.get_pair_id` to get the comparison index and then
using that index to extract the data in :class:`PSA` stored in distance
vector form.
Notes::
1) The names and run labeling used for method that are added to
PairID do not need to be identical to those used for the
corresponding analysis in PSA. However, it is useful to keep the naming
scheme similar so that one can correctly identify simulations by name.
2) Currently, there is no mechanism to remove simulations from
:class:`PairID`, which requires modifying the simulation IDs in a
predictable manner. This feature may be added in the future. A user should
add simulations in a way that corresponds to how the simulations were added
to :class:`PSA`.
Example::
Obtain the frames corresponding to the Hausdorff pair of the second DIMS
simulation (i.e., DIMS 2) and third rTMD-F (i.e., rTMD-F 3) simulation
among a total of four methods with three runs each (consecutively labeled
'1','2','3'):
>>> method_names = ['DIMS','FRODA','rTMD-F','rTMD-S']
>>> identifier = PairID()
>>> for name in method_names:
>>> identifier.add_sim(name, [1,2,3])
>>> ID = identifier.get_pair_id('DIMS 2', 'rTMD-F 3')
>>> # Assume the simulations have been added to PSA as MDAnalysis Universes
>>> # in the above order (DIMS 1, ..., DIMS 3, FRODA 1, ..., FRODA 3, ...,
rTMD-S 2, rTMD-S 3)
>>> psa_hpa = PSA(universes, path_select='name CA')
>>> psa_hpa.generate_paths()
>>> psa_hpa.run_hausdorff_pairs_analysis(hausdorff_pairs=True)
>>> psa_hpa.HP['frames']
"""
def __init__(self):
"""Initialize a :class:`PairID` object.
Sets up labels for method names and run labels (IDs) and initializes a
pandas DataFrame object.
"""
self.ilbl = ['Name', 'Run ID']
self.clbl = ['Sim ID']
self.data = pd.DataFrame()
self.num_sims = 0
self.num_methods = 0
def add_sim(self, method, run_ids):
"""Add a simulation method and its run label to :class:`PairID`.
:Arguments:
*method*
string, name of the simulation method
*run_ids*
array-like, the number labels of the runs performed for *method*
"""
num_new_sims = len(run_ids)
tuples = list(it.product([method], run_ids))
df_idx = pd.MultiIndex.from_tuples(tuples, names=self.ilbl)
sim_ids = np.asarray(xrange(num_new_sims)) + self.num_sims
df_new = pd.DataFrame(sim_ids, df_idx, self.clbl)
self.data = self.data.append(df_new)
self.num_sims += num_new_sims
self.num_methods += 1 #len(self.data[self.column[0]].count())
def dvectorform(self, i, j):
"""Convert simulation IDs to a Pair ID.
Simulations added to :class:`PairID` are indexed by a unique
integer ID. Two integer IDs correspond to a location in the matrix
representing all comparisons between pairs of simulations in PSA. The
comparison matrix indices are converted to an index in a corresponding
comparison vector analogously to conversion between a distance matrix
and a distance vector.
:Arguments:
*i*
int, row index
*j*
int, column index
:Returns:
int, the Pair ID of the pair of simulations *i* and *j*
"""
if i == j:
raise ValueError("Indices cannot have the same value.")
if j < i:
temp, i = i, j
j = temp
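# The expression below maps the pair (i, j), with i < j, to its condensed
# (distance-vector) index -- the same ordering scipy.spatial.distance's
# squareform uses for the upper triangle of a num_sims x num_sims matrix.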
return (self.num_sims*i) + j - (i+2)*(i+1)/2
def get_pair_id(self, sim1, sim2, vectorform=True):
"""Get the Pair ID of a pair of simulations.
Note: the names of simulations are assumed to take the following form:
'<Name> <Run ID>', e.g., 'DIMS 1' or 'rTMD-S 2'.
:Arguments:
*sim1*
string, name of first simulation in comparison
*sim2*
string, name of second simulation in comparison
:Returns:
int, the Pair ID of the comparison between *sim1* and *sim2*
"""
i, j = self.get_sim_id(sim1), self.get_sim_id(sim2)
try:
return self.dvectorform(i, j) if vectorform else (i, j)
except ValueError:
print("Must enter two different simulations.")
def get_sim_id(self, sim):
"""Obtain the simulation ID of a given simulation.
Note: the names of simulations are assumed to take the following form:
'<Name> <Run ID>', e.g., 'DIMS 1' or 'rTMD-S 2'.
:Arguments:
*sim*
string, full simulation name with run label separated by a space
:Returns:
int, the simulation ID
"""
sim_tuple = self._str2tup(sim)
return self.data.loc[sim_tuple, self.clbl[0]]
def _str2tup(self, name):
"""Return a string of the form '<name> <number>' as a tuple with the
form ('<name>', <number>).
:Arguments:
*name*
string, name and number separated by a space
:Returns:
(string, int), the tuple containing the name and number
"""
method, run_id = name.split()
return method, int(run_id)
def get_num_sims(self):
"""
:Returns:
int, total number of simulations in :class:`PairID`
"""
return self.num_sims
def get_num_methods(self):
"""
:Returns:
int, total number of simulation methods in :class:`PairID`
"""
return self.num_methods | gpl-3.0 |
johnson1228/pymatgen | pymatgen/analysis/chemenv/coordination_environments/structure_environments.py | 1 | 89627 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module contains objects that are used to describe the environments in a structure. The most detailed object
(StructureEnvironments) contains a very thorough analysis of the environments of a given atom but is difficult to
use as such. The LightStructureEnvironments object is a lighter version that is obtained by applying a "strategy"
to the StructureEnvironments object. Basically, the LightStructureEnvironments provides the coordination environment(s)
and possibly the fraction(s) corresponding to these.
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "[email protected]"
__date__ = "Feb 20, 2016"
import numpy as np
from collections import OrderedDict
from pymatgen.core.sites import PeriodicSite
from monty.json import MSONable, MontyDecoder
from pymatgen.core.periodic_table import Element, Specie
from pymatgen.core.structure import Structure
from monty.json import jsanitize
from pymatgen.analysis.chemenv.coordination_environments.voronoi import DetailedVoronoiContainer
from pymatgen.analysis.chemenv.utils.chemenv_errors import ChemenvError
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from pymatgen.analysis.chemenv.utils.defs_utils import AdditionalConditions
allcg = AllCoordinationGeometries()
symbol_cn_mapping = allcg.get_symbol_cn_mapping()
class StructureEnvironments(MSONable):
"""
Class used to store the chemical environments of a given structure.
"""
AC = AdditionalConditions()
class NeighborsSet():
"""
Class used to store a given set of neighbors of a given site (based on the detailed_voronoi).
"""
def __init__(self, structure, isite, detailed_voronoi, site_voronoi_indices, sources=None):
self.structure = structure
self.isite = isite
self.detailed_voronoi = detailed_voronoi
self.voronoi = detailed_voronoi.voronoi_list2[isite]
myset = set(site_voronoi_indices)
if len(myset) != len(site_voronoi_indices):
raise ValueError('Set of neighbors contains duplicates !')
self.site_voronoi_indices = sorted(myset)
if sources is None:
self.sources = [{'origin': 'UNKNOWN'}]
elif isinstance(sources, list):
self.sources = sources
else:
self.sources = [sources]
def get_neighb_voronoi_indices(self, permutation):
return [self.site_voronoi_indices[ii] for ii in permutation]
@property
def neighb_coords(self):
return [self.voronoi[inb]['site'].coords for inb in self.site_voronoi_indices]
@property
def neighb_sites(self):
return [self.voronoi[inb]['site'] for inb in self.site_voronoi_indices]
@property
def neighb_sites_and_indices(self):
return [{'site': self.voronoi[inb]['site'],
'index': self.voronoi[inb]['index']} for inb in self.site_voronoi_indices]
@property
def coords(self):
coords = [self.structure[self.isite].coords]
coords.extend(self.neighb_coords)
return coords
@property
def normalized_distances(self):
return [self.voronoi[inb]['normalized_distance'] for inb in self.site_voronoi_indices]
@property
def normalized_angles(self):
return [self.voronoi[inb]['normalized_angle'] for inb in self.site_voronoi_indices]
@property
def distances(self):
return [self.voronoi[inb]['distance'] for inb in self.site_voronoi_indices]
@property
def angles(self):
return [self.voronoi[inb]['angle'] for inb in self.site_voronoi_indices]
@property
def sphere_fraction_angles(self):
return [0.25*self.voronoi[inb]['angle']/np.pi for inb in self.site_voronoi_indices]
@property
def info(self):
was = self.normalized_angles
wds = self.normalized_distances
angles = self.angles
distances = self.distances
return {'normalized_angles': was,
'normalized_distances': wds,
'normalized_angles_sum': np.sum(was),
'normalized_angles_mean': np.mean(was),
'normalized_angles_std': np.std(was),
'normalized_angles_min': np.min(was),
'normalized_angles_max': np.max(was),
'normalized_distances_mean': np.mean(wds),
'normalized_distances_std': np.std(wds),
'normalized_distances_min': np.min(wds),
'normalized_distances_max': np.max(wds),
'angles': angles,
'distances': distances,
'angles_sum': np.sum(angles),
'angles_mean': np.mean(angles),
'angles_std': np.std(angles),
'angles_min': np.min(angles),
'angles_max': np.max(angles),
'distances_mean': np.mean(distances),
'distances_std': np.std(distances),
'distances_min': np.min(distances),
'distances_max': np.max(distances)
}
def distance_plateau(self):
all_nbs_normalized_distances_sorted = sorted([nb['normalized_distance'] for nb in self.voronoi],
reverse=True)
maxdist = np.max(self.normalized_distances)
plateau = None
for idist, dist in enumerate(all_nbs_normalized_distances_sorted):
if np.isclose(dist, maxdist,
rtol=0.0, atol=self.detailed_voronoi.normalized_distance_tolerance):
if idist == 0:
plateau = np.inf
else:
plateau = all_nbs_normalized_distances_sorted[idist-1] - maxdist
break
if plateau is None:
raise ValueError('Plateau not found ...')
return plateau
def angle_plateau(self):
all_nbs_normalized_angles_sorted = sorted([nb['normalized_angle'] for nb in self.voronoi])
minang = np.min(self.normalized_angles)
print('minang', minang)
print('all_nbs_normalized_angles_sorted', all_nbs_normalized_angles_sorted)
for nb in self.voronoi:
print(nb)
plateau = None
for iang, ang in enumerate(all_nbs_normalized_angles_sorted):
if np.isclose(ang, minang,
rtol=0.0, atol=self.detailed_voronoi.normalized_angle_tolerance):
if iang == 0:
plateau = minang
else:
plateau = minang - all_nbs_normalized_angles_sorted[iang-1]
break
if plateau is None:
raise ValueError('Plateau not found ...')
return plateau
def voronoi_grid_surface_points(self, additional_condition=1, other_origins='DO_NOTHING'):
"""
Get the surface points in the Voronoi grid for this neighbors set, from its sources.
The general shape of the points should look like a staircase such as in the following figure :
^
0.0|
|
| B----C
| | |
| | |
a | k D-------E
n | | |
g | | |
l | | |
e | j F----n---------G
| | |
| | |
| A----g-------h----i---------H
|
|
1.0+------------------------------------------------->
1.0 distance 2.0 ->+Inf
:param additional_condition: Additional condition for the neighbors.
:param other_origins: What to do with sources that do not come from the Voronoi grid (e.g. "from hints")
"""
mysrc = []
for src in self.sources:
if src['origin'] == 'dist_ang_ac_voronoi':
if src['ac'] != additional_condition:
continue
mysrc.append(src)
else:
if other_origins == 'DO_NOTHING':
continue
else:
raise NotImplementedError('Nothing implemented for other sources ...')
if len(mysrc) == 0:
return None
dists = [src['dp_dict']['min'] for src in mysrc]
angs = [src['ap_dict']['max'] for src in mysrc]
next_dists = [src['dp_dict']['next'] for src in mysrc]
next_angs = [src['ap_dict']['next'] for src in mysrc]
points_dict = OrderedDict()
pdists = []
pangs = []
for isrc in range(len(mysrc)):
if not any(np.isclose(pdists, dists[isrc])):
pdists.append(dists[isrc])
if not any(np.isclose(pdists, next_dists[isrc])):
pdists.append(next_dists[isrc])
if not any(np.isclose(pangs, angs[isrc])):
pangs.append(angs[isrc])
if not any(np.isclose(pangs, next_angs[isrc])):
pangs.append(next_angs[isrc])
d1_indices = np.argwhere(np.isclose(pdists, dists[isrc])).flatten()
if len(d1_indices) != 1:
raise ValueError('Distance parameter not found ...')
d2_indices = np.argwhere(np.isclose(pdists, next_dists[isrc])).flatten()
if len(d2_indices) != 1:
raise ValueError('Distance parameter not found ...')
a1_indices = np.argwhere(np.isclose(pangs, angs[isrc])).flatten()
if len(a1_indices) != 1:
raise ValueError('Angle parameter not found ...')
a2_indices = np.argwhere(np.isclose(pangs, next_angs[isrc])).flatten()
if len(a2_indices) != 1:
raise ValueError('Angle parameter not found ...')
id1 = d1_indices[0]
id2 = d2_indices[0]
ia1 = a1_indices[0]
ia2 = a2_indices[0]
for id_ia in [(id1, ia1), (id1, ia2), (id2, ia1), (id2, ia2)]:
if id_ia not in points_dict:
points_dict[id_ia] = 0
points_dict[id_ia] += 1
new_pts = []
for pt, pt_nb in points_dict.items():
if pt_nb % 2 == 1:
new_pts.append(pt)
sorted_points = [(0, 0)]
move_ap_index = True
while True:
last_pt = sorted_points[-1]
if move_ap_index: # "Move" the angle parameter
idp = last_pt[0]
iap = None
for pt in new_pts:
if pt[0] == idp and pt != last_pt:
iap = pt[1]
break
else: # "Move" the distance parameter
idp = None
iap = last_pt[1]
for pt in new_pts:
if pt[1] == iap and pt != last_pt:
idp = pt[0]
break
if (idp, iap) == (0, 0):
break
if (idp, iap) in sorted_points:
raise ValueError('Error sorting points ...')
sorted_points.append((idp, iap))
move_ap_index = not move_ap_index
points = [(pdists[idp], pangs[iap]) for (idp, iap) in sorted_points]
return points
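# --- Illustrative sketch (not part of the original class) ----------------------
# voronoi_grid_surface_points() keeps only grid corners that occur an odd number
# of times among the per-source rectangles: corners shared by two rectangles
# cancel out and only the boundary of the union (the "staircase") survives.
# Toy parity check with two hypothetical side-by-side unit rectangles whose
# union is a single 2x1 rectangle:
def _odd_corner_parity_check():
    from collections import Counter
    rect1 = [(0, 0), (0, 1), (1, 0), (1, 1)]
    rect2 = [(1, 0), (1, 1), (2, 0), (2, 1)]
    counts = Counter(rect1 + rect2)
    boundary = sorted(pt for pt, n in counts.items() if n % 2 == 1)
    return boundary == [(0, 0), (0, 1), (2, 0), (2, 1)]  # corners of the union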
@property
def source(self):
if len(self.sources) != 1:
raise RuntimeError('Number of sources different from 1 !')
return self.sources[0]
def add_source(self, source):
if source not in self.sources:
self.sources.append(source)
def __len__(self):
return len(self.site_voronoi_indices)
def __hash__(self):
return len(self.site_voronoi_indices)
def __eq__(self, other):
return self.isite == other.isite and self.site_voronoi_indices == other.site_voronoi_indices
def __ne__(self, other):
return not self == other
def __str__(self):
out = 'Neighbors Set for site #{:d} :\n'.format(self.isite)
out += ' - Coordination number : {:d}\n'.format(len(self))
out += ' - Voronoi indices : {}\n'.format(', '.join(['{:d}'.format(site_voronoi_index)
for site_voronoi_index in self.site_voronoi_indices]))
return out
def as_dict(self):
return {'isite': self.isite,
'site_voronoi_indices': self.site_voronoi_indices,
'sources': self.sources}
@classmethod
def from_dict(cls, dd, structure, detailed_voronoi):
return cls(structure=structure,
isite=dd['isite'],
detailed_voronoi=detailed_voronoi,
site_voronoi_indices=dd['site_voronoi_indices'],
sources=dd['sources'])
def __init__(self, voronoi, valences, sites_map, equivalent_sites,
ce_list, structure, neighbors_sets=None, info=None):
"""
Constructor for the StructureEnvironments object.
:param voronoi: VoronoiContainer object for the structure
:param valences: Valences provided
:param sites_map: Mapping of equivalent sites to the inequivalent sites that have been computed.
:param equivalent_sites: List of lists of equivalent sites of the structure
:param ce_list: List of chemical environments
:param structure: Structure object
:param neighbors_sets: List of neighbors sets for each site of the structure (or None)
:param info: Additional information about this StructureEnvironments object
"""
self.voronoi = voronoi
self.valences = valences
self.sites_map = sites_map
self.equivalent_sites = equivalent_sites
#self.struct_sites_to_irreducible_site_list_map = struct_sites_to_irreducible_site_list_map
self.ce_list = ce_list
self.structure = structure
if neighbors_sets is None:
self.neighbors_sets = [None] * len(self.structure)
else:
self.neighbors_sets = neighbors_sets
self.info = info
def init_neighbors_sets(self, isite, additional_conditions=None, valences=None):
site_voronoi = self.voronoi.voronoi_list2[isite]
if site_voronoi is None:
return
if additional_conditions is None:
additional_conditions = self.AC.ALL
if (self.AC.ONLY_ACB in additional_conditions or
self.AC.ONLY_ACB_AND_NO_E2SEB) and valences is None:
raise ChemenvError('StructureEnvironments', 'init_neighbors_sets',
'Valences are not given while only_anion_cation_bonds are allowed. Cannot continue')
site_distance_parameters = self.voronoi.neighbors_normalized_distances[isite]
site_angle_parameters = self.voronoi.neighbors_normalized_angles[isite]
# Precompute distance conditions
distance_conditions = []
for idp, dp_dict in enumerate(site_distance_parameters):
distance_conditions.append([])
for inb, voro_nb_dict in enumerate(site_voronoi):
cond = inb in dp_dict['nb_indices']
distance_conditions[idp].append(cond)
# Precompute angle conditions
angle_conditions = []
for iap, ap_dict in enumerate(site_angle_parameters):
angle_conditions.append([])
for inb, voro_nb_dict in enumerate(site_voronoi):
cond = inb in ap_dict['nb_indices']
angle_conditions[iap].append(cond)
# Precompute additional conditions
precomputed_additional_conditions = {ac: [] for ac in additional_conditions}
for inb, voro_nb_dict in enumerate(site_voronoi):
for ac in additional_conditions:
cond = self.AC.check_condition(condition=ac, structure=self.structure,
parameters={'valences': valences,
'neighbor_index': voro_nb_dict['index'],
'site_index': isite})
precomputed_additional_conditions[ac].append(cond)
# Add the neighbors sets based on the distance/angle/additional parameters
for idp, dp_dict in enumerate(site_distance_parameters):
for iap, ap_dict in enumerate(site_angle_parameters):
for iac, ac in enumerate(additional_conditions):
src = {'origin': 'dist_ang_ac_voronoi',
'idp': idp, 'iap': iap, 'dp_dict': dp_dict, 'ap_dict': ap_dict,
'iac': iac, 'ac': ac, 'ac_name': self.AC.CONDITION_DESCRIPTION[ac]}
site_voronoi_indices = [inb for inb, voro_nb_dict in enumerate(site_voronoi)
if (distance_conditions[idp][inb] and
angle_conditions[iap][inb] and
precomputed_additional_conditions[ac][inb])]
nb_set = self.NeighborsSet(structure=self.structure,
isite=isite,
detailed_voronoi=self.voronoi,
site_voronoi_indices=site_voronoi_indices,
sources=src)
self.add_neighbors_set(isite=isite, nb_set=nb_set)
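# --- Illustrative sketch (not part of the original class) ----------------------
# init_neighbors_sets() builds one candidate neighbors set per (distance, angle,
# additional-condition) triple by intersecting three precomputed boolean masks.
# Hypothetical standalone illustration of that selection pattern for 4 neighbors:
def _mask_intersection_demo():
    import numpy as np
    dist_ok = np.array([True, True, False, True])
    ang_ok = np.array([True, False, True, True])
    ac_ok = np.array([True, True, True, False])
    kept = np.flatnonzero(dist_ok & ang_ok & ac_ok).tolist()
    return kept  # -> [0]: only the first neighbor satisfies all three conditions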
def add_neighbors_set(self, isite, nb_set):
if self.neighbors_sets[isite] is None:
self.neighbors_sets[isite] = {}
self.ce_list[isite] = {}
cn = len(nb_set)
if cn not in self.neighbors_sets[isite]:
self.neighbors_sets[isite][cn] = []
self.ce_list[isite][cn] = []
try:
nb_set_index = self.neighbors_sets[isite][cn].index(nb_set)
self.neighbors_sets[isite][cn][nb_set_index].add_source(nb_set.source)
except ValueError:
self.neighbors_sets[isite][cn].append(nb_set)
self.ce_list[isite][cn].append(None)
def update_coordination_environments(self, isite, cn, nb_set, ce):
if self.ce_list[isite] is None:
self.ce_list[isite] = {}
if cn not in self.ce_list[isite]:
self.ce_list[isite][cn] = []
try:
nb_set_index = self.neighbors_sets[isite][cn].index(nb_set)
except ValueError:
raise ValueError('Neighbors set not found in the structure environments')
if nb_set_index == len(self.ce_list[isite][cn]):
self.ce_list[isite][cn].append(ce)
elif nb_set_index < len(self.ce_list[isite][cn]):
self.ce_list[isite][cn][nb_set_index] = ce
else:
raise ValueError('Neighbors set not yet in ce_list !')
def update_site_info(self, isite, info_dict):
if 'sites_info' not in self.info:
self.info['sites_info'] = [{} for _ in range(len(self.structure))]
self.info['sites_info'][isite].update(info_dict)
def get_coordination_environments(self, isite, cn, nb_set):
if self.ce_list[isite] is None:
return None
if cn not in self.ce_list[isite]:
return None
try:
nb_set_index = self.neighbors_sets[isite][cn].index(nb_set)
except ValueError:
return None
return self.ce_list[isite][cn][nb_set_index]
def get_csm(self, isite, mp_symbol):
csms = self.get_csms(isite, mp_symbol)
if len(csms) != 1:
raise ChemenvError('StructureEnvironments',
'get_csm',
'Number of csms for site #{} with '
'mp_symbol "{}" = {}'.format(str(isite),
mp_symbol,
str(len(csms))))
return csms[0]
def get_csms(self, isite, mp_symbol):
"""
Returns the continuous symmetry measure(s) of site with index isite with respect to the
perfect coordination environment with mp_symbol. For some environments, a given mp_symbol might not
be available (if there are no voronoi parameters leading to a number of neighbours corresponding to
the coordination number of environment mp_symbol). For some environments, a given mp_symbol might
lead to more than one csm (when two or more different voronoi parameters lead to different neighbours
but with the same number of neighbours).
:param isite: Index of the site
:param mp_symbol: MP symbol of the perfect environment for which the csm has to be given
:return: List of csms for site isite with respect to geometry mp_symbol
"""
cn = symbol_cn_mapping[mp_symbol]
if cn not in self.ce_list[isite]:
return []
else:
return [envs[mp_symbol] for envs in self.ce_list[isite][cn]]
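# --- Illustrative usage sketch (not part of the original class) ----------------
# Assumes `se` is a StructureEnvironments instance computed upstream (e.g. by a
# LocalGeometryFinder workflow); 'T:4' is the chemenv mp_symbol of the tetrahedron.
def _print_tetrahedral_csms(se, isite=0):
    for cg_dict in se.get_csms(isite, 'T:4'):
        print(cg_dict['symmetry_measure'])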
def plot_csm_and_maps(self, isite, max_csm=8.0):
"""
Plotting of the continuous symmetry measures of the environments found for each neighbors set of a given
site, together with the normalized distance and angle parameters of each neighbors set.
:param isite: Index of the site for which the plot has to be done
:param max_csm: Only environments with a continuous symmetry measure lower than this value are shown
:return: Nothing returned, just plot the figure
"""
try:
import matplotlib.pyplot as plt
except ImportError:
print('Plotting Chemical Environments requires matplotlib ... exiting "plot" function')
return
fig = self.get_csm_and_maps(isite=isite, max_csm=max_csm)
if fig is None:
return
plt.show()
def get_csm_and_maps(self, isite, max_csm=8.0, figsize=None, symmetry_measure_type=None):
try:
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
except ImportError:
print('Plotting Chemical Environments requires matplotlib ... exiting "plot" function')
return
if symmetry_measure_type is None:
symmetry_measure_type = 'csm_wcs_ctwcc'
# Initializes the figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
gs = GridSpec(2, 1, hspace=0.0, wspace=0.0)
subplot = fig.add_subplot(gs[:])
subplot_distang = subplot.twinx()
ix = 0
cn_maps = []
all_wds = []
all_was = []
max_wd = 0.0
for cn, nb_sets in self.neighbors_sets[isite].items():
for inb_set, nb_set in enumerate(nb_sets):
ce = self.ce_list[isite][cn][inb_set]
if ce is None:
continue
mingeoms = ce.minimum_geometries(max_csm=max_csm)
if len(mingeoms) == 0:
continue
wds = nb_set.normalized_distances
max_wd = max(max_wd, max(wds))
all_wds.append(wds)
all_was.append(nb_set.normalized_angles)
for mp_symbol, cg_dict in mingeoms:
csm = cg_dict['other_symmetry_measures'][symmetry_measure_type]
subplot.plot(ix, csm, 'ob')
subplot.annotate(mp_symbol, xy = (ix, csm))
cn_maps.append((cn, inb_set))
ix += 1
if max_wd < 1.225:
ymax_wd = 1.25
yticks_wd = np.linspace(1.0, ymax_wd, 6)
elif max_wd < 1.36:
ymax_wd = 1.4
yticks_wd = np.linspace(1.0, ymax_wd, 5)
elif max_wd < 1.45:
ymax_wd = 1.5
yticks_wd = np.linspace(1.0, ymax_wd, 6)
elif max_wd < 1.55:
ymax_wd = 1.6
yticks_wd = np.linspace(1.0, ymax_wd, 7)
elif max_wd < 1.75:
ymax_wd = 1.8
yticks_wd = np.linspace(1.0, ymax_wd, 5)
elif max_wd < 1.95:
ymax_wd = 2.0
yticks_wd = np.linspace(1.0, ymax_wd, 6)
elif max_wd < 2.35:
ymax_wd = 2.5
yticks_wd = np.linspace(1.0, ymax_wd, 7)
else:
ymax_wd = np.ceil(1.1*max_wd)
yticks_wd = np.linspace(1.0, ymax_wd, 6)
yticks_wa = np.linspace(0.0, 1.0, 6)
frac_bottom = 0.05
frac_top = 0.05
frac_middle = 0.1
yamin = frac_bottom
yamax = 0.5 - frac_middle / 2
ydmin = 0.5 + frac_middle / 2
ydmax = 1.0 - frac_top
def yang(wa):
return (yamax-yamin) * np.array(wa) + yamin
def ydist(wd):
return (np.array(wd) - 1.0) / (ymax_wd - 1.0) * (ydmax - ydmin) + ydmin
for ix, was in enumerate(all_was):
subplot_distang.plot(0.2+ix*np.ones_like(was), yang(was), '<g')
if np.mod(ix, 2) == 0:
alpha = 0.3
else:
alpha = 0.1
subplot_distang.fill_between([-0.5+ix, 0.5+ix],
[1.0, 1.0], 0.0,
facecolor='k', alpha=alpha, zorder=-1000)
for ix, wds in enumerate(all_wds):
subplot_distang.plot(0.2+ix*np.ones_like(wds), ydist(wds), 'sm')
subplot_distang.plot([-0.5, len(cn_maps)], [0.5, 0.5], 'k--', alpha=0.5)
yticks = yang(yticks_wa).tolist()
yticks.extend(ydist(yticks_wd).tolist())
yticklabels = yticks_wa.tolist()
yticklabels.extend(yticks_wd.tolist())
subplot_distang.set_yticks(yticks)
subplot_distang.set_yticklabels(yticklabels)
fake_subplot_ang = fig.add_subplot(gs[1], frame_on=False)
fake_subplot_dist = fig.add_subplot(gs[0], frame_on=False)
fake_subplot_ang.set_yticks([])
fake_subplot_dist.set_yticks([])
fake_subplot_ang.set_xticks([])
fake_subplot_dist.set_xticks([])
fake_subplot_ang.set_ylabel('Angle parameter', labelpad=45, rotation=-90)
fake_subplot_dist.set_ylabel('Distance parameter', labelpad=45, rotation=-90)
fake_subplot_ang.yaxis.set_label_position("right")
fake_subplot_dist.yaxis.set_label_position("right")
subplot_distang.set_ylim([0.0, 1.0])
subplot.set_xticks(range(len(cn_maps)))
subplot.set_ylabel('Continuous symmetry measure')
subplot.set_xlim([-0.5, len(cn_maps)-0.5])
subplot_distang.set_xlim([-0.5, len(cn_maps)-0.5])
subplot.set_xticklabels([str(cn_map) for cn_map in cn_maps])
return fig, subplot
def get_environments_figure(self, isite, plot_type=None, title='Coordination numbers', max_dist=2.0,
additional_condition=AC.ONLY_ACB, colormap=None, figsize=None,
strategy=None):
"""
Plotting of the coordination environments of a given site for all the distfactor/angfactor regions. The
chemical environment with the lowest continuous symmetry measure is shown for each distfactor/angfactor
region, encoded as the color of that region (using a colormap).
:param isite: Index of the site for which the plot has to be done
:param plot_type: How to plot the coordinations
:param title: Title for the figure
:param max_dist: Maximum distance to be plotted when the plotting of the distance is set to 'initial_normalized'
or 'initial_real' (Warning: this is not the same meaning in both cases! In the first case,
the closest atom lies at a "normalized" distance of 1.0, so that 2.0 means twice this
normalized distance, while in the second case the real distance is used)
:param figsize: Size of the figure to be plotted
:return: The figure object to be plotted or saved to file
"""
try:
import matplotlib.pyplot as mpl
from matplotlib import cm
from matplotlib.colors import Normalize, LinearSegmentedColormap, ListedColormap
from matplotlib.patches import Rectangle, Polygon
except ImportError:
print('Plotting Chemical Environments requires matplotlib ... exiting "plot" function')
return
#Initializes the figure
if figsize is None:
fig = mpl.figure()
else:
fig = mpl.figure(figsize=figsize)
subplot = fig.add_subplot(111)
#Initializes the distance and angle parameters
if plot_type is None:
plot_type = {'distance_parameter': ('initial_normalized', None),
'angle_parameter': ('initial_normalized_inverted', None)}
if colormap is None:
mycm = cm.jet
else:
mycm = colormap
mymin = 0.0
mymax = 10.0
norm = Normalize(vmin=mymin, vmax=mymax)
scalarmap = cm.ScalarMappable(norm=norm, cmap=mycm)
dist_limits = [1.0, max_dist]
ang_limits = [0.0, 1.0]
if plot_type['distance_parameter'][0] == 'one_minus_inverse_alpha_power_n':
if plot_type['distance_parameter'][1] is None:
exponent = 3
else:
exponent = plot_type['distance_parameter'][1]['exponent']
xlabel = 'Distance parameter : $1.0-\\frac{{1.0}}{{\\alpha^{{{:d}}}}}$'.format(exponent)
def dp_func(dp):
return 1.0-1.0/np.power(dp, exponent)
elif plot_type['distance_parameter'][0] == 'initial_normalized':
xlabel = 'Distance parameter : $\\alpha$'
def dp_func(dp):
return dp
else:
raise ValueError('Wrong value for distance parameter plot type "{}"'.
format(plot_type['distance_parameter'][0]))
if plot_type['angle_parameter'][0] == 'one_minus_gamma':
ylabel = 'Angle parameter : $1.0-\\gamma$'
def ap_func(ap):
return 1.0-ap
elif plot_type['angle_parameter'][0] in ['initial_normalized_inverted', 'initial_normalized']:
ylabel = 'Angle parameter : $\\gamma$'
def ap_func(ap):
return ap
else:
raise ValueError('Wrong value for angle parameter plot type "{}"'.
format(plot_type['angle_parameter'][0]))
dist_limits = [dp_func(dp) for dp in dist_limits]
ang_limits = [ap_func(ap) for ap in ang_limits]
for cn, cn_nb_sets in self.neighbors_sets[isite].items():
for inb_set, nb_set in enumerate(cn_nb_sets):
nb_set_surface_pts = nb_set.voronoi_grid_surface_points()
if nb_set_surface_pts is None:
continue
ce = self.ce_list[isite][cn][inb_set]
if ce is None:
mycolor = 'w'
myinvcolor = 'k'
mytext = '{:d}'.format(cn)
else:
mingeom = ce.minimum_geometry()
if mingeom is not None:
mp_symbol = mingeom[0]
csm = mingeom[1]['symmetry_measure']
mycolor = scalarmap.to_rgba(csm)
myinvcolor = [1.0 - mycolor[0], 1.0 - mycolor[1], 1.0 - mycolor[2], 1.0]
mytext = '{}'.format(mp_symbol)
else:
mycolor = 'w'
myinvcolor = 'k'
mytext = '{:d}'.format(cn)
nb_set_surface_pts = [(dp_func(pt[0]), ap_func(pt[1])) for pt in nb_set_surface_pts]
polygon = Polygon(nb_set_surface_pts, closed=True, edgecolor='k', facecolor=mycolor, linewidth=1.2)
subplot.add_patch(polygon)
myipt = len(nb_set_surface_pts) / 2
ipt = int(myipt)
if myipt != ipt:
raise RuntimeError('Number of surface points not even')
patch_center = ((nb_set_surface_pts[0][0] + min(nb_set_surface_pts[ipt][0], dist_limits[1])) / 2,
(nb_set_surface_pts[0][1] + nb_set_surface_pts[ipt][1]) / 2)
if (np.abs(nb_set_surface_pts[-1][1] - nb_set_surface_pts[-2][1]) > 0.06 and
np.abs(min(nb_set_surface_pts[-1][0], dist_limits[1]) - nb_set_surface_pts[0][0]) > 0.125):
xytext = ((min(nb_set_surface_pts[-1][0], dist_limits[1]) + nb_set_surface_pts[0][0]) / 2,
(nb_set_surface_pts[-1][1] + nb_set_surface_pts[-2][1]) / 2)
subplot.annotate(mytext, xy=xytext,
ha='center', va='center', color=myinvcolor, fontsize='x-small')
elif (np.abs(nb_set_surface_pts[ipt][1] - nb_set_surface_pts[0][1]) > 0.1 and
np.abs(min(nb_set_surface_pts[ipt][0], dist_limits[1]) - nb_set_surface_pts[0][0]) > 0.125):
xytext = patch_center
subplot.annotate(mytext, xy=xytext,
ha='center', va='center', color=myinvcolor, fontsize='x-small')
subplot.set_title(title)
subplot.set_xlabel(xlabel)
subplot.set_ylabel(ylabel)
dist_limits.sort()
ang_limits.sort()
subplot.set_xlim(dist_limits)
subplot.set_ylim(ang_limits)
if strategy is not None:
try:
strategy.add_strategy_visualization_to_subplot(subplot=subplot)
except:
pass
if plot_type['angle_parameter'][0] == 'initial_normalized_inverted':
subplot.axes.invert_yaxis()
scalarmap.set_array([mymin, mymax])
cb = fig.colorbar(scalarmap, ax=subplot, extend='max')
cb.set_label('Continuous symmetry measure')
return fig, subplot
def plot_environments(self, isite, plot_type=None, title='Coordination numbers', max_dist=2.0,
additional_condition=AC.ONLY_ACB, figsize=None, strategy=None):
"""
Plotting of the coordination environments of a given site for all the distfactor/angfactor regions. The
chemical environment with the lowest continuous symmetry measure is shown for each distfactor/angfactor
region, encoded as the color of that region (using a colormap).
:param isite: Index of the site for which the plot has to be done
:param plot_type: How to plot the coordinations
:param title: Title for the figure
:param max_dist: Maximum distance to be plotted when the plotting of the distance is set to 'initial_normalized'
or 'initial_real' (Warning: this is not the same meaning in both cases! In the first case,
the closest atom lies at a "normalized" distance of 1.0, so that 2.0 means twice this
normalized distance, while in the second case the real distance is used)
:param figsize: Size of the figure to be plotted
:return: Nothing returned, just plot the figure
"""
fig, subplot = self.get_environments_figure(isite=isite, plot_type=plot_type, title=title, max_dist=max_dist,
additional_condition=additional_condition, figsize=figsize,
strategy=strategy)
if fig is None:
return
fig.show()
def save_environments_figure(self, isite, imagename='image.png', plot_type=None, title='Coordination numbers',
max_dist=2.0, additional_condition=AC.ONLY_ACB, figsize=None):
fig, subplot = self.get_environments_figure(isite=isite, plot_type=plot_type, title=title, max_dist=max_dist,
additional_condition=additional_condition, figsize=figsize)
if fig is None:
return
fig.savefig(imagename)
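# --- Illustrative helper sketch (not part of the original class) ---------------
# Assumes `se` is a StructureEnvironments instance and matplotlib is installed;
# writes one distance/angle environment map per site that has neighbors sets.
def _save_all_site_maps(se, prefix='site'):
    for isite in range(len(se.structure)):
        if se.neighbors_sets[isite] is None:
            continue
        se.save_environments_figure(isite=isite, imagename='{}_{:d}.png'.format(prefix, isite))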
def differences_wrt(self, other):
differences = []
if self.structure != other.structure:
differences.append({'difference': 'structure',
'comparison': '__eq__',
'self': self.structure,
'other': other.structure})
differences.append({'difference': 'PREVIOUS DIFFERENCE IS DISMISSIVE',
'comparison': 'differences_wrt'})
return differences
if self.valences != other.valences:
differences.append({'difference': 'valences',
'comparison': '__eq__',
'self': self.valences,
'other': other.valences})
if self.info != other.info:
differences.append({'difference': 'info',
'comparison': '__eq__',
'self': self.info,
'other': other.info})
if self.voronoi != other.voronoi:
if self.voronoi.is_close_to(other.voronoi):
differences.append({'difference': 'voronoi',
'comparison': '__eq__',
'self': self.voronoi,
'other': other.voronoi})
differences.append({'difference': 'PREVIOUS DIFFERENCE IS DISMISSIVE',
'comparison': 'differences_wrt'})
return differences
else:
differences.append({'difference': 'voronoi',
'comparison': 'is_close_to',
'self': self.voronoi,
'other': other.voronoi})
# TODO: make it possible to have "close" voronoi's
differences.append({'difference': 'PREVIOUS DIFFERENCE IS DISMISSIVE',
'comparison': 'differences_wrt'})
return differences
for isite, self_site_nb_sets in enumerate(self.neighbors_sets):
other_site_nb_sets = other.neighbors_sets[isite]
if self_site_nb_sets is None:
if other_site_nb_sets is None:
continue
else:
differences.append({'difference': 'neighbors_sets[isite={:d}]'.format(isite),
'comparison': 'has_neighbors',
'self': 'None',
'other': set(other_site_nb_sets.keys())})
continue
elif other_site_nb_sets is None:
differences.append({'difference': 'neighbors_sets[isite={:d}]'.format(isite),
'comparison': 'has_neighbors',
'self': set(self_site_nb_sets.keys()),
'other': 'None'})
continue
self_site_cns = set(self_site_nb_sets.keys())
other_site_cns = set(other_site_nb_sets.keys())
if self_site_cns != other_site_cns:
differences.append({'difference': 'neighbors_sets[isite={:d}]'.format(isite),
'comparison': 'coordination_numbers',
'self': self_site_cns,
'other': other_site_cns})
common_cns = self_site_cns.intersection(other_site_cns)
for cn in common_cns:
other_site_cn_nb_sets = other_site_nb_sets[cn]
self_site_cn_nb_sets = self_site_nb_sets[cn]
set_self_site_cn_nb_sets = set(self_site_cn_nb_sets)
set_other_site_cn_nb_sets = set(other_site_cn_nb_sets)
if set_self_site_cn_nb_sets != set_other_site_cn_nb_sets:
differences.append({'difference': 'neighbors_sets[isite={:d}][cn={:d}]'.format(isite, cn),
'comparison': 'neighbors_sets',
'self': self_site_cn_nb_sets,
'other': other_site_cn_nb_sets})
common_nb_sets = set_self_site_cn_nb_sets.intersection(set_other_site_cn_nb_sets)
for nb_set in common_nb_sets:
inb_set_self = self_site_cn_nb_sets.index(nb_set)
inb_set_other = other_site_cn_nb_sets.index(nb_set)
self_ce = self.ce_list[isite][cn][inb_set_self]
other_ce = other.ce_list[isite][cn][inb_set_other]
if self_ce != other_ce:
if self_ce.is_close_to(other_ce):
differences.append({'difference': 'ce_list[isite={:d}][cn={:d}]'
'[inb_set={:d}]'.format(isite, cn, inb_set_self),
'comparison': '__eq__',
'self': self_ce,
'other': other_ce})
else:
differences.append({'difference': 'ce_list[isite={:d}][cn={:d}]'
'[inb_set={:d}]'.format(isite, cn, inb_set_self),
'comparison': 'is_close_to',
'self': self_ce,
'other': other_ce})
return differences
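# --- Illustrative usage sketch (not part of the original class) ----------------
# Assumes `se1` and `se2` are StructureEnvironments objects computed elsewhere
# for the same structure; prints a one-line summary per difference found.
def _report_differences(se1, se2):
    for diff in se1.differences_wrt(se2):
        print('{}: compared with {}'.format(diff['difference'], diff['comparison']))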
def __eq__(self, other):
if len(self.ce_list) != len(other.ce_list):
return False
if self.voronoi != other.voronoi:
return False
if len(self.valences) != len(other.valences):
return False
if self.sites_map != other.sites_map:
return False
if self.equivalent_sites != other.equivalent_sites:
return False
if self.structure != other.structure:
return False
if self.info != other.info:
return False
for isite, site_ces in enumerate(self.ce_list):
site_nb_sets_self = self.neighbors_sets[isite]
site_nb_sets_other = other.neighbors_sets[isite]
if site_nb_sets_self != site_nb_sets_other:
return False
if site_ces != other.ce_list[isite]:
return False
return True
def __ne__(self, other):
return not self == other
def as_dict(self):
"""
Bson-serializable dict representation of the StructureEnvironments object.
:return: Bson-serializable dict representation of the StructureEnvironments object.
"""
ce_list_dict = [{str(cn): [ce.as_dict() if ce is not None else None for ce in ce_dict[cn]]
for cn in ce_dict} if ce_dict is not None else None for ce_dict in self.ce_list]
nbs_sets_dict = [{str(cn): [nb_set.as_dict() for nb_set in nb_sets]
for cn, nb_sets in site_nbs_sets.items()}
if site_nbs_sets is not None else None
for site_nbs_sets in self.neighbors_sets]
info_dict = {key: val for key, val in self.info.items() if key not in ['sites_info']}
info_dict['sites_info'] = [{'nb_sets_info': {str(cn): {str(inb_set): nb_set_info
for inb_set, nb_set_info in cn_sets.items()}
for cn, cn_sets in site_info['nb_sets_info'].items()},
'time': site_info['time']} if 'nb_sets_info' in site_info else {}
for site_info in self.info['sites_info']]
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"voronoi": self.voronoi.as_dict(),
"valences": self.valences,
"sites_map": self.sites_map,
"equivalent_sites": [[ps.as_dict() for ps in psl] for psl in self.equivalent_sites],
"ce_list": ce_list_dict,
"structure": self.structure.as_dict(),
"neighbors_sets": nbs_sets_dict,
"info": info_dict}
@classmethod
def from_dict(cls, d):
"""
Reconstructs the StructureEnvironments object from a dict representation of the StructureEnvironments created
using the as_dict method.
:param d: dict representation of the StructureEnvironments object
:return: StructureEnvironments object
"""
ce_list = [None if (ce_dict == 'None' or ce_dict is None) else {
int(cn): [None if (ced is None or ced == 'None') else
ChemicalEnvironments.from_dict(ced) for ced in ce_dict[cn]]
for cn in ce_dict} for ce_dict in d['ce_list']]
voronoi = DetailedVoronoiContainer.from_dict(d['voronoi'])
structure = Structure.from_dict(d['structure'])
neighbors_sets = [{int(cn): [cls.NeighborsSet.from_dict(dd=nb_set_dict,
structure=structure,
detailed_voronoi=voronoi)
for nb_set_dict in nb_sets]
for cn, nb_sets in site_nbs_sets_dict.items()}
if site_nbs_sets_dict is not None else None
for site_nbs_sets_dict in d['neighbors_sets']]
info = {key: val for key, val in d['info'].items() if key not in ['sites_info']}
if 'sites_info' in d['info']:
info['sites_info'] = [{'nb_sets_info': {int(cn): {int(inb_set): nb_set_info
for inb_set, nb_set_info in cn_sets.items()}
for cn, cn_sets in site_info['nb_sets_info'].items()},
'time': site_info['time']} if 'nb_sets_info' in site_info else {}
for site_info in d['info']['sites_info']]
return cls(voronoi=voronoi, valences=d['valences'],
sites_map=d['sites_map'],
equivalent_sites=[[PeriodicSite.from_dict(psd) for psd in psl] for psl in d['equivalent_sites']],
ce_list=ce_list, structure=structure,
neighbors_sets=neighbors_sets,
info=info)
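# --- Illustrative serialization sketch (not part of the original class) --------
# as_dict()/from_dict() are designed to round-trip the object; `se` is assumed
# to be a fully computed StructureEnvironments instance.
def _roundtrip_structure_environments(se):
    return StructureEnvironments.from_dict(se.as_dict())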
class LightStructureEnvironments(MSONable):
"""
Class used to store the chemical environments of a given structure obtained from a given ChemenvStrategy. Currently,
only strategies leading to the determination of a unique environment for each site are allowed.
This class does not store all the information contained in the StructureEnvironments object, only the coordination
environments found.
"""
DELTA_MAX_OXIDATION_STATE = 0.1
DEFAULT_STATISTICS_FIELDS = ['anion_list', 'anion_atom_list', 'cation_list', 'cation_atom_list',
'neutral_list', 'neutral_atom_list',
'atom_coordination_environments_present',
'ion_coordination_environments_present',
'fraction_atom_coordination_environments_present',
'fraction_ion_coordination_environments_present',
'coordination_environments_atom_present',
'coordination_environments_ion_present']
class NeighborsSet():
"""
Class used to store a given set of neighbors of a given site (based on a list of sites, the voronoi
container is not part of the LightStructureEnvironments object).
"""
def __init__(self, structure, isite, all_nbs_sites, all_nbs_sites_indices):
self.structure = structure
self.isite = isite
self.all_nbs_sites = all_nbs_sites
myset = set(all_nbs_sites_indices)
if len(myset) != len(all_nbs_sites_indices):
raise ValueError('Set of neighbors contains duplicates !')
self.all_nbs_sites_indices = sorted(myset)
self.all_nbs_sites_indices_unsorted = all_nbs_sites_indices
self.all_nbs_sites_indices_and_image = []
@property
def neighb_coords(self):
return [self.all_nbs_sites[inb]['site'].coords for inb in self.all_nbs_sites_indices_unsorted]
@property
def neighb_sites(self):
return [self.all_nbs_sites[inb]['site'] for inb in self.all_nbs_sites_indices_unsorted]
@property
def neighb_sites_and_indices(self):
return [{'site': self.all_nbs_sites[inb]['site'],
'index': self.all_nbs_sites[inb]['index']} for inb in self.all_nbs_sites_indices_unsorted]
@property
def neighb_indices_and_images(self):
return [{'index': self.all_nbs_sites[inb]['index'],
'image_cell': self.all_nbs_sites[inb]['image_cell']}
for inb in self.all_nbs_sites_indices_unsorted]
def __len__(self):
return len(self.all_nbs_sites_indices)
def __hash__(self):
return len(self.all_nbs_sites_indices)
def __eq__(self, other):
return self.isite == other.isite and self.all_nbs_sites_indices == other.all_nbs_sites_indices
def __ne__(self, other):
return not self == other
def __str__(self):
out = 'Neighbors Set for site #{:d} :\n'.format(self.isite)
out += ' - Coordination number : {:d}\n'.format(len(self))
out += ' - Neighbors sites indices : {}' \
'\n'.format(', '.join(['{:d}'.format(nb_list_index)
for nb_list_index in self.all_nbs_sites_indices]))
return out
def as_dict(self):
return {'isite': self.isite,
'all_nbs_sites_indices': self.all_nbs_sites_indices_unsorted}
# 'all_nbs_sites_indices_unsorted': self.all_nbs_sites_indices_unsorted}
@classmethod
def from_dict(cls, dd, structure, all_nbs_sites):
return cls(structure=structure,
isite=dd['isite'],
all_nbs_sites=all_nbs_sites,
all_nbs_sites_indices=dd['all_nbs_sites_indices'])
def __init__(self, strategy,
coordination_environments=None, all_nbs_sites=None, neighbors_sets=None,
structure=None, valences=None, valences_origin=None):
"""
Constructor for the LightStructureEnvironments object.
"""
self.strategy = strategy
self.statistics_dict = None
self.coordination_environments = coordination_environments
self._all_nbs_sites = all_nbs_sites
self.neighbors_sets = neighbors_sets
self.structure = structure
self.valences = valences
self.valences_origin = valences_origin
@classmethod
def from_structure_environments(cls, strategy, structure_environments, valences=None, valences_origin=None):
structure = structure_environments.structure
strategy.set_structure_environments(structure_environments=structure_environments)
coordination_environments = [None] * len(structure)
neighbors_sets = [None] * len(structure)
_all_nbs_sites = []
my_all_nbs_sites = []
if valences is None:
valences = structure_environments.valences
if valences_origin is None:
valences_origin = 'from_structure_environments'
else:
if valences_origin is None:
valences_origin = 'user-specified'
for isite, site in enumerate(structure):
site_ces_and_nbs_list = strategy.get_site_ce_fractions_and_neighbors(site, strategy_info=True)
if site_ces_and_nbs_list is None:
continue
coordination_environments[isite] = []
neighbors_sets[isite] = []
site_ces = []
site_nbs_sets = []
for ce_and_neighbors in site_ces_and_nbs_list:
_all_nbs_sites_indices = []
# Coordination environment
ce_dict = {'ce_symbol': ce_and_neighbors['ce_symbol'],
'ce_fraction': ce_and_neighbors['ce_fraction']}
if ce_and_neighbors['ce_dict'] is not None:
csm = ce_and_neighbors['ce_dict']['other_symmetry_measures'][strategy.symmetry_measure_type]
else:
csm = None
ce_dict['csm'] = csm
ce_dict['permutation'] = ce_and_neighbors['ce_dict']['permutation']
site_ces.append(ce_dict)
# Neighbors
neighbors = ce_and_neighbors['neighbors']
for nb_site_and_index in neighbors:
nb_site = nb_site_and_index['site']
try:
nb_allnbs_sites_index = my_all_nbs_sites.index(nb_site)
except ValueError:
nb_index_unitcell = nb_site_and_index['index']
diff = nb_site.frac_coords - structure[nb_index_unitcell].frac_coords
rounddiff = np.round(diff)
if not np.allclose(diff, rounddiff):
raise ValueError('Fractional coordinates of the neighbor and its unit-cell site do not '
'differ by an integer lattice translation ...')
nb_image_cell = np.array(rounddiff, np.int)
nb_allnbs_sites_index = len(_all_nbs_sites)
_all_nbs_sites.append({'site': nb_site,
'index': nb_index_unitcell,
'image_cell': nb_image_cell})
my_all_nbs_sites.append(nb_site)
_all_nbs_sites_indices.append(nb_allnbs_sites_index)
nb_set = cls.NeighborsSet(structure=structure, isite=isite,
all_nbs_sites=_all_nbs_sites,
all_nbs_sites_indices=_all_nbs_sites_indices)
site_nbs_sets.append(nb_set)
coordination_environments[isite] = site_ces
neighbors_sets[isite] = site_nbs_sets
return cls(strategy=strategy,
coordination_environments=coordination_environments,
all_nbs_sites=_all_nbs_sites,
neighbors_sets=neighbors_sets,
structure=structure, valences=valences,
valences_origin=valences_origin)
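# --- Illustrative usage sketch (not part of the original class) ----------------
# Condenses a StructureEnvironments (`se`, computed elsewhere) into a
# LightStructureEnvironments using the simplest built-in chemenv strategy; the
# strategy class and import path are from pymatgen's chemenv package.
def _lightify(se):
    from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import \
        SimplestChemenvStrategy
    return LightStructureEnvironments.from_structure_environments(
        strategy=SimplestChemenvStrategy(), structure_environments=se)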
def setup_statistic_lists(self):
self.statistics_dict = {'valences_origin': self.valences_origin,
'anion_list': {}, # OK
'anion_number': None, # OK
'anion_atom_list': {}, # OK
'anion_atom_number': None, # OK
'cation_list': {}, # OK
'cation_number': None, # OK
'cation_atom_list': {}, # OK
'cation_atom_number': None, # OK
'neutral_list': {}, # OK
'neutral_number': None, # OK
'neutral_atom_list': {}, # OK
'neutral_atom_number': None, # OK
'atom_coordination_environments_present': {}, # OK
'ion_coordination_environments_present': {}, # OK
'coordination_environments_ion_present': {}, # OK
'coordination_environments_atom_present': {}, # OK
'fraction_ion_coordination_environments_present': {}, # OK
'fraction_atom_coordination_environments_present': {}, # OK
'fraction_coordination_environments_ion_present': {}, # OK
'fraction_coordination_environments_atom_present': {}, # OK
'count_ion_present': {}, # OK
'count_atom_present': {}, # OK
'count_coordination_environments_present': {}}
atom_stat = self.statistics_dict['atom_coordination_environments_present']
ce_atom_stat = self.statistics_dict['coordination_environments_atom_present']
fraction_atom_stat = self.statistics_dict['fraction_atom_coordination_environments_present']
fraction_ce_atom_stat = self.statistics_dict['fraction_coordination_environments_atom_present']
count_atoms = self.statistics_dict['count_atom_present']
count_ce = self.statistics_dict['count_coordination_environments_present']
for isite, site in enumerate(self.structure):
# Building anion and cation list
site_species = []
if self.valences != 'undefined':
for sp, occ in site.species_and_occu.items():
valence = self.valences[isite]
strspecie = str(Specie(sp.symbol, valence))
if valence < 0:
specielist = self.statistics_dict['anion_list']
atomlist = self.statistics_dict['anion_atom_list']
elif valence > 0:
specielist = self.statistics_dict['cation_list']
atomlist = self.statistics_dict['cation_atom_list']
else:
specielist = self.statistics_dict['neutral_list']
atomlist = self.statistics_dict['neutral_atom_list']
if strspecie not in specielist:
specielist[strspecie] = occ
else:
specielist[strspecie] += occ
if sp.symbol not in atomlist:
atomlist[sp.symbol] = occ
else:
atomlist[sp.symbol] += occ
site_species.append((sp.symbol, valence, occ))
# Building environments lists
if self.coordination_environments[isite] is not None:
site_envs = [(ce_piece_dict['ce_symbol'], ce_piece_dict['ce_fraction'])
for ce_piece_dict in self.coordination_environments[isite]]
for ce_symbol, fraction in site_envs:
if fraction is None:
continue
if ce_symbol not in count_ce:
count_ce[ce_symbol] = 0.0
count_ce[ce_symbol] += fraction
for sp, occ in site.species_and_occu.items():
elmt = sp.symbol
if elmt not in atom_stat:
atom_stat[elmt] = {}
count_atoms[elmt] = 0.0
count_atoms[elmt] += occ
for ce_symbol, fraction in site_envs:
if fraction is None:
continue
if ce_symbol not in atom_stat[elmt]:
atom_stat[elmt][ce_symbol] = 0.0
atom_stat[elmt][ce_symbol] += occ * fraction
if ce_symbol not in ce_atom_stat:
ce_atom_stat[ce_symbol] = {}
if elmt not in ce_atom_stat[ce_symbol]:
ce_atom_stat[ce_symbol][elmt] = 0.0
ce_atom_stat[ce_symbol][elmt] += occ * fraction
if self.valences != 'undefined':
ion_stat = self.statistics_dict['ion_coordination_environments_present']
ce_ion_stat = self.statistics_dict['coordination_environments_ion_present']
count_ions = self.statistics_dict['count_ion_present']
for elmt, oxi_state, occ in site_species:
if elmt not in ion_stat:
ion_stat[elmt] = {}
count_ions[elmt] = {}
if oxi_state not in ion_stat[elmt]:
ion_stat[elmt][oxi_state] = {}
count_ions[elmt][oxi_state] = 0.0
count_ions[elmt][oxi_state] += occ
for ce_symbol, fraction in site_envs:
if fraction is None:
continue
if ce_symbol not in ion_stat[elmt][oxi_state]:
ion_stat[elmt][oxi_state][ce_symbol] = 0.0
ion_stat[elmt][oxi_state][ce_symbol] += occ * fraction
if ce_symbol not in ce_ion_stat:
ce_ion_stat[ce_symbol] = {}
if elmt not in ce_ion_stat[ce_symbol]:
ce_ion_stat[ce_symbol][elmt] = {}
if oxi_state not in ce_ion_stat[ce_symbol][elmt]:
ce_ion_stat[ce_symbol][elmt][oxi_state] = 0.0
ce_ion_stat[ce_symbol][elmt][oxi_state] += occ * fraction
self.statistics_dict['anion_number'] = len(self.statistics_dict['anion_list'])
self.statistics_dict['anion_atom_number'] = len(self.statistics_dict['anion_atom_list'])
self.statistics_dict['cation_number'] = len(self.statistics_dict['cation_list'])
self.statistics_dict['cation_atom_number'] = len(self.statistics_dict['cation_atom_list'])
self.statistics_dict['neutral_number'] = len(self.statistics_dict['neutral_list'])
self.statistics_dict['neutral_atom_number'] = len(self.statistics_dict['neutral_atom_list'])
for elmt, envs in atom_stat.items():
sumelement = count_atoms[elmt]
fraction_atom_stat[elmt] = {env: fraction / sumelement for env, fraction in envs.items()}
for ce_symbol, atoms in ce_atom_stat.items():
sumsymbol = count_ce[ce_symbol]
fraction_ce_atom_stat[ce_symbol] = {atom: fraction / sumsymbol for atom, fraction in atoms.items()}
ion_stat = self.statistics_dict['ion_coordination_environments_present']
fraction_ion_stat = self.statistics_dict['fraction_ion_coordination_environments_present']
ce_ion_stat = self.statistics_dict['coordination_environments_ion_present']
fraction_ce_ion_stat = self.statistics_dict['fraction_coordination_environments_ion_present']
count_ions = self.statistics_dict['count_ion_present']
for elmt, oxi_states_envs in ion_stat.items():
fraction_ion_stat[elmt] = {}
for oxi_state, envs in oxi_states_envs.items():
sumspecie = count_ions[elmt][oxi_state]
fraction_ion_stat[elmt][oxi_state] = {env: fraction / sumspecie
for env, fraction in envs.items()}
for ce_symbol, ions in ce_ion_stat.items():
fraction_ce_ion_stat[ce_symbol] = {}
sum_ce = np.sum([np.sum(list(oxistates.values())) for elmt, oxistates in ions.items()])
for elmt, oxistates in ions.items():
fraction_ce_ion_stat[ce_symbol][elmt] = {oxistate: fraction / sum_ce
for oxistate, fraction in oxistates.items()}
def get_site_info_for_specie_ce(self, specie, ce_symbol, min_fraction=0.0):
element = specie.symbol
oxi_state = specie.oxi_state
isites = []
csms = []
fractions = []
for isite, site in enumerate(self.structure):
if element in [sp.symbol for sp in site.species_and_occu]:
if self.valences == 'undefined' or oxi_state == self.valences[isite]:
for ce_dict in self.coordination_environments[isite]:
if ce_symbol == ce_dict['ce_symbol']:
isites.append(isite)
csms.append(ce_dict['csm'])
fractions.append(ce_dict['ce_fraction'])
return {'isites': isites, 'fractions': fractions, 'csms': csms}
def get_site_info_for_specie_allces(self, specie, min_fraction=0.0):
allces = {}
element = specie.symbol
oxi_state = specie.oxi_state
for isite, site in enumerate(self.structure):
if element in [sp.symbol for sp in site.species_and_occu]:
if self.valences == 'undefined' or oxi_state == self.valences[isite]:
if self.coordination_environments[isite] is None:
continue
for ce_dict in self.coordination_environments[isite]:
if ce_dict['ce_fraction'] < min_fraction:
continue
if ce_dict['ce_symbol'] not in allces:
allces[ce_dict['ce_symbol']] = {'isites': [], 'fractions': [], 'csms': []}
allces[ce_dict['ce_symbol']]['isites'].append(isite)
allces[ce_dict['ce_symbol']]['fractions'].append(ce_dict['ce_fraction'])
allces[ce_dict['ce_symbol']]['csms'].append(ce_dict['csm'])
return allces
def get_statistics(self, statistics_fields=DEFAULT_STATISTICS_FIELDS, bson_compatible=False):
if self.statistics_dict is None:
self.setup_statistic_lists()
if statistics_fields == 'ALL':
statistics_fields = [key for key in self.statistics_dict]
if bson_compatible:
dd = jsanitize({field: self.statistics_dict[field] for field in statistics_fields})
else:
dd = {field: self.statistics_dict[field] for field in statistics_fields}
return dd
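# --- Illustrative usage sketch (not part of the original class) ----------------
# Assumes `lse` is a LightStructureEnvironments instance; returns, per element,
# the fraction of each coordination environment (e.g. {'Si': {'T:4': 1.0}}).
def _env_fractions_per_element(lse):
    stats = lse.get_statistics(
        statistics_fields=['fraction_atom_coordination_environments_present'])
    return stats['fraction_atom_coordination_environments_present']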
def contains_only_one_anion_atom(self, anion_atom):
return (len(self.statistics_dict['anion_atom_list']) == 1 and
anion_atom in self.statistics_dict['anion_atom_list'])
def contains_only_one_anion(self, anion):
return len(self.statistics_dict['anion_list']) == 1 and anion in self.statistics_dict['anion_list']
def site_contains_environment(self, isite, ce_symbol):
if self.coordination_environments[isite] is None:
return False
return ce_symbol in [ce_dict['ce_symbol'] for ce_dict in self.coordination_environments[isite]]
def site_has_clear_environment(self, isite, conditions=None):
if self.coordination_environments[isite] is None:
raise ValueError('Coordination environments have not been determined for site {:d}'.format(isite))
if conditions is None:
return len(self.coordination_environments[isite]) == 1
ce = max(self.coordination_environments[isite], key=lambda x: x['ce_fraction'])
for condition in conditions:
target = condition['target']
if target == 'ce_fraction':
if ce[target] < condition['minvalue']:
return False
elif target == 'csm':
if ce[target] > condition['maxvalue']:
return False
elif target == 'number_of_ces':
if ce[target] > condition['maxnumber']:
return False
else:
raise ValueError('Target "{}" for condition of clear environment is not allowed'.format(target))
pass
return True
def structure_has_clear_environments(self, conditions=None, skip_none=True, skip_empty=False):
for isite in range(len(self.structure)):
if self.coordination_environments[isite] is None:
if skip_none:
continue
else:
return False
if len(self.coordination_environments[isite]) == 0:
if skip_empty:
continue
else:
return False
if not self.site_has_clear_environment(isite=isite, conditions=conditions):
return False
return True
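# --- Illustrative sketch (not part of the original class) ----------------------
# Format of the `conditions` argument accepted by site_has_clear_environment()
# and structure_has_clear_environments(); the threshold values below are
# arbitrary examples, not recommended defaults.
_EXAMPLE_CLEAR_ENV_CONDITIONS = [
    {'target': 'ce_fraction', 'minvalue': 0.95},
    {'target': 'csm', 'maxvalue': 2.5},
]
# e.g. lse.structure_has_clear_environments(conditions=_EXAMPLE_CLEAR_ENV_CONDITIONS)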
def clear_environments(self, conditions=None):
clear_envs_list = set()
for isite in range(len(self.structure)):
if self.coordination_environments[isite] is None:
continue
if len(self.coordination_environments[isite]) == 0:
continue
if self.site_has_clear_environment(isite=isite, conditions=conditions):
ce = max(self.coordination_environments[isite], key=lambda x: x['ce_fraction'])
clear_envs_list.add(ce['ce_symbol'])
return list(clear_envs_list)
def structure_contains_atom_environment(self, atom_symbol, ce_symbol):
"""
Checks whether the structure contains a given atom in a given environment
:param atom_symbol: Symbol of the atom
:param ce_symbol: Symbol of the coordination environment
:return: True if the coordination environment is found, False otherwise
"""
for isite, site in enumerate(self.structure):
if (Element(atom_symbol) in site.species_and_occu.element_composition and
self.site_contains_environment(isite, ce_symbol)):
return True
return False
@property
def uniquely_determines_coordination_environments(self):
"""
True if the coordination environments are uniquely determined.
"""
return self.strategy.uniquely_determines_coordination_environments
def __eq__(self, other):
"""
Equality method that checks if the LightStructureEnvironments object is equal to another
LightStructureEnvironments object. Two LightStructureEnvironments objects are equal if the strategy used
is the same, if the structure is the same, if the valences used in the strategies are the same, if the
coordination environments and the neighbours determined by the strategy are the same
:param other: LightStructureEnvironments object to compare with
:return: True if both objects are equal, False otherwise
"""
is_equal = (self.strategy == other.strategy and
self.structure == other.structure and
self.coordination_environments == other.coordination_environments and
self.valences == other.valences and
self.neighbors_sets == other.neighbors_sets)
this_sites = [ss['site'] for ss in self._all_nbs_sites]
other_sites = [ss['site'] for ss in other._all_nbs_sites]
this_indices = [ss['index'] for ss in self._all_nbs_sites]
other_indices = [ss['index'] for ss in other._all_nbs_sites]
return (is_equal and this_sites == other_sites and this_indices == other_indices)
def __ne__(self, other):
return not self == other
def as_dict(self):
"""
Bson-serializable dict representation of the LightStructureEnvironments object.
:return: Bson-serializable dict representation of the LightStructureEnvironments object.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"strategy": self.strategy.as_dict(),
"structure": self.structure.as_dict(),
"coordination_environments": self.coordination_environments,
"all_nbs_sites": [{'site': nb_site['site'].as_dict(),
'index': nb_site['index'],
'image_cell': [int(ii) for ii in nb_site['image_cell']]}
for nb_site in self._all_nbs_sites],
"neighbors_sets": [[nb_set.as_dict() for nb_set in site_nb_sets] if site_nb_sets is not None else None
for site_nb_sets in self.neighbors_sets],
"valences": self.valences}
@classmethod
def from_dict(cls, d):
"""
Reconstructs the LightStructureEnvironments object from a dict representation of the
LightStructureEnvironments created using the as_dict method.
:param d: dict representation of the LightStructureEnvironments object
:return: LightStructureEnvironments object
"""
dec = MontyDecoder()
structure = dec.process_decoded(d['structure'])
all_nbs_sites = []
for nb_site in d['all_nbs_sites']:
site = dec.process_decoded(nb_site['site'])
if 'image_cell' in nb_site:
image_cell = np.array(nb_site['image_cell'], np.int)
else:
diff = site.frac_coords - structure[nb_site['index']].frac_coords
rounddiff = np.round(diff)
if not np.allclose(diff, rounddiff):
raise ValueError('Fractional coordinates of the neighbor and its unit-cell site do not '
'differ by an integer lattice translation ...')
image_cell = np.array(rounddiff, np.int)
all_nbs_sites.append({'site': site,
'index': nb_site['index'],
'image_cell': image_cell})
neighbors_sets = [[cls.NeighborsSet.from_dict(dd=nb_set, structure=structure,
all_nbs_sites=all_nbs_sites)
for nb_set in site_nb_sets] if site_nb_sets is not None else None
for site_nb_sets in d['neighbors_sets']]
return cls(strategy=dec.process_decoded(d['strategy']),
coordination_environments=d['coordination_environments'],
all_nbs_sites=all_nbs_sites,
neighbors_sets=neighbors_sets,
structure=structure,
valences=d['valences'])
class ChemicalEnvironments(MSONable):
"""
Class used to store all the information about the chemical environment of a given site for a given list of
coordinated neighbours (internally called "cn_map")
"""
def __init__(self, coord_geoms=None):
"""
Initializes the ChemicalEnvironments object containing all the information about the chemical
environment of a given site
:param coord_geoms: coordination geometries to be added to the chemical environment.
"""
if coord_geoms is None:
self.coord_geoms = {}
else:
raise NotImplementedError('Constructor for ChemicalEnvironments with the coord_geoms argument is not '
'yet implemented')
def __getitem__(self, mp_symbol):
if mp_symbol not in self.coord_geoms:
raise IndexError()
return self.coord_geoms[mp_symbol]
def __len__(self):
"""
Returns the number of coordination geometries in this ChemicalEnvironments object
:return: Number of coordination geometries in this ChemicalEnvironments object
"""
return len(self.coord_geoms)
def __iter__(self):
for cg, cg_dict in self.coord_geoms.items():
yield (cg, cg_dict)
def minimum_geometry(self, symmetry_measure_type=None, max_csm=None):
"""
Returns the geometry with the minimum continuous symmetry measure of this ChemicalEnvironments
:return: tuple (mp_symbol, cg_dict) with mp_symbol being the geometry with the minimum continuous symmetry
measure and cg_dict the dict of information (including the symmetry measures) associated with it
:raise: ValueError if no coordination geometry is found in this ChemicalEnvironments object
"""
if len(self.coord_geoms) == 0:
return None
cglist = [cg for cg in self.coord_geoms]
if symmetry_measure_type is None:
csms = np.array([self.coord_geoms[cg]['other_symmetry_measures']['csm_wcs_ctwcc'] for cg in cglist])
else:
csms = np.array([self.coord_geoms[cg]['other_symmetry_measures'][symmetry_measure_type] for cg in cglist])
csmlist = [self.coord_geoms[cg] for cg in cglist]
imin = np.argmin(csms)
if max_csm is not None:
            if csms[imin] > max_csm:
return None
return cglist[imin], csmlist[imin]
def minimum_geometries(self, n=None, symmetry_measure_type=None, max_csm=None):
"""
        Returns a list of geometries sorted by increasing continuous symmetry measure in this ChemicalEnvironments
        object.
        :param n: Maximum number of geometries to be included in the list (all of them if None)
        :param symmetry_measure_type: Type of continuous symmetry measure to be used (default: 'csm_wcs_ctwcc')
        :param max_csm: If not None, only geometries whose csm is lower than or equal to this value are returned
        :return: list of (symbol, cg_dict) tuples sorted by increasing continuous symmetry measure
"""
cglist = [cg for cg in self.coord_geoms]
if symmetry_measure_type is None:
csms = np.array([self.coord_geoms[cg]['other_symmetry_measures']['csm_wcs_ctwcc'] for cg in cglist])
else:
csms = np.array([self.coord_geoms[cg]['other_symmetry_measures'][symmetry_measure_type] for cg in cglist])
csmlist = [self.coord_geoms[cg] for cg in cglist]
isorted = np.argsort(csms)
if max_csm is not None:
if n is None:
return [(cglist[ii], csmlist[ii]) for ii in isorted if csms[ii] <= max_csm]
else:
return [(cglist[ii], csmlist[ii]) for ii in isorted[:n] if csms[ii] <= max_csm]
else:
if n is None:
return [(cglist[ii], csmlist[ii]) for ii in isorted]
else:
return [(cglist[ii], csmlist[ii]) for ii in isorted[:n]]
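    # Illustrative usage (a sketch; `ce` is an assumed ChemicalEnvironments instance):
    #     best = ce.minimum_geometry(max_csm=8.0)        # (symbol, cg_dict) or None
    #     top3 = ce.minimum_geometries(n=3)              # three lowest-csm geometries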
def add_coord_geom(self, mp_symbol, symmetry_measure, algo='UNKNOWN', permutation=None, override=False,
local2perfect_map=None, perfect2local_map=None, detailed_voronoi_index=None,
other_symmetry_measures=None, rotation_matrix=None, scaling_factor=None):
"""
Adds a coordination geometry to the ChemicalEnvironments object
:param mp_symbol: Symbol (internal) of the coordination geometry added
:param symmetry_measure: Symmetry measure of the coordination geometry added
:param algo: Algorithm used for the search of the coordination geometry added
:param permutation: Permutation of the neighbors that leads to the csm stored
        :param override: If set to True, the coordination geometry will override the existing one if present
        :param local2perfect_map: Mapping of the local indices to the indices of the perfect geometry
        :param perfect2local_map: Mapping of the indices of the perfect geometry to the local indices
        :param detailed_voronoi_index: Detailed Voronoi index of the coordination geometry added
        :param other_symmetry_measures: Other continuous symmetry measures of the coordination geometry added
        :param rotation_matrix: Rotation matrix obtained from the continuous symmetry measure calculation
        :param scaling_factor: Scaling factor obtained from the continuous symmetry measure calculation
        :raise: ChemenvError if the coordination geometry is already added and override is set to False
"""
if not allcg.is_a_valid_coordination_geometry(mp_symbol=mp_symbol):
raise ChemenvError(self.__class__,
'add_coord_geom',
'Coordination geometry with mp_symbol "{mp}" is not valid'
.format(mp=mp_symbol))
if mp_symbol in list(self.coord_geoms.keys()) and not override:
raise ChemenvError(self.__class__,
"add_coord_geom",
"This coordination geometry is already present and override is set to False")
else:
self.coord_geoms[mp_symbol] = {'symmetry_measure': float(symmetry_measure), 'algo': algo,
'permutation': [int(i) for i in permutation],
'local2perfect_map': local2perfect_map,
'perfect2local_map': perfect2local_map,
'detailed_voronoi_index': detailed_voronoi_index,
'other_symmetry_measures': other_symmetry_measures,
'rotation_matrix': rotation_matrix,
'scaling_factor': scaling_factor}
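    # Example call (a sketch; the csm value and permutation below are made-up numbers):
    #     ce.add_coord_geom('T:4', symmetry_measure=0.15, algo='SEPARATION_PLANE',
    #                       permutation=[0, 1, 2, 3])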
def __str__(self):
"""
Returns a string representation of the ChemicalEnvironments object
:return: String representation of the ChemicalEnvironments object
"""
out = 'Chemical environments object :\n'
if len(self.coord_geoms) == 0:
out += ' => No coordination in it <=\n'
return out
for key in self.coord_geoms.keys():
mp_symbol = key
break
cn = symbol_cn_mapping[mp_symbol]
out += ' => Coordination {} <=\n'.format(cn)
mp_symbols = list(self.coord_geoms.keys())
csms_wcs = [self.coord_geoms[mp_symbol]['other_symmetry_measures']['csm_wcs_ctwcc'] for mp_symbol in mp_symbols]
icsms_sorted = np.argsort(csms_wcs)
mp_symbols = [mp_symbols[ii] for ii in icsms_sorted]
for mp_symbol in mp_symbols:
csm_wcs = self.coord_geoms[mp_symbol]['other_symmetry_measures']['csm_wcs_ctwcc']
csm_wocs = self.coord_geoms[mp_symbol]['other_symmetry_measures']['csm_wocs_ctwocc']
out += ' - {}\n'.format(mp_symbol)
            out += '      csm1 (with central site) : {}\n'.format(csm_wcs)
            out += '      csm2 (without central site) : {}\n'.format(csm_wocs)
            out += '      algo : {}\n'.format(self.coord_geoms[mp_symbol]['algo'])
out += ' perm : {}\n'.format(self.coord_geoms[mp_symbol]['permutation'])
out += ' local2perfect : {}\n'.format(str(self.coord_geoms[mp_symbol]['local2perfect_map']))
out += ' perfect2local : {}\n'.format(str(self.coord_geoms[mp_symbol]['perfect2local_map']))
return out
def is_close_to(self, other, rtol=0.0, atol=1e-8):
if set(self.coord_geoms.keys()) != set(other.coord_geoms.keys()):
return False
for mp_symbol, cg_dict_self in self.coord_geoms.items():
cg_dict_other = other[mp_symbol]
other_csms_self = cg_dict_self['other_symmetry_measures']
other_csms_other = cg_dict_other['other_symmetry_measures']
for csmtype in ['csm_wcs_ctwcc', 'csm_wcs_ctwocc', 'csm_wcs_csc',
'csm_wocs_ctwcc', 'csm_wocs_ctwocc', 'csm_wocs_csc']:
if not np.isclose(other_csms_self[csmtype], other_csms_other[csmtype], rtol=rtol, atol=atol):
return False
return True
def __eq__(self, other):
"""
Equality method that checks if the ChemicalEnvironments object is equal to another ChemicalEnvironments
object.
:param other: ChemicalEnvironments object to compare with
:return: True if both objects are equal, False otherwise
"""
if set(self.coord_geoms.keys()) != set(other.coord_geoms.keys()):
return False
for mp_symbol, cg_dict_self in self.coord_geoms.items():
cg_dict_other = other.coord_geoms[mp_symbol]
if cg_dict_self['symmetry_measure'] != cg_dict_other['symmetry_measure']:
return False
if cg_dict_self['algo'] != cg_dict_other['algo']:
return False
if cg_dict_self['permutation'] != cg_dict_other['permutation']:
return False
if cg_dict_self['detailed_voronoi_index'] != cg_dict_other['detailed_voronoi_index']:
return False
other_csms_self = cg_dict_self['other_symmetry_measures']
other_csms_other = cg_dict_other['other_symmetry_measures']
for csmtype in ['csm_wcs_ctwcc', 'csm_wcs_ctwocc', 'csm_wcs_csc',
'csm_wocs_ctwcc', 'csm_wocs_ctwocc', 'csm_wocs_csc']:
if other_csms_self[csmtype] != other_csms_other[csmtype]:
return False
return True
def __ne__(self, other):
return not self == other
def as_dict(self):
"""
Returns a dictionary representation of the ChemicalEnvironments object
        :return: Bson-serializable dict representation of the ChemicalEnvironments object
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"coord_geoms": jsanitize(self.coord_geoms)}
@classmethod
def from_dict(cls, d):
"""
Reconstructs the ChemicalEnvironments object from a dict representation of the ChemicalEnvironments created
using the as_dict method.
:param d: dict representation of the ChemicalEnvironments object
:return: ChemicalEnvironments object
"""
ce = cls()
for cg in d['coord_geoms'].keys():
if d['coord_geoms'][cg]['local2perfect_map'] is None:
l2p_map = None
else:
l2p_map = {int(key): int(val) for key, val in d['coord_geoms'][cg]['local2perfect_map'].items()}
if d['coord_geoms'][cg]['perfect2local_map'] is None:
p2l_map = None
else:
p2l_map = {int(key): int(val) for key, val in d['coord_geoms'][cg]['perfect2local_map'].items()}
if ('other_symmetry_measures' in d['coord_geoms'][cg] and
d['coord_geoms'][cg]['other_symmetry_measures'] is not None):
other_csms = d['coord_geoms'][cg]['other_symmetry_measures']
else:
other_csms = None
ce.add_coord_geom(cg,
d['coord_geoms'][cg]['symmetry_measure'],
d['coord_geoms'][cg]['algo'],
permutation=d['coord_geoms'][cg]['permutation'],
local2perfect_map=l2p_map,
perfect2local_map=p2l_map,
detailed_voronoi_index=d['coord_geoms'][cg]['detailed_voronoi_index'],
other_symmetry_measures=other_csms,
rotation_matrix=d['coord_geoms'][cg]['rotation_matrix'],
scaling_factor=d['coord_geoms'][cg]['scaling_factor'])
return ce | mit |
mikebenfield/scikit-learn | examples/svm/plot_svm_anova.py | 85 | 2024 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an
SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data to be in a curse-of-dimensionality setting
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using 1 CPU
this_scores = cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
saskartt/P4UL | pyRaster/distributeValuesToAreas.py | 1 | 5382 | #!/usr/bin/env python
from utilities import vtkWriteDataStructured2d, vtkWriteHeaderAndGridStructured2d
from utilities import vtkWritePointDataHeader, vtkWritePointDataStructured2D
import sys
import argparse
import numpy as np
from utilities import writeLog
from mapTools import *
from plotTools import addImagePlot
import matplotlib.pyplot as plt
'''
Description:
Labels areas from raster data and generates random values
(e.g. temperatures) matching given probability density function and mean.
'''
#==========================================================#
parser = argparse.ArgumentParser(prog='distributeValuesToAreas.py', description='''Labels areas from raster data and generates random values (e.g. temperatures) matching given probability density function and mean.
''')
parser.add_argument("rfile", type=str, nargs='?', default=None,
help="Name of the raster data file.")
parser.add_argument("-a", "--add", metavar="DFILE", type=str,
help="Add data to an existing raster data file.")
parser.add_argument("-fo", "--fileout", type=str,
help="Name of the output raster data file.")
parser.add_argument("-p", "--printOn", help="Print the resulting raster data.",
action="store_true", default=False)
parser.add_argument("-pp", "--printOnly", help="Print resulting data without saving.",
action="store_true", default=False)
parser.add_argument("-d", "--distribution", type=str, nargs=2, metavar=('TYPE', 'SCALE'),
help="Use a statistical distribution function to vary values specific to spearate areas. Types available: gaussian and uniform. For Gaussian distribution the scale value is standard deviation and for the uniform distribution it is the maximum offset. Example: gaussian 4.5 .")
parser.add_argument("-m", "--mean", type=float,
help="Mean of the distribution or constant value if not using distributed values.")
parser.add_argument("-n", "--name", default="Temperature", type=str,
help="Name of the VTK data array. Leave empty for 'Temperature'.")
parser.add_argument("-v", "--vtk", metavar="VTKFILE", type=str,
help="Write the results in VTKFILE with topography.")
parser.add_argument("-ft", "--filetopo", type=str,
help="File containing the topography data for VTK results (npz format).", default='')
args = parser.parse_args()
writeLog(parser, args)
#==========================================================#
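# Example invocation (illustrative only; file names and values are assumptions):
#   ./distributeValuesToAreas.py areas.npz -m 290. -d gaussian 4.5 -fo temps.npz -p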
if(args.vtk and (args.filetopo == '')):
sys.exit(' Error: VTK results require -ft/--filetopo. Exiting ...')
# Read data into an ndarray
Rdict = readNumpyZTile(args.rfile)
R = Rdict['R']
Rdims = np.array(np.shape(R))
ROrig = Rdict['GlobOrig']
dPx = Rdict['dPx']
# Label shapes from 0 to shapeCount-1 with SciPy ndimage package
if (not(args.distribution == None)):
LR, shapeCount = labelRaster(R)
else: # no need for labeling
LR = R
shapeCount = 1
R = None
# Initialize a new array or read existing data
if (args.add == None):
R = np.zeros(Rdims)
else:
Rdict = readNumpyZTile(args.add)
R = Rdict['R']
Rdims2 = np.array(np.shape(R))
  if any(Rdims != Rdims2):
    sys.exit(' Error: size mismatch between two data files when appending.')
# Fill the areas with generated values
if (args.distribution == None): # Fill with a constant value
R[np.nonzero(LR)] = args.mean
elif (args.distribution[0] == "gaussian"):
  scale = float(args.distribution[1])  # argparse stores the scale as a string
  for i in xrange(shapeCount):
    R[LR == i + 1] = np.random.normal(args.mean, scale)
elif (args.distribution[0] == "uniform"):
  maxoffset = float(args.distribution[1])
  for i in xrange(shapeCount):
    R[LR == i + 1] = args.mean + \
        np.random.uniform(-maxoffset, maxoffset)
else:
sys.exit('Error: invalid distribution given.')
LR = None
# Calculate mean and move nonzero values accordingly
offset = np.nanmean(R[np.nonzero(R)]) - args.mean
R[np.nonzero(R)] = R[np.nonzero(R)] - offset
# Read topography data
if args.vtk and not args.printOnly:
topoDict = readNumpyZTile(args.filetopo)
topo = topoDict['R']
topoDims = np.array(np.shape(topo))
topoOrig = topoDict['GlobOrig']
topoDPX = topoDict['dPx']
topoDict = None
  if any(topoDims != Rdims):
    sys.exit(' Error: mismatch in raster data and topography data shapes, Topo_dims={} vs. Data_dims={}'.format(
      topoDims, Rdims))
# Fill in the coordinate grid
X = np.zeros(Rdims)
Y = np.zeros(Rdims)
for i in xrange(Rdims[0]):
X[i, :] = i;
for i in xrange(Rdims[1]):
Y[:, i] = i
# Write the data into a VTK file
# N axis of (N,E) coordinates has to be reversed
t_vtk = vtkWriteHeaderAndGridStructured2d(
Y, X, topo[::-1, :], args.vtk, 'VTK map');
t_vtk = vtkWritePointDataHeader(t_vtk, R[::-1, :], 1)
t_vtk = vtkWritePointDataStructured2D(t_vtk, R[::-1, :], Y, args.name)
t_vtk.close()
# Save as npz
if(not args.printOnly):
  Rdict['R'] = R; Rdict['dPx'] = dPx; Rdict['GlobOrig'] = ROrig; Rdict['ShapeCount'] = shapeCount
saveTileAsNumpyZ(args.fileout, Rdict)
Rdict = None
# Plot the resulting raster
if(args.printOn or args.printOnly):
R[R == 0] = np.nan # Replacing zeros with NaN helps plotting
figDims = 13. * (Rdims[::-1].astype(float) / np.max(Rdims))
fig = plt.figure(num=1, figsize=figDims)
fig = addImagePlot(fig, R, args.rfile, False, False)
plt.show()
| mit |
Crompulence/cpl-library | utils/plot_parallel.py | 1 | 5901 | import numpy as np
import matplotlib.pyplot as plt
from mpi4py import MPI
import unittest
#def primefactors(num, f):
# factors = np.zeros(num/2)
# i = 2 #eligible factor
# f = 1 #number of factors
# n = num #store input number into a temporary variable
# for
# if (n%i == 0): #if i divides 2, it is a factor
# factors[f] = i
# f = f+1
# n = n/i
# else:
# i = i+1 #not a factor. move to next number
# if (n == 1):
# f = f-1 #its value will be one more than the number of factors
# return
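# collect_data gathers each rank's local block on `plotrank` and reassembles the
# global field using the Cartesian coordinates of every sending rank.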
def collect_data(recv_array, cart_comm, ncxyz, plotrank=0):
ncx, ncy, ncz = ncxyz
rank = cart_comm.Get_rank()
all_recv_array = cart_comm.gather(recv_array, root=plotrank)
ncx_l = ncxyz[0]/cart_comm.Get_topo()[0][0]
ncy_l = ncxyz[1]/cart_comm.Get_topo()[0][1]
ncz_l = ncxyz[2]/cart_comm.Get_topo()[0][2]
if rank == plotrank:
field = np.zeros([recv_array.shape[0], ncx, ncy, ncz])
#Loop over all processors
for n, r in enumerate(all_recv_array):
i, j, k = cart_comm.Get_coords(n)
field[:, i*ncx_l:(i+1)*ncx_l,
j*ncy_l:(j+1)*ncy_l,
k*ncz_l:(k+1)*ncz_l] = r[:,:,:,:]
return field
else:
return None
#Setup send and recv buffers
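# Each rank fills its local block with the global (i, j, k) cell indices, which
# makes the gathered field easy to verify in the tests below.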
def allocate_buffer(ncxyz_l, cart_comm):
ncx_l, ncy_l, ncz_l = ncxyz_l
ir, jr, kr = cart_comm.Get_coords(cart_comm.Get_rank())
recv_array = np.zeros((3, ncx_l, ncy_l, ncz_l), order='F', dtype=np.float64)
for i in range(ncx_l):
for j in range(ncy_l):
for k in range(ncz_l):
recv_array[0,i,j,k] = i + ncx_l*ir
recv_array[1,i,j,k] = j + ncy_l*jr
recv_array[2,i,j,k] = k + ncz_l*kr
return recv_array
if __name__ == "__main__":
import unittest
class TestPlot(unittest.TestCase):
@classmethod
def setUpClass(self):
self.plotrank = 0
#initialise MPI and CPL
self.comm = MPI.COMM_WORLD
# Parameters of the cpu topology (cartesian grid)
ncx = 64; ncy = 64; ncz = 8
self.ncxyz = [ncx, ncy, ncz]
#primefactors(num, factors, f)
self.npxyz = np.array([2, 2, 2], order='F', dtype=np.int32)
self.xyzL = np.array([195.2503206, 18.62550553, 133.3416884], order='F', dtype=np.float64)
self.xyz_orig = np.array([0.0, 0.0, 0.0], order='F', dtype=np.float64)
#Setup coupled simulation
self.cart_comm = self.comm.Create_cart([self.npxyz[0], self.npxyz[1], self.npxyz[2]])
ncx_l = self.ncxyz[0]/self.npxyz[0]
ncy_l = self.ncxyz[1]/self.npxyz[1]
ncz_l = self.ncxyz[2]/self.npxyz[2]
self.ncxyz_l = [ncx_l, ncy_l, ncz_l]
self.recv_array = allocate_buffer(self.ncxyz_l, self.cart_comm)
def test_collect(self):
field = collect_data(self.recv_array, self.cart_comm,
self.ncxyz, self.plotrank)
if self.cart_comm.Get_rank() == self.plotrank:
for i in range(self.ncxyz[0]):
for j in range(self.ncxyz[1]):
for k in range(self.ncxyz[2]):
self.assertEqual(field[0,i,j,k], i)
self.assertEqual(field[1,i,j,k], j)
self.assertEqual(field[2,i,j,k], k)
def test_plot(self):
field = collect_data(self.recv_array, self.cart_comm,
self.ncxyz, self.plotrank)
if self.cart_comm.Get_rank() == self.plotrank:
x = np.linspace(0.,1.,self.ncxyz[0])
y = np.linspace(0.,1.,self.ncxyz[1])
z = np.linspace(0.,1.,self.ncxyz[2])
X, Y, Z = np.meshgrid(x, y, z)
plt.pcolormesh(X[:,:,0], Y[:,:,0],
np.mean(field[0,:,:,:],2), alpha=0.4)
plt.colorbar()
plt.ion()
plt.show()
plt.pause(2.)
self.assertEqual(raw_input("Plot looks correct? y/n:"),"y")
plt.ioff()
@classmethod
def tearDownClass(self):
self.cart_comm.Free()
MPI.Finalize()
unittest.main()
# #initialise MPI and CPL
# comm = MPI.COMM_WORLD
# comm = comm
# rank = comm.Get_rank()
# nprocs_realm = comm.Get_size()
# # Parameters of the cpu topology (cartesian grid)
# ncx = 64; ncy = 64; ncz = 8
# ncxyz = [ncx, ncy, ncz]
# npxyz = np.array([2, 1, 2], order='F', dtype=np.int32)
# NProcs = np.product(npxyz)
# xyzL = np.array([195.2503206, 18.62550553, 133.3416884], order='F', dtype=np.float64)
# xyz_orig = np.array([0.0, 0.0, 0.0], order='F', dtype=np.float64)
# #Setup coupled simulation
# cart_comm = comm.Create_cart([npxyz[0], npxyz[1], npxyz[2]])
# ncx_l = ncxyz[0]/npxyz[0]
# ncy_l = ncxyz[1]/npxyz[1]
# ncz_l = ncxyz[2]/npxyz[2]
# recv_array = allocate_buffer(ncx_l, ncy_l, ncz_l, cart_comm)
# plotrank = 0
# field = collect_data(recv_array, cart_comm, ncxyz, plotrank)
# if rank == plotrank:
# for i in range(ncx):
# for j in range(ncy):
# for k in range(ncz):
# assert field[0,i,j,k] == i
# assert field[1,i,j,k] == j
# assert field[2,i,j,k] == k
# x = np.linspace(0.,1.,ncx)
# y = np.linspace(0.,1.,ncy)
# z = np.linspace(0.,1.,ncz)
# X, Y, Z = np.meshgrid(x, y, z)
# plt.pcolormesh(X[:,:,0], Y[:,:,0], np.mean(field[0,:,:,:],2), alpha=0.4)
# plt.colorbar()
# plt.show()
#
# MPI.Finalize()
| gpl-3.0 |
Antiun/yelizariev-addons | sugarcrm_migration/import_kashflow.py | 16 | 21779 | # -*- coding: utf-8 -*-
import logging
_logger = logging.getLogger(__name__)
from openerp.exceptions import except_orm
from openerp import tools  # tools.DEFAULT_SERVER_DATE_FORMAT is used in date_to_period
try:
import MySQLdb
import MySQLdb.cursors
from pandas import merge, DataFrame
except ImportError:
pass
from openerp.addons.import_framework.import_base import import_base
from openerp.addons.import_framework.mapper import *
import re
import time
import datetime as DT
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import csv
class fix_kashflow_date(mapper):
"""
convert '31/12/2012' to '2012-12-31'
"""
def __init__(self, field_name):
self.field_name = field_name
def __call__(self, external_values):
s = external_values.get(self.field_name)
if not s:
return ''
d,m,y = str(s).split('/')
return '%s-%s-%s' % (y,m,d)
class date_to_period(fix_kashflow_date, dbmapper):
def __init__(self, field_name, context):
super(date_to_period, self).__init__(field_name)
self.context = context()
def __call__(self, external_values):
s = super(date_to_period, self).__call__(external_values)
dt = DT.datetime.strptime(s, tools.DEFAULT_SERVER_DATE_FORMAT)
period_ids = self.parent.pool.get('account.period').find(self.parent.cr, self.parent.uid, dt=dt, context=self.context)
if not period_ids:
print 'period_ids not found', s
return period_ids and str(period_ids[0]) or ''
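# date_to_period converts a KashFlow 'dd/mm/yyyy' date into the id of the matching
# account.period of the target company (looked up via account.period.find).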
class import_kashflow(import_base):
TABLE_COMPANY = 'companies'
TABLE_CUSTOMER = '-customers'
TABLE_SUPPLIER = '-suppliers'
TABLE_PARTNER = '_partners'
TABLE_JOURNAL = '_journals'
TABLE_NOMINAL_CODES = '-nominal-codes'
TABLE_NOMINAL_CODES_ROOT = '-nominal-codes_root'
TABLE_TRANSACTION = '-transactions'
COL_ID_CUSTOM = 'id'
COL_LINE_NUM = 'line_num'
COL_NOMINAL_CODE = 'Nominal Code'
COL_NOMINAL_CODE_NAME = 'Name'
COL_TR_TYPE = 'Transaction Type'
COL_TR_BANK = 'Bank'
COL_TR_CODE = 'Code'
COL_TR_DATE = 'Date'
COL_TR_TRANSACTION = 'Account'
COL_TR_COMMENT = 'Comment'
COL_TR_AMOUNT = 'Amount'
COL_TR_VAT_RATE = 'VAT Rate'
COL_TR_VAT_AMOUNT = 'VAT Amount'
COL_TR_DEPARTMENT = 'Department'
COL_P_CODE = 'Code'
COL_P_NAME = 'Name'
COL_P_ADDRESS = 'Address'
COL_P_LINE_2 = 'Line 2'
COL_P_LINE_3 = 'Line 3'
COL_P_LINE_4 = 'Line 4'
COL_P_POST_CODE = 'Post Code'
COL_P_FULL_NAME = 'Full Name'
COL_P_TELEPHONE = 'Telephone'
COL_P_MOBILE = 'Mobile'
COL_P_SOURCE = 'Source'
def initialize(self):
# files:
# COMPANY_NAME-customers.csv
# COMPANY_NAME-suppliers.csv
# COMPANY_NAME-nominal-codes.csv
# COMPANY_NAME-transactions.csv
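        # e.g. for a company called "acme" (the name is only an illustration):
        #   acme-customers.csv, acme-suppliers.csv,
        #   acme-nominal-codes.csv, acme-transactions.csv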
self.csv_files = self.context.get('csv_files')
self.import_options.update({'separator':',',
#'quoting':''
})
companies = []
for f in self.csv_files:
if f.endswith('-transactions.csv'):
c = re.match('.*?([^/]*)-transactions.csv$', f).group(1)
companies.append(c)
self.companies = [{'name':c} for c in companies]
def get_data(self, table):
file_name = filter(lambda f: f.endswith('/%s.csv' % table), self.csv_files)
if file_name:
_logger.info('read file "%s"' % ( '%s.csv' % table))
file_name = file_name[0]
else:
_logger.info('file not found %s' % ( '%s.csv' % table))
return []
with open(file_name, 'rb') as csvfile:
            fixed_file = StringIO(csvfile.read().replace('\r\n', '\n'))
reader = csv.DictReader(fixed_file,
delimiter = self.import_options.get('separator'),
#quotechar = self.import_options.get('quoting'),
)
res = list(reader)
for line_num, line in enumerate(res):
line[self.COL_LINE_NUM] = str(line_num)
return res
def get_mapping(self):
res = [self.get_mapping_company()]
for c in self.companies:
company = c.get('name')
res.extend(
self.get_mapping_partners(company) +
[
self.get_mapping_journals(company),
self.get_mapping_nominal_codes(company),
self.get_mapping_transactions(company),
])
return res
def table_company(self):
t = DataFrame(self.companies)
return t
def finalize_companies(self):
for c in self.companies:
context = self.get_context_company(c.get('name'))()
company_id = context.get('company_id')
for year in [2012,2013,2014]:
existed = self.pool.get('account.fiscalyear').search(self.cr, self.uid, [('code','=',str(year)), ('company_id','=', company_id)])
if existed:
continue
year_id = self.pool.get('account.fiscalyear').create(self.cr, self.uid, {
'name':'%s (%s)' % (str(year), c.get('name')),
'code':str(year),
'date_start': time.strftime('%s-04-01' % year),
'date_stop': time.strftime('%s-03-31' % (year+1)),
'company_id': company_id
})
self.pool.get('account.fiscalyear').create_period3(self.cr, self.uid, [year_id])
def get_mapping_company(self):
return {
'name': self.TABLE_COMPANY,
'table': self.table_company,
'dependencies' : [],
'models':[
{'model' : 'res.company',
'finalize': self.finalize_companies,
'fields': {
'id': xml_id(self.TABLE_COMPANY, 'name'),
'name': 'name',
}
},
{'model' : 'account.account',
'hook': self.hook_account_account_root,
'fields': {
'id': xml_id(self.TABLE_NOMINAL_CODES_ROOT, 'name'),
'company_id/id': xml_id(self.TABLE_COMPANY, 'name'),
'code': const('0'),
'type': const('view'),
'name': 'name',
'user_type/id': const('account.data_account_type_view'),
}
}
]
}
def get_table(self, company, table):
def f():
t = DataFrame(self.get_data(company + table))
return t
return f
def get_partner_by_name(self, name):
id = self.pool['res.partner'].search(self.cr, self.uid, [('name','=', name)])
if isinstance(id, list):
if len(id)!=1:
return None
id = id[0]
return id
def get_hook_check_existed_partners(self, xml_id_mapper, field_name, another_hook=None):
def f(external_values):
if another_hook:
external_values = another_hook(external_values)
if not external_values:
return None
name = external_values.get(field_name)
if not name:
return None
id = self.get_partner_by_name(name)
if id:
# create new reference to existed record
xml_id_mapper.set_parent(self)
data_name = xml_id_mapper(external_values)
if self.pool.get('ir.model.data').search(self.cr, self.uid, [('name', '=', data_name)]):
# already created
return None
vals = {'name': data_name,
'model': 'res.partner',
#'module': self.module_name,
'module': '',
'res_id': id,
}
self.pool.get('ir.model.data').create(self.cr, self.uid, vals, context=self.context)
return None
return external_values # create new partner
return f
def get_mapping_partners(self, company):
table = company + self.TABLE_PARTNER
def f(customer=False, supplier=False):
table_cus_or_sup = self.TABLE_CUSTOMER if customer else self.TABLE_SUPPLIER
return {
'name': company + table_cus_or_sup,
'table': self.get_table(company, table_cus_or_sup),
'dependencies' : [self.TABLE_COMPANY],
'models':[
{'model' : 'res.partner',
'hook': self.get_hook_check_existed_partners(xml_id(table, self.COL_P_CODE), self.COL_P_NAME),
'fields': {
'id': xml_id(table, self.COL_P_CODE),
'company_id/id': self.company_id(company),
'name': self.COL_P_NAME,
'ref': self.COL_P_CODE,
'customer': const('1') if customer else const('0'),
'supplier': const('1') if supplier else const('0'),
'phone': self.COL_P_TELEPHONE,
#'mobile': self.COL_P_MOBILE,
'zip': self.COL_P_POST_CODE,
'street': self.COL_P_ADDRESS,
'street2': concat(self.COL_P_LINE_2,self.COL_P_LINE_3,self.COL_P_LINE_4),
'comment': ppconcat(self.COL_P_SOURCE),
}
},
{'model' : 'res.partner',
'hook': self.get_hook_check_existed_partners(xml_id(table+'_child', self.COL_P_CODE), self.COL_P_FULL_NAME, self.get_hook_ignore_empty(self.COL_P_MOBILE, self.COL_P_FULL_NAME)),
'fields': {
'id': xml_id(table+'_child', self.COL_P_CODE),
'company_id/id': self.company_id(company),
'parent_id/id': xml_id(table, self.COL_P_CODE),
'name': value(self.COL_P_FULL_NAME, default='NONAME'),
'customer': const('1') if customer else const('0'),
'supplier': const('1') if supplier else const('0'),
#'phone': self.COL_P_TELEPHONE,
'mobile': self.COL_P_MOBILE,
}
}
]
}
return [f(customer=True), f(supplier=True)]
def company_id(self, company):
id = self.get_xml_id(self.TABLE_COMPANY, 'name', {'name':company})
return const(id)
def get_hook_account_account(self, company):
def f(external_values):
id = self.get_xml_id(company + self.TABLE_NOMINAL_CODES, self.COL_NOMINAL_CODE, external_values)
res_id = self.pool.get('ir.model.data').xmlid_to_res_id(
self.cr,
self.uid,
'.'+id
)
if res_id:
# account already created
return None
external_values['company_name'] = company
return external_values
return f
def hook_account_account_root(self, external_values):
id = self.get_xml_id(self.TABLE_NOMINAL_CODES_ROOT, 'name', external_values)
res_id = self.pool.get('ir.model.data').xmlid_to_res_id(
self.cr,
self.uid,
'.'+id
)
if res_id:
# account already created
return None
return external_values
def get_mapping_nominal_codes(self, company):
table = company + self.TABLE_NOMINAL_CODES
return {
'name': table,
'table': self.get_table(company, self.TABLE_NOMINAL_CODES),
'dependencies' : [self.TABLE_COMPANY],
'models':[{
'model' : 'account.account',
'context': self.get_context_company(company),
'hook': self.get_hook_account_account(company),
'fields': {
'id': xml_id(table, self.COL_NOMINAL_CODE),
'company_id/id': self.company_id(company),
'code': self.COL_NOMINAL_CODE,
'name': self.COL_NOMINAL_CODE_NAME,
'user_type/id': const('account.data_account_type_view'),
'parent_id/id': xml_id(self.TABLE_NOMINAL_CODES_ROOT, 'company_name'),
}
}]
}
def get_xml_id(self, table, col, external_values):
id = xml_id(table, col)
id.set_parent(self)
return id(external_values)
map_journal_type = {
'SI':'sale',# Sales Invoice
'SC':'sale',# Sales Credit
'PC':'purchase',# Purchase Credit
'PI':'purchase',# Purchase Invoice
'JC':'general',# Journal Credit
'JD':'general',# Journal Debit
'BP':'bank',# Bank Payment
'BR':'bank',# Bank Receipt
}
def table_journal(self):
res = []
for code in self.map_journal_type:
res.append({self.COL_TR_TYPE: code})
t = DataFrame(res)
return t
def get_mapping_journals(self, company):
journal = company + self.TABLE_JOURNAL
return {
'name': journal,
'table': self.table_journal,
'dependencies' : [self.TABLE_COMPANY],
'models':[
{'model' : 'account.journal',
'context': self.get_context_company(company),
'fields': {
'id': xml_id(journal, self.COL_TR_TYPE),
'company_id/id': self.company_id(company),
'name': self.COL_TR_TYPE,
'code': self.COL_TR_TYPE,
'type': map_val(self.COL_TR_TYPE, self.map_journal_type),
}
},
]
}
def get_context_company(self, company):
def f():
company_id = self.pool.get('ir.model.data').xmlid_to_res_id(
self.cr,
self.uid,
'.'+self.company_id(company)({})
)
return {'company_id':company_id}
return f
def hook_bank_entries_move(self, external_values):
journal_type = external_values.get(self.COL_TR_TYPE)
if journal_type not in ['BR', 'BP']:
return None
return external_values
def hook_bank_entries_move_line(self, external_values):
external_values = self.hook_bank_entries_move(external_values)
if external_values is None:
return None
journal_type = external_values.get(self.COL_TR_TYPE)
external_values['debit'] = '0'
external_values['credit'] = '0'
external_values[self.COL_ID_CUSTOM] = external_values[self.COL_LINE_NUM]
bank = external_values.get(self.COL_TR_BANK)
debit = external_values.copy()
credit = external_values.copy()
debit[self.COL_ID_CUSTOM] += '_debit'
credit[self.COL_ID_CUSTOM] += '_credit'
amount = float(external_values.get(self.COL_TR_AMOUNT))
debit['debit'] = amount
credit['credit'] = amount
if journal_type == 'BP':
# expense
debit['account_id'] = external_values.get(self.COL_TR_CODE)
credit['account_id'] = external_values.get(self.COL_TR_BANK)
else:
# income
debit['account_id'] = external_values.get(self.COL_TR_BANK)
credit['account_id'] = external_values.get(self.COL_TR_CODE)
return [debit, credit]
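    # Illustrative effect of the hook above (made-up values): a 'BP' row with
    # Amount=100.0, Bank='1200' and Code='5000' becomes two move lines, a debit of
    # 100.0 on account 5000 and a credit of 100.0 on bank account 1200.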
def hook_journal_entries_move(self, external_values):
journal_type = external_values.get(self.COL_TR_TYPE)
if journal_type not in ['JC', 'JD', 'SI', 'SC', 'PI', 'PC']:
return None
if not external_values.get(self.COL_TR_TRANSACTION):
tr = 'journal-entry-%s-%s' % (
external_values.get(self.COL_TR_DATE),
external_values.get(self.COL_TR_AMOUNT),
)
external_values[self.COL_TR_TRANSACTION] = tr
return external_values
def hook_journal_entries_move_line(self, external_values):
external_values = self.hook_journal_entries_move(external_values)
if external_values is None:
return None
journal_type = external_values.get(self.COL_TR_TYPE)
amount = external_values.get(self.COL_TR_AMOUNT)
if journal_type in ['JC', 'SC', 'PC']:
external_values['debit']='0'
external_values['credit']=amount
else:
external_values['debit']=amount
external_values['credit']='0'
bank = external_values.get(self.COL_TR_BANK)
partner_id = ''
if bank and not bank.isdigit():
partner_id = bank
external_values['partner_id'] = partner_id
external_values[self.COL_ID_CUSTOM] = '%s-%s-%s'%(
external_values[self.COL_TR_TRANSACTION],
external_values[self.COL_TR_CODE],
external_values.get(self.COL_LINE_NUM)
)
res = [external_values]
if journal_type not in ['JC', 'JD']:
bank_line = external_values.copy()
bank_line['debit'] = external_values['credit']
bank_line['credit'] = external_values['debit']
bank_line[self.COL_TR_CODE] = '1200'
bank_line[self.COL_ID_CUSTOM] += '_extra'
res.append(bank_line)
return res
def get_mapping_transactions(self, company):
table = company + self.TABLE_TRANSACTION
move = table + '_move'
move_line = move + '_line'
journal = company + self.TABLE_JOURNAL
account = company + self.TABLE_NOMINAL_CODES
partner = company + self.TABLE_PARTNER
return {
'name': table,
'table': self.get_table(company, self.TABLE_TRANSACTION),
'dependencies' : [company + self.TABLE_JOURNAL,
company + self.TABLE_NOMINAL_CODES,
company + self.TABLE_CUSTOMER,
company + self.TABLE_SUPPLIER,
],
'models':[
# TODO VAT
# JC,JD, SI,SC, PC,PI
{'model' : 'account.move',
'hook': self.hook_journal_entries_move,
'context': self.get_context_company(company),
'fields': {
'id': xml_id(move, self.COL_TR_TRANSACTION),
'company_id/id': self.company_id(company),
'ref': self.COL_TR_TRANSACTION,
'journal_id/id': xml_id(journal, self.COL_TR_TYPE),
'period_id/.id': date_to_period(self.COL_TR_DATE, self.get_context_company(company)),
'date': fix_kashflow_date(self.COL_TR_DATE),
'narration': self.COL_TR_COMMENT,
}
},
{'model' : 'account.move.line',
'hook': self.hook_journal_entries_move_line,
'context': self.get_context_company(company),
'fields': {
'id': xml_id(move_line, self.COL_ID_CUSTOM),
'company_id/id': self.company_id(company),
'name': value(self.COL_TR_COMMENT, fallback=self.COL_TR_DATE, default='NONAME'),
'ref': self.COL_TR_TRANSACTION,
'date': fix_kashflow_date(self.COL_TR_DATE),
'move_id/id': xml_id(move, self.COL_TR_TRANSACTION),
'partner_id/.id': res_id(const(partner), 'partner_id', default=None),
'account_id/id': xml_id(account, self.COL_TR_CODE),
'debit':'debit',
'credit':'credit',
}
},
# BP,BR
{'model' : 'account.move',
'context': self.get_context_company(company),
'hook': self.hook_bank_entries_move,
'fields': {
'id': xml_id(move, self.COL_LINE_NUM),
'company_id/id': self.company_id(company),
'ref': self.COL_TR_TRANSACTION,
'journal_id/id': xml_id(journal, self.COL_TR_TYPE),
'period_id/.id': date_to_period(self.COL_TR_DATE, self.get_context_company(company)),
'date': fix_kashflow_date(self.COL_TR_DATE),
'narration': self.COL_TR_COMMENT,
}
},
{'model' : 'account.move.line',
'hook': self.hook_bank_entries_move_line,
'context': self.get_context_company(company),
'fields': {
'id': xml_id(move_line, self.COL_ID_CUSTOM),
'company_id/id': self.company_id(company),
'name': value(self.COL_TR_COMMENT, fallback=self.COL_TR_DATE, default='NONAME'),
'ref': self.COL_TR_TRANSACTION,
'date': fix_kashflow_date(self.COL_TR_DATE),
'move_id/id': xml_id(move, self.COL_LINE_NUM),
'account_id/id': xml_id(account, 'account_id'),
'debit':'debit',
'credit':'credit',
}
},
]
}
| lgpl-3.0 |
AlexRobson/scikit-learn | sklearn/tree/export.py | 75 | 15670 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
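# Quick sanity check of _color_brew (illustrative): _color_brew(3) yields three
# [R, G, B] triplets whose hues are spaced 120 degrees apart on the color wheel.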
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] - colors['bounds'][0])))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
| bsd-3-clause |
belltailjp/scikit-learn | sklearn/feature_selection/rfe.py | 137 | 17066 | # Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
absolute weights are the smallest are pruned from the current set features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
The following example shows how to retrieve the 5 right informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params=None, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
            n_features_to_select = n_features // 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
        support_ = np.ones(n_features, dtype=bool)
        ranking_ = np.ones(n_features, dtype=int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.estimator_params:
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
# Eliminate the worse features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
The following example shows how to retrieve the 5 informative features
(which are not known a priori) in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. "
"The parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
# Initialization
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
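# The callback handed to _fit scores the estimator on the held-out fold at
# each subset size visited during the elimination, so rfe.scores_ collects
# one test score per candidate number of features for this split.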
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
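# rfe.scores_ was reversed above, so scores[0] belongs to the smallest
# candidate subset and the last entry to the full feature set;
# n_feature_index is the index of that last entry, hence
# (n_feature_index - argmax(scores)) * step is the number of features
# removed between the full set and the best-scoring subset, and
# subtracting it from n_features recovers the subset size to keep.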
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
# The per-fold scores were summed above, so dividing by len(cv) turns
# them into mean cross-validation scores per feature-subset size
self.grid_scores_ = scores / len(cv)
return self
| bsd-3-clause |
bikong2/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
# check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
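# Expected pool-adjacent-violators output: the decreasing pair (7, 5) is
# pooled to its mean 6 and the violating run (9, 8, 7) to its mean 8,
# giving the non-decreasing sequence above.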
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for increasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
aisthesis/pynance | pynance/opt/price.py | 2 | 7070 | """
.. Copyright (c) 2014, 2015 Marshall Farrier
license http://opensource.org/licenses/MIT
Options - price (:mod:`pynance.opt.price`)
==================================================
.. currentmodule:: pynance.opt.price
"""
from __future__ import absolute_import
import pandas as pd
from ._common import _getprice
from ._common import _relevant_rows
from . import _constants
class Price(object):
"""
Wrapper class for :class:`pandas.DataFrame` for retrieving
options prices.
Objects of this class are not intended for direct instantiation
but are created as attributes of objects of type :class:`~pynance.opt.core.Options`.
.. versionadded:: 0.3.0
Parameters
----------
df : :class:`pandas.DataFrame`
Options data.
Attributes
----------
data : :class:`pandas.DataFrame`
Options data.
Methods
-------
.. automethod:: exps
.. automethod:: get
.. automethod:: metrics
.. automethod:: strikes
"""
def __init__(self, df):
self.data = df
def get(self, opttype, strike, expiry):
"""
Price as midpoint between bid and ask.
Parameters
----------
opttype : str
'call' or 'put'.
strike : numeric
Strike price.
expiry : date-like
Expiration date. Can be a :class:`datetime.datetime` or
a string that :mod:`pandas` can interpret as such, e.g.
'2015-01-01'.
Returns
-------
out : float
Examples
--------
>>> geopts = pn.opt.get('ge')
>>> geopts.price.get('call', 26., '2015-09-18')
0.94
"""
_optrow = _relevant_rows(self.data, (strike, expiry, opttype,),
"No key for {} strike {} {}".format(expiry, strike, opttype))
return _getprice(_optrow)
def metrics(self, opttype, strike, expiry):
"""
Basic metrics for a specific option.
Parameters
----------
opttype : str ('call' or 'put')
strike : numeric
Strike price.
expiry : date-like
Expiration date. Can be a :class:`datetime.datetime` or
a string that :mod:`pandas` can interpret as such, e.g.
'2015-01-01'.
Returns
-------
out : :class:`pandas.DataFrame`
"""
_optrow = _relevant_rows(self.data, (strike, expiry, opttype,),
"No key for {} strike {} {}".format(expiry, strike, opttype))
_index = ['Opt_Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int', 'Underlying_Price', 'Quote_Time']
_out = pd.DataFrame(index=_index, columns=['Value'])
_out.loc['Opt_Price', 'Value'] = _opt_price = _getprice(_optrow)
for _name in _index[2:]:
_out.loc[_name, 'Value'] = _optrow.loc[:, _name].values[0]
_eq_price = _out.loc['Underlying_Price', 'Value']
if opttype == 'put':
_out.loc['Time_Val'] = _get_put_time_val(_opt_price, strike, _eq_price)
else:
_out.loc['Time_Val'] = _get_call_time_val(_opt_price, strike, _eq_price)
return _out
def strikes(self, opttype, expiry):
"""
Retrieve option prices for all strikes of a given type with a given expiration.
Parameters
----------
opttype : str ('call' or 'put')
expiry : date-like
Expiration date. Can be a :class:`datetime.datetime` or
a string that :mod:`pandas` can interpret as such, e.g.
'2015-01-01'.
Returns
----------
df : :class:`pandas.DataFrame`
eq : float
Price of underlying.
qt : datetime.datetime
Time of quote.
See Also
--------
:meth:`exps`
"""
_relevant = _relevant_rows(self.data, (slice(None), expiry, opttype,),
"No key for {} {}".format(expiry, opttype))
_index = _relevant.index.get_level_values('Strike')
_columns = ['Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int']
_df = pd.DataFrame(index=_index, columns=_columns)
_underlying = _relevant.loc[:, 'Underlying_Price'].values[0]
_quotetime = pd.to_datetime(_relevant.loc[:, 'Quote_Time'].values[0], utc=True).to_datetime()
for _col in _columns[2:]:
_df.loc[:, _col] = _relevant.loc[:, _col].values
_df.loc[:, 'Price'] = (_df.loc[:, 'Bid'] + _df.loc[:, 'Ask']) / 2.
_set_tv_strike_ix(_df, opttype, 'Price', 'Time_Val', _underlying)
return _df, _underlying, _quotetime
def exps(self, opttype, strike):
"""
Prices for given strike on all available dates.
Parameters
----------
opttype : str ('call' or 'put')
strike : numeric
Returns
----------
df : :class:`pandas.DataFrame`
eq : float
Price of underlying.
qt : :class:`datetime.datetime`
Time of quote.
See Also
--------
:meth:`strikes`
"""
_relevant = _relevant_rows(self.data, (strike, slice(None), opttype,),
"No key for {} {}".format(strike, opttype))
_index = _relevant.index.get_level_values('Expiry')
_columns = ['Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int']
_df = pd.DataFrame(index=_index, columns=_columns)
_eq = _relevant.loc[:, 'Underlying_Price'].values[0]
_qt = pd.to_datetime(_relevant.loc[:, 'Quote_Time'].values[0], utc=True).to_datetime()
for _col in _columns[2:]:
_df.loc[:, _col] = _relevant.loc[:, _col].values
_df.loc[:, 'Price'] = (_df.loc[:, 'Bid'] + _df.loc[:, 'Ask']) / 2.
_set_tv_other_ix(_df, opttype, 'Price', 'Time_Val', _eq, strike)
return _df, _eq, _qt
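# Rough usage sketch (assuming quotes are retrieved as in the `get`
# docstring above, e.g. via pn.opt.get('ge')):
#
#     geopts = pn.opt.get('ge')
#     calls, spot, quote_time = geopts.price.strikes('call', '2015-09-18')
#     single = geopts.price.metrics('call', 26., '2015-09-18')
#
# strikes() returns one row per strike with the bid/ask midpoint and time
# value, while metrics() summarizes a single contract.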
def _set_tv_other_ix(df, opttype, pricecol, tvcol, eqprice, strike):
if opttype == 'put':
if strike <= eqprice:
df.loc[:, tvcol] = df.loc[:, pricecol]
else:
_diff = eqprice - strike
df.loc[:, tvcol] = df.loc[:, pricecol] + _diff
else:
if eqprice <= strike:
df.loc[:, tvcol] = df.loc[:, pricecol]
else:
_diff = strike - eqprice
df.loc[:, tvcol] = df.loc[:, pricecol] + _diff
def _set_tv_strike_ix(df, opttype, pricecol, tvcol, eqprice):
df.loc[:, tvcol] = df.loc[:, pricecol]
if opttype == 'put':
_mask = (df.index > eqprice)
df.loc[_mask, tvcol] += eqprice - df.index[_mask]
else:
_mask = (df.index < eqprice)
df.loc[_mask, tvcol] += df.index[_mask] - eqprice
return
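# The helpers below compute an option's time value: for in-the-money
# contracts the intrinsic value (strike - spot for puts, spot - strike for
# calls) is subtracted from the quoted price, while out-of-the-money
# contracts are pure time value and the price is returned unchanged.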
def _get_put_time_val(putprice, strike, eqprice):
if strike <= eqprice:
return putprice
return round(putprice + eqprice - strike, _constants.NDIGITS_SIG)
def _get_call_time_val(callprice, strike, eqprice):
if eqprice <= strike:
return callprice
return round(callprice + strike - eqprice, _constants.NDIGITS_SIG)
| mit |
AkademieOlympia/sympy | doc/ext/docscrape_sphinx.py | 52 | 7983 | import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
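# NOTE: the unconditional return above makes the branch below unreachable;
# signature rendering appears to be deliberately short-circuited here.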
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
## Lines that are commented out are used to make the
## autosummary:: table. Since SymPy does not use the
## autosummary:: functionality, it is easiest to just comment it
## out.
#autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
#if not self._obj or hasattr(self._obj, param):
# autosum += [" %s%s" % (prefix, param)]
#else:
others.append((param, param_type, desc))
#if autosum:
# out += ['.. autosummary::', ' :toctree:', '']
# out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
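# hdr and fmt build a reST "simple table": rows of '=' characters delimit
# the table, each column is padded to its widest entry, and description
# lines are indented by n_indent so they line up under the third column.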
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for s in self._other_keys:
out += self._str_section(s)
out += self._str_member_list('Attributes')
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
larsmans/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 15 | 33321 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical Latin accented symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some Arabic
a = '\u0625' # alef with a hamza below
expected = '\u0627' # simple alef
assert_equal(strip_accents_unicode(a), expected)
# mix of accented and unaccented letters
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical Latin accented symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some Arabic
a = '\u0625' # alef with a hamza below
expected = '' # alef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix of accented and unaccented letters
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), stoplist)
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
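# Probe whether this numpy build actually emits a divide-by-zero
# RuntimeWarning; if it does not, the warning assertion below cannot be
# expected to trigger either (see the SkipTest at the end).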
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
# build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
# compare that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
# words that are highly frequent across the complete corpus are likely
# to be uninformative (either real stop words or extraction
# artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
v3.set_params(strip_accents=None, analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
"""Regression test: max_features didn't work correctly in 0.14."""
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('l1', 'l2')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('l1', 'l2'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
etkirsch/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/_splom.py | 1 | 70319 | from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Splom(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "splom"
_valid_props = {
"customdata",
"customdatasrc",
"diagonal",
"dimensiondefaults",
"dimensions",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"legendgroup",
"legendgrouptitle",
"legendrank",
"marker",
"meta",
"metasrc",
"name",
"opacity",
"selected",
"selectedpoints",
"showlegend",
"showlowerhalf",
"showupperhalf",
"stream",
"text",
"textsrc",
"type",
"uid",
"uirevision",
"unselected",
"visible",
"xaxes",
"xhoverformat",
"yaxes",
"yhoverformat",
}
# customdata
# ----------
@property
def customdata(self):
"""
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note that
"scatter" traces also append customdata items to the markers'
DOM elements.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# diagonal
# --------
@property
def diagonal(self):
"""
The 'diagonal' property is an instance of Diagonal
that may be specified as:
- An instance of :class:`plotly.graph_objs.splom.Diagonal`
- A dict of string/value properties that will be passed
to the Diagonal constructor
Supported dict properties:
visible
Determines whether or not subplots on the
diagonal are displayed.
Returns
-------
plotly.graph_objs.splom.Diagonal
"""
return self["diagonal"]
@diagonal.setter
def diagonal(self, val):
self["diagonal"] = val
# dimensions
# ----------
@property
def dimensions(self):
"""
The 'dimensions' property is a tuple of instances of
Dimension that may be specified as:
- A list or tuple of instances of plotly.graph_objs.splom.Dimension
- A list or tuple of dicts of string/value properties that
will be passed to the Dimension constructor
Supported dict properties:
axis
:class:`plotly.graph_objects.splom.dimension.Ax
is` instance or dict with compatible properties
label
Sets the label corresponding to this splom
dimension.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
values
Sets the dimension values to be plotted.
valuessrc
Sets the source reference on Chart Studio Cloud
for values .
visible
Determines whether or not this dimension is
shown on the graph. Note that even dimensions with
`visible: false` contribute to the default grid
generated by this splom trace.
Returns
-------
tuple[plotly.graph_objs.splom.Dimension]
"""
return self["dimensions"]
@dimensions.setter
def dimensions(self, val):
self["dimensions"] = val
# dimensiondefaults
# -----------------
@property
def dimensiondefaults(self):
"""
When used in a template (as
layout.template.data.splom.dimensiondefaults), sets the default
property values to use for elements of splom.dimensions
The 'dimensiondefaults' property is an instance of Dimension
that may be specified as:
- An instance of :class:`plotly.graph_objs.splom.Dimension`
- A dict of string/value properties that will be passed
to the Dimension constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.splom.Dimension
"""
return self["dimensiondefaults"]
@dimensiondefaults.setter
def dimensiondefaults(self, val):
self["dimensiondefaults"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for hoverinfo
.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.splom.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within the hover label box. Has an effect
only if the hover label text spans two or
more lines.
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
Returns
-------
plotly.graph_objs.splom.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appears
in the hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, %{_xother}, %{_xother_}, %{xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format#locale_format for details on the date formatting syntax.
The variables available in `hovertemplate` are the ones emitted
as event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-data.
Additionally, every attribute that can be specified per-point
(the ones that are `arrayOk: true`) is available. Anything
contained in tag `<extra>` is displayed in the secondary box,
for example "<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
hovertemplate .
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Same as `text`.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for hovertext
.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
# ids
# ---
@property
def ids(self):
"""
Assigns id labels to each datum. These ids provide object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
# legendgrouptitle
# ----------------
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.splom.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Supported dict properties:
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
plotly.graph_objs.splom.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
# legendrank
# ----------
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
a *reversed* `legend.traceorder` they are on the bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.splom.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
effect only if in `marker.color`is set to a
numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.color`) or the bounds set in
`marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color`is set to a numerical
array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.cmin` and/or `marker.cmax` to
be equidistant to this point. Has an effect
only if in `marker.color`is set to a numerical
array. Value should have the same units as in
`marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`plotly.graph_objects.splom.marker.Color
Bar` instance or dict with compatible
properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use`marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may
be a palette name string of the following list:
Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
line
:class:`plotly.graph_objects.splom.marker.Line`
instance or dict with compatible properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud
for opacity .
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.color`is set to a
numerical array. If true, `marker.cmin` will
correspond to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
in `marker.color`is set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px)
of the rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the
data in `size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points.
Use with `sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
symbol
Sets the marker symbol type. Adding 100 is
equivalent to appending "-open" to a symbol
name. Adding 200 is equivalent to appending
"-dot" to a symbol name. Adding 300 is
equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on Chart Studio Cloud
for symbol .
Returns
-------
plotly.graph_objs.splom.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text`, `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n].meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for meta .
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# name
# ----
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# selected
# --------
@property
def selected(self):
"""
The 'selected' property is an instance of Selected
that may be specified as:
- An instance of :class:`plotly.graph_objs.splom.Selected`
- A dict of string/value properties that will be passed
to the Selected constructor
Supported dict properties:
marker
:class:`plotly.graph_objects.splom.selected.Mar
ker` instance or dict with compatible
properties
Returns
-------
plotly.graph_objs.splom.Selected
"""
return self["selected"]
@selected.setter
def selected(self, val):
self["selected"] = val
# selectedpoints
# --------------
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
empty array means an empty selection where the `unselected` are
turned on for all points, whereas any other non-array value
means no selection at all, where the `selected` and `unselected`
styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self["selectedpoints"]
@selectedpoints.setter
def selectedpoints(self, val):
self["selectedpoints"] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
# showlowerhalf
# -------------
@property
def showlowerhalf(self):
"""
Determines whether or not subplots on the lower half from the
diagonal are displayed.
The 'showlowerhalf' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlowerhalf"]
@showlowerhalf.setter
def showlowerhalf(self, val):
self["showlowerhalf"] = val
# showupperhalf
# -------------
@property
def showupperhalf(self):
"""
Determines whether or not subplots on the upper half from the
diagonal are displayed.
The 'showupperhalf' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showupperhalf"]
@showupperhalf.setter
def showupperhalf(self, val):
self["showupperhalf"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.splom.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.splom.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# text
# ----
@property
def text(self):
"""
Sets text elements associated with each (x,y) pair to appear on
hover. If a single string, the same string appears over all the
data points. If an array of strings, the items are mapped in
order to this trace's (x,y) coordinates.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# textsrc
# -------
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for text .
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
# uid
# ---
@property
def uid(self):
"""
Assign an id to this trace. Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
# unselected
# ----------
@property
def unselected(self):
"""
The 'unselected' property is an instance of Unselected
that may be specified as:
- An instance of :class:`plotly.graph_objs.splom.Unselected`
- A dict of string/value properties that will be passed
to the Unselected constructor
Supported dict properties:
marker
:class:`plotly.graph_objects.splom.unselected.M
arker` instance or dict with compatible
properties
Returns
-------
plotly.graph_objs.splom.Unselected
"""
return self["unselected"]
@unselected.setter
def unselected(self, val):
self["unselected"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# xaxes
# -----
@property
def xaxes(self):
"""
Sets the list of x axes corresponding to dimensions of this
splom trace. By default, a splom will match the first N xaxes
where N is the number of input dimensions. Note that, in the case
where `diagonal.visible` is false and `showupperhalf` or
`showlowerhalf` is false, this splom trace will generate one
less x-axis and one less y-axis.
The 'xaxes' property is an info array that may be specified as:
* a list of elements where:
The 'xaxes[i]' property is an identifier of a particular
subplot, of type 'x', that may be specified as the string 'x'
optionally followed by an integer >= 1
(e.g. 'x', 'x1', 'x2', 'x3', etc.)
Returns
-------
list
"""
return self["xaxes"]
@xaxes.setter
def xaxes(self, val):
self["xaxes"] = val
# xhoverformat
# ------------
@property
def xhoverformat(self):
"""
Sets the hover text formatting rule for `x` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for dates
see: https://github.com/d3/d3-time-format#locale_format. We add
two items to d3's date formatter: "%h" for half of the year as
a decimal number as well as "%{n}f" for fractional seconds with
n digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display *09~15~23.46*By default
the values are formatted using `xaxis.hoverformat`.
The 'xhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["xhoverformat"]
@xhoverformat.setter
def xhoverformat(self, val):
self["xhoverformat"] = val
# yaxes
# -----
@property
def yaxes(self):
"""
Sets the list of y axes corresponding to dimensions of this
splom trace. By default, a splom will match the first N yaxes
where N is the number of input dimensions. Note that, in the case
where `diagonal.visible` is false and `showupperhalf` or
`showlowerhalf` is false, this splom trace will generate one
less x-axis and one less y-axis.
The 'yaxes' property is an info array that may be specified as:
* a list of elements where:
The 'yaxes[i]' property is an identifier of a particular
subplot, of type 'y', that may be specified as the string 'y'
optionally followed by an integer >= 1
(e.g. 'y', 'y1', 'y2', 'y3', etc.)
Returns
-------
list
"""
return self["yaxes"]
@yaxes.setter
def yaxes(self, val):
self["yaxes"] = val
# yhoverformat
# ------------
@property
def yhoverformat(self):
"""
Sets the hover text formatting rule for `y` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for dates
see: https://github.com/d3/d3-time-format#locale_format. We add
two items to d3's date formatter: "%h" for half of the year as
a decimal number as well as "%{n}f" for fractional seconds with
n digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display *09~15~23.46*By default
the values are formatted using `yaxis.hoverformat`.
The 'yhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["yhoverformat"]
@yhoverformat.setter
def yhoverformat(self, val):
self["yhoverformat"] = val
# type
# ----
@property
def type(self):
return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
customdata
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note
that "scatter" traces also append customdata items to
the markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
diagonal
:class:`plotly.graph_objects.splom.Diagonal` instance
or dict with compatible properties
dimensions
A tuple of
:class:`plotly.graph_objects.splom.Dimension` instances
or dicts with compatible properties
dimensiondefaults
When used in a template (as
layout.template.data.splom.dimensiondefaults), sets the
default property values to use for elements of
splom.dimensions
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.splom.Hoverlabel` instance
or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
Assigns id labels to each datum. These ids provide object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.splom.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with `*reversed* `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
marker
:class:`plotly.graph_objects.splom.Marker` instance or
dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
:class:`plotly.graph_objects.splom.Selected` instance
or dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas
any other non-array value means no selection at all, where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showlowerhalf
Determines whether or not subplots on the lower half
from the diagonal are displayed.
showupperhalf
Determines whether or not subplots on the upper half
from the diagonal are displayed.
stream
:class:`plotly.graph_objects.splom.Stream` instance or
dict with compatible properties
text
Sets text elements associated with each (x,y) pair to
appear on hover. If a single string, the same string
appears over all the data points. If an array of
string, the items are mapped in order to the this
trace's (x,y) coordinates.
textsrc
Sets the source reference on Chart Studio Cloud for
text .
uid
Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.splom.Unselected` instance
or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
xaxes
Sets the list of x axes corresponding to dimensions of
this splom trace. By default, a splom will match the
first N xaxes where N is the number of input
dimensions. Note that, in case where `diagonal.visible`
is false and `showupperhalf` or `showlowerhalf` is
false, this splom trace will generate one less x-axis
and one less y-axis.
xhoverformat
Sets the hover text formatting rule for `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display *09~15~23.46*By
default the values are formatted using
`xaxis.hoverformat`.
yaxes
Sets the list of y axes corresponding to dimensions of
this splom trace. By default, a splom will match the
first N yaxes where N is the number of input
dimensions. Note that, in case where `diagonal.visible`
is false and `showupperhalf` or `showlowerhalf` is
false, this splom trace will generate one less x-axis
and one less y-axis.
yhoverformat
Sets the hover text formatting rule for `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display *09~15~23.46*By
default the values are formatted using
`yaxis.hoverformat`.
"""
def __init__(
self,
arg=None,
customdata=None,
customdatasrc=None,
diagonal=None,
dimensions=None,
dimensiondefaults=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
marker=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
selected=None,
selectedpoints=None,
showlegend=None,
showlowerhalf=None,
showupperhalf=None,
stream=None,
text=None,
textsrc=None,
uid=None,
uirevision=None,
unselected=None,
visible=None,
xaxes=None,
xhoverformat=None,
yaxes=None,
yhoverformat=None,
**kwargs
):
"""
Construct a new Splom object
Splom traces generate scatter plot matrix visualizations. Each
splom `dimensions` item corresponds to a generated axis. Values
for each of those dimensions are set in `dimensions[i].values`.
Splom traces support all `scattergl` marker style attributes.
Specify `layout.grid` attributes and/or layout x-axis and
y-axis attributes for more control over the axis positioning
and style.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Splom`
customdata
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note
that "scatter" traces also append customdata items to
the markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
diagonal
:class:`plotly.graph_objects.splom.Diagonal` instance
or dict with compatible properties
dimensions
A tuple of
:class:`plotly.graph_objects.splom.Dimension` instances
or dicts with compatible properties
dimensiondefaults
When used in a template (as
layout.template.data.splom.dimensiondefaults), sets the
default property values to use for elements of
splom.dimensions
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.splom.Hoverlabel` instance
or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
Assigns id labels to each datum. These ids provide object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.splom.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with `*reversed* `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
marker
:class:`plotly.graph_objects.splom.Marker` instance or
dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
:class:`plotly.graph_objects.splom.Selected` instance
or dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas
any other non-array value means no selection at all, where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showlowerhalf
Determines whether or not subplots on the lower half
from the diagonal are displayed.
showupperhalf
Determines whether or not subplots on the upper half
from the diagonal are displayed.
stream
:class:`plotly.graph_objects.splom.Stream` instance or
dict with compatible properties
text
Sets text elements associated with each (x,y) pair to
appear on hover. If a single string, the same string
appears over all the data points. If an array of
string, the items are mapped in order to the this
trace's (x,y) coordinates.
textsrc
Sets the source reference on Chart Studio Cloud for
text .
uid
Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.splom.Unselected` instance
or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
xaxes
Sets the list of x axes corresponding to dimensions of
this splom trace. By default, a splom will match the
first N xaxes where N is the number of input
dimensions. Note that, in case where `diagonal.visible`
is false and `showupperhalf` or `showlowerhalf` is
false, this splom trace will generate one less x-axis
and one less y-axis.
xhoverformat
Sets the hover text formatting rule for `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display *09~15~23.46*By
default the values are formatted using
`xaxis.hoverformat`.
yaxes
Sets the list of y axes corresponding to dimensions of
this splom trace. By default, a splom will match the
first N yaxes where N is the number of input
dimensions. Note that, in case where `diagonal.visible`
is false and `showupperhalf` or `showlowerhalf` is
false, this splom trace will generate one less x-axis
and one less y-axis.
yhoverformat
Sets the hover text formatting rule for `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display *09~15~23.46*By
default the values are formatted using
`yaxis.hoverformat`.
Returns
-------
Splom
"""
super(Splom, self).__init__("splom")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Splom
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Splom`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("diagonal", None)
_v = diagonal if diagonal is not None else _v
if _v is not None:
self["diagonal"] = _v
_v = arg.pop("dimensions", None)
_v = dimensions if dimensions is not None else _v
if _v is not None:
self["dimensions"] = _v
_v = arg.pop("dimensiondefaults", None)
_v = dimensiondefaults if dimensiondefaults is not None else _v
if _v is not None:
self["dimensiondefaults"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("legendgrouptitle", None)
_v = legendgrouptitle if legendgrouptitle is not None else _v
if _v is not None:
self["legendgrouptitle"] = _v
_v = arg.pop("legendrank", None)
_v = legendrank if legendrank is not None else _v
if _v is not None:
self["legendrank"] = _v
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("selected", None)
_v = selected if selected is not None else _v
if _v is not None:
self["selected"] = _v
_v = arg.pop("selectedpoints", None)
_v = selectedpoints if selectedpoints is not None else _v
if _v is not None:
self["selectedpoints"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("showlowerhalf", None)
_v = showlowerhalf if showlowerhalf is not None else _v
if _v is not None:
self["showlowerhalf"] = _v
_v = arg.pop("showupperhalf", None)
_v = showupperhalf if showupperhalf is not None else _v
if _v is not None:
self["showupperhalf"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("unselected", None)
_v = unselected if unselected is not None else _v
if _v is not None:
self["unselected"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("xaxes", None)
_v = xaxes if xaxes is not None else _v
if _v is not None:
self["xaxes"] = _v
_v = arg.pop("xhoverformat", None)
_v = xhoverformat if xhoverformat is not None else _v
if _v is not None:
self["xhoverformat"] = _v
_v = arg.pop("yaxes", None)
_v = yaxes if yaxes is not None else _v
if _v is not None:
self["yaxes"] = _v
_v = arg.pop("yhoverformat", None)
_v = yhoverformat if yhoverformat is not None else _v
if _v is not None:
self["yhoverformat"] = _v
# Read-only literals
# ------------------
self._props["type"] = "splom"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
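# Minimal usage sketch (not part of the generated module): building a figure
# with a Splom trace through the public plotly.graph_objects API. The column
# labels and values below are made up purely for illustration.
if __name__ == "__main__":
    import plotly.graph_objects as go

    fig = go.Figure(
        data=go.Splom(
            dimensions=[
                dict(label="length", values=[5.1, 4.9, 4.7, 4.6]),
                dict(label="width", values=[3.5, 3.0, 3.2, 3.1]),
            ],
            diagonal=dict(visible=False),
            showupperhalf=False,
        )
    )
    fig.show()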
| mit |
ankurankan/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF kernels are especially useful when the
data points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
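# Numeric companion to the plots above (not in the original example): training
# accuracy of each kernel on this toy dataset, using the same hyper-parameters.
for kernel in ('linear', 'poly', 'rbf'):
    score = svm.SVC(kernel=kernel, gamma=2).fit(X, Y).score(X, Y)
    print("%-6s kernel: training accuracy = %.2f" % (kernel, score))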
| bsd-3-clause |
florian-f/sklearn | sklearn/tests/test_lda.py | 22 | 1521 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from .. import lda
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]])
def test_lda_predict():
"""
LDA classification.
This checks that LDA implements fit and predict and returns
correct values for a simple toy dataset.
"""
clf = lda.LDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Assure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
def test_lda_transform():
clf = lda.LDA()
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
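# Illustrative note (not part of the original tests): LDA can project to at
# most n_classes - 1 components, which is why the two-class toy problem above
# is expected to be reduced to a single dimension.
if __name__ == "__main__":
    print(lda.LDA().fit(X, y).transform(X).shape)  # expected: (6, 1)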
| bsd-3-clause |
romyny/cbir_binary_code | libs/imr/DeepSearcher.py | 1 | 3473 | # Project: cbir_binary_code
# File: DeepSearcher
# Written by: Romuald FOTSO
# Licensed: MIT License
# Copyright (c) 2017
import datetime
import h5py
import numpy as np
from sklearn.metrics import hamming_loss
from sklearn.metrics.pairwise import pairwise_distances
from libs.imr import dists
from collections import namedtuple
class DeepSearcher:
def __init__(self, deepDBPath, distanceMetric=dists.chi2_distance):
# open the HDF5 database holding both the binary codes and the deep feature vectors
self.deepDB = h5py.File(deepDBPath, mode="r")
self.num_df = 0
"""
self.deepDB = None
self.d_deepDB = {}
for caffe_mdl in l_caffe_mdl:
self.d_deepDB[caffe_mdl.name] = h5py.File(caffe_mdl.deepDB, mode="r")
self.num_df += len(self.d_deepDB[caffe_mdl.name]['deepfeatures'])
"""
# store distance metric selected
self.distanceMetric = distanceMetric
def search(self, qry_binarycode, qry_fVector, numResults=10, maxCandidates=200):
# start the timer to track how long the search took
startTime = datetime.datetime.now()
# determine the candidates and sort them in ascending order so they can
# be used to compare feature vector similarities
l_candidates = self.findCandidates(qry_binarycode, maxCandidates)
done_t1 = (datetime.datetime.now() - startTime).total_seconds()
start_t2 = datetime.datetime.now()
l_cand_fn, l_cand_id = [], []
for (_, im_fn, im_id) in l_candidates:
l_cand_fn.append(im_fn)
l_cand_id.append(im_id)
# grab feature vector of selected candidates
l_image_ids = self.deepDB["image_ids"]
l_cand_id = sorted(l_cand_id)
l_can_fVector = self.deepDB["deepfeatures"][l_cand_id]
results = {}
for (can_id, can_fVector) in zip(l_cand_id, l_can_fVector):
# compute distance between the two feature vector
d = dists.chi2_distance(qry_fVector, can_fVector)
d = float(d) / float(len(can_fVector))
if int(d * 100) > 0:
results[can_id] = d
# sort all results such that small distance values are in the top
results = sorted([(v, l_image_ids[k], k) for (k, v) in results.items()])
results = results[:numResults]
done_t2 = (datetime.datetime.now() - start_t2).total_seconds()
print("DeepSearcher.search: findcandidate_time in {} s".format(done_t1))
print("DeepSearcher.search: realsearch_time in {} s".format(done_t2))
# return the search results
SearchResult = namedtuple("SearchResult", ["results", "search_time"])
return SearchResult(results, (datetime.datetime.now() - startTime).total_seconds())
def findCandidates(self, qry_binarycode, maxCandidates):
l_image_ids = self.deepDB["image_ids"]
l_binarycode = self.deepDB["binarycode"]
l_qry_bincode = [qry_binarycode]
qry_D = pairwise_distances(np.array(l_qry_bincode), np.array(l_binarycode), 'hamming')[0]
# get idx sorted in min order
l_idx_sorted = qry_D.argsort()
# sort HAMMING distance in ascending order
maxCandidates = min(maxCandidates, len(l_binarycode))
l_candidates = sorted([(qry_D[k], l_image_ids[k], k) for k in l_idx_sorted])
l_candidates = l_candidates[:maxCandidates]
return l_candidates
def finish(self):
pass
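# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# The index path, the 64-bit binary code and the 4096-d feature vector below are
# hypothetical; they only show how DeepSearcher is meant to be called, assuming an
# HDF5 index containing the "image_ids", "binarycode" and "deepfeatures" datasets
# read by the class above.
def _example_deep_search(deep_db_path="outputs/deep_index.hdf5"):
    searcher = DeepSearcher(deep_db_path)
    qry_binarycode = np.random.randint(0, 2, 64)  # binary code used for candidate filtering
    qry_fVector = np.random.rand(4096)            # deep feature vector used for re-ranking
    result = searcher.search(qry_binarycode, qry_fVector, numResults=5, maxCandidates=100)
    for (dist, im_fn, im_id) in result.results:
        print("{}: {:.4f}".format(im_fn, dist))
    searcher.finish()
    return result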
| mit |
sumspr/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10-fold cross-validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
print( model_details + " with features", pair, "has a score of", scores )
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how they differ from what we see in the decision
# surfaces. These points are regularly spaced and do not have a black outline.
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
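# --- Illustrative sketch (added for clarity; not part of the original example) ---
# The docstring above suggests varying ``max_depth`` and ``n_estimators``; the
# helper below shows one hypothetical way to do that on the full 4-feature iris
# data, reusing only estimators already imported in this file. The parameter
# grids are arbitrary choices for demonstration.
def try_parameter_variations(max_depths=(1, 3, None), n_estimators_list=(10, 30)):
    data, target = iris.data, iris.target
    for depth in max_depths:
        for n_est in n_estimators_list:
            ada = AdaBoostClassifier(DecisionTreeClassifier(max_depth=depth),
                                     n_estimators=n_est)
            ada.fit(data, target)
            print("max_depth=%s, n_estimators=%d -> training accuracy %.3f"
                  % (depth, n_est, ada.score(data, target)))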
| bsd-3-clause |
siutanwong/scikit-learn | benchmarks/bench_covertype.py | 154 | 7296 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier     train-time   test-time   error-rate
--------------------------------------------------
liblinear        15.9744s     0.0705s       0.2305
GaussianNB        3.0666s     0.3884s       0.4841
SGD               1.0558s     0.1152s       0.2300
CART             79.4296s     0.0523s       0.0469
RandomForest   1190.1620s     0.5881s       0.0243
ExtraTrees      640.3194s     0.6495s       0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
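# Example invocation (illustrative only; the classifier subset and worker count
# are arbitrary choices among the argparse options defined above):
#
#     python bench_covertype.py --classifiers liblinear SGD CART --n-jobs 2
#
# The memoized train/test split is cached under
# get_data_home()/covertype_benchmark_data by the Memory object above, so
# repeated runs skip the download and preprocessing steps.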
| bsd-3-clause |
kaiodt/nanook_path_tracking | odometria/plot_odometria.py | 1 | 1204 | #! /usr/bin/env python
# coding=utf-8
import matplotlib.pyplot as plt
ensaio = int(raw_input("Número do Ensaio: "))
path = "/home/nanook/nanook_ros/src/nanook_path_tracking/odometria/ensaios/ensaio_odometria%d.txt" % ensaio
data_file = open(path, 'r')
samples = []
t = []
x = []
y = []
theta = []
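# Expected log format (inferred from the parsing loop below): one whitespace-
# separated line per sample, "index time x y theta", e.g.
#   0 1502727040.12 0.00 0.00 0.0
#   1 1502727040.22 0.01 0.00 0.5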
##### File reading #####
# Read the initial time instant (first line)
t0 = float(data_file.readline().split()[1])
data_file.seek(0)
for line in data_file:
line = line.split()
samples.append(int(line[0]))
t.append(float(line[1]) - t0)
x.append(float(line[2]))
y.append(float(line[3]))
theta.append(float(line[4]))
data_file.close()
##### Plot #####
plt.close('all')
plt.figure()
plt.subplot(2, 2, 1)
plt.plot(t, x, 'b-')
plt.title('x')
plt.xlabel('Tempo (s)')
plt.ylabel('x (m)')
plt.grid('on')
plt.subplot(2, 2, 2)
plt.plot(t, y, 'b-')
plt.title('y')
plt.xlabel('Tempo (s)')
plt.ylabel('y (m)')
plt.grid('on')
plt.subplot(2, 2, 3)
plt.plot(t, theta, 'b-')
plt.title('theta')
plt.xlabel('Tempo (s)')
plt.ylabel('theta (graus)')
plt.grid('on')
plt.subplot(2, 2, 4)
plt.plot(x, y, 'b-')
plt.title('Pos')
plt.xlabel('x (m)')
plt.ylabel('y (m)')
plt.grid('on')
plt.show() | gpl-3.0 |
quantopian/zipline | zipline/pipeline/loaders/frame.py | 1 | 6665 | """
PipelineLoader accepting a DataFrame as input.
"""
from functools import partial
from interface import implements
from numpy import (
ix_,
zeros,
)
from pandas import (
DataFrame,
DatetimeIndex,
Index,
Int64Index,
)
from zipline.lib.adjusted_array import AdjustedArray
from zipline.lib.adjustment import make_adjustment_from_labels
from zipline.utils.numpy_utils import as_column
from .base import PipelineLoader
ADJUSTMENT_COLUMNS = Index([
'sid',
'value',
'kind',
'start_date',
'end_date',
'apply_date',
])
class DataFrameLoader(implements(PipelineLoader)):
"""
A PipelineLoader that reads its input from DataFrames.
Mostly useful for testing, but can also be used for real work if your data
fits in memory.
Parameters
----------
column : zipline.pipeline.data.BoundColumn
The column whose data is loadable by this loader.
baseline : pandas.DataFrame
A DataFrame with index of type DatetimeIndex and columns of type
Int64Index. Dates should be labelled with the first date on which a
value would be **available** to an algorithm. This means that OHLCV
data should generally be shifted back by a trading day before being
supplied to this class.
adjustments : pandas.DataFrame, default=None
A DataFrame with the following columns:
sid : int
value : any
kind : int (zipline.pipeline.loaders.frame.ADJUSTMENT_TYPES)
start_date : datetime64 (can be NaT)
end_date : datetime64 (must be set)
apply_date : datetime64 (must be set)
The default of None is interpreted as "no adjustments to the baseline".
"""
def __init__(self, column, baseline, adjustments=None):
self.column = column
self.baseline = baseline.values.astype(self.column.dtype)
self.dates = baseline.index
self.assets = baseline.columns
if adjustments is None:
adjustments = DataFrame(
index=DatetimeIndex([]),
columns=ADJUSTMENT_COLUMNS,
)
else:
# Ensure that columns are in the correct order.
adjustments = adjustments.reindex_axis(ADJUSTMENT_COLUMNS, axis=1)
adjustments.sort_values(['apply_date', 'sid'], inplace=True)
self.adjustments = adjustments
self.adjustment_apply_dates = DatetimeIndex(adjustments.apply_date)
self.adjustment_end_dates = DatetimeIndex(adjustments.end_date)
self.adjustment_sids = Int64Index(adjustments.sid)
def format_adjustments(self, dates, assets):
"""
Build a dict of Adjustment objects in the format expected by
AdjustedArray.
Returns a dict of the form:
{
# Integer index into `dates` for the date on which we should
# apply the list of adjustments.
1 : [
Float64Multiply(first_row=2, last_row=4, col=3, value=0.5),
Float64Overwrite(first_row=3, last_row=5, col=1, value=2.0),
...
],
...
}
"""
make_adjustment = partial(make_adjustment_from_labels, dates, assets)
min_date, max_date = dates[[0, -1]]
# TODO: Consider porting this to Cython.
if len(self.adjustments) == 0:
return {}
# Mask for adjustments whose apply_dates are in the requested window of
# dates.
date_bounds = self.adjustment_apply_dates.slice_indexer(
min_date,
max_date,
)
dates_filter = zeros(len(self.adjustments), dtype='bool')
dates_filter[date_bounds] = True
# Ignore adjustments whose apply_date is in range, but whose end_date
# is out of range.
dates_filter &= (self.adjustment_end_dates >= min_date)
# Mask for adjustments whose sids are in the requested assets.
sids_filter = self.adjustment_sids.isin(assets.values)
adjustments_to_use = self.adjustments.loc[
dates_filter & sids_filter
].set_index('apply_date')
# For each apply_date on which we have an adjustment, compute
# the integer index of that adjustment's apply_date in `dates`.
# Then build a list of Adjustment objects for that apply_date.
# This logic relies on the sorting applied on the previous line.
out = {}
previous_apply_date = object()
for row in adjustments_to_use.itertuples():
# This expansion depends on the ordering of the DataFrame columns,
# defined above.
apply_date, sid, value, kind, start_date, end_date = row
if apply_date != previous_apply_date:
# Get the next apply date if no exact match.
row_loc = dates.get_loc(apply_date, method='bfill')
current_date_adjustments = out[row_loc] = []
previous_apply_date = apply_date
# Look up the appropriate Adjustment constructor based on the value
# of `kind`.
current_date_adjustments.append(
make_adjustment(start_date, end_date, sid, kind, value)
)
return out
def load_adjusted_array(self, domain, columns, dates, sids, mask):
"""
Load data from our stored baseline.
"""
if len(columns) != 1:
raise ValueError(
"Can't load multiple columns with DataFrameLoader"
)
column = columns[0]
self._validate_input_column(column)
date_indexer = self.dates.get_indexer(dates)
assets_indexer = self.assets.get_indexer(sids)
# Boolean arrays with True on matched entries
good_dates = (date_indexer != -1)
good_assets = (assets_indexer != -1)
data = self.baseline[ix_(date_indexer, assets_indexer)]
mask = (good_assets & as_column(good_dates)) & mask
# Mask out requested columns/rows that didn't match.
data[~mask] = column.missing_value
return {
column: AdjustedArray(
# Pull out requested columns/rows from our baseline data.
data=data,
adjustments=self.format_adjustments(dates, sids),
missing_value=column.missing_value,
),
}
def _validate_input_column(self, column):
"""Make sure a passed column is our column.
"""
if column != self.column and column.unspecialize() != self.column:
raise ValueError("Can't load unknown column %s" % column)
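# --- Illustrative construction sketch (added for clarity; not part of the original module) ---
# Shows the baseline layout documented in the class docstring: a DatetimeIndex of
# dates by an Int64Index of sids. The column, sids, dates and prices are examples
# only; USEquityPricing is assumed to be importable from zipline.pipeline.data.
def _example_loader():
    from zipline.pipeline.data import USEquityPricing

    sids = Int64Index([1, 2, 3])
    dates = DatetimeIndex(['2014-01-02', '2014-01-03', '2014-01-06'])
    baseline = DataFrame([[10.0, 20.0, 30.0],
                          [10.5, 20.5, 30.5],
                          [11.0, 21.0, 31.0]],
                         index=dates, columns=sids)
    # adjustments is left as None ("no adjustments to the baseline"); when
    # supplied, it must carry the ADJUSTMENT_COLUMNS documented above.
    return DataFrameLoader(USEquityPricing.close, baseline)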
| apache-2.0 |
toobaz/pandas | pandas/tests/dtypes/test_generic.py | 2 | 4127 | from warnings import catch_warnings, simplefilter
import numpy as np
from pandas.core.dtypes import generic as gt
import pandas as pd
from pandas.util import testing as tm
class TestABCClasses:
tuples = [[1, 2, 2], ["red", "blue", "red"]]
multi_index = pd.MultiIndex.from_arrays(tuples, names=("number", "color"))
datetime_index = pd.to_datetime(["2000/1/1", "2010/1/1"])
timedelta_index = pd.to_timedelta(np.arange(5), unit="s")
period_index = pd.period_range("2000/1/1", "2010/1/1/", freq="M")
categorical = pd.Categorical([1, 2, 3], categories=[2, 3, 1])
categorical_df = pd.DataFrame({"values": [1, 2, 3]}, index=categorical)
df = pd.DataFrame({"names": ["a", "b", "c"]}, index=multi_index)
with catch_warnings():
simplefilter("ignore", FutureWarning)
sparse_series = pd.Series([1, 2, 3]).to_sparse()
sparse_frame = pd.SparseDataFrame({"a": [1, -1, None]})
sparse_array = pd.SparseArray(np.random.randn(10))
datetime_array = pd.core.arrays.DatetimeArray(datetime_index)
timedelta_array = pd.core.arrays.TimedeltaArray(timedelta_index)
def test_abc_types(self):
assert isinstance(pd.Index(["a", "b", "c"]), gt.ABCIndex)
assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCInt64Index)
assert isinstance(pd.UInt64Index([1, 2, 3]), gt.ABCUInt64Index)
assert isinstance(pd.Float64Index([1, 2, 3]), gt.ABCFloat64Index)
assert isinstance(self.multi_index, gt.ABCMultiIndex)
assert isinstance(self.datetime_index, gt.ABCDatetimeIndex)
assert isinstance(self.timedelta_index, gt.ABCTimedeltaIndex)
assert isinstance(self.period_index, gt.ABCPeriodIndex)
assert isinstance(self.categorical_df.index, gt.ABCCategoricalIndex)
assert isinstance(pd.Index(["a", "b", "c"]), gt.ABCIndexClass)
assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCIndexClass)
assert isinstance(pd.Series([1, 2, 3]), gt.ABCSeries)
assert isinstance(self.df, gt.ABCDataFrame)
assert isinstance(self.sparse_series, gt.ABCSparseSeries)
assert isinstance(self.sparse_array, gt.ABCSparseArray)
assert isinstance(self.sparse_frame, gt.ABCSparseDataFrame)
assert isinstance(self.categorical, gt.ABCCategorical)
assert isinstance(pd.Period("2012", freq="A-DEC"), gt.ABCPeriod)
assert isinstance(pd.DateOffset(), gt.ABCDateOffset)
assert isinstance(pd.Period("2012", freq="A-DEC").freq, gt.ABCDateOffset)
assert not isinstance(pd.Period("2012", freq="A-DEC"), gt.ABCDateOffset)
assert isinstance(pd.Interval(0, 1.5), gt.ABCInterval)
assert not isinstance(pd.Period("2012", freq="A-DEC"), gt.ABCInterval)
assert isinstance(self.datetime_array, gt.ABCDatetimeArray)
assert not isinstance(self.datetime_index, gt.ABCDatetimeArray)
assert isinstance(self.timedelta_array, gt.ABCTimedeltaArray)
assert not isinstance(self.timedelta_index, gt.ABCTimedeltaArray)
def test_setattr_warnings():
# GH7175 - GOTCHA: You can't use dot notation to add a column...
d = {
"one": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"two": pd.Series([1.0, 2.0, 3.0, 4.0], index=["a", "b", "c", "d"]),
}
df = pd.DataFrame(d)
with catch_warnings(record=True) as w:
# successfully add new column
# this should not raise a warning
df["three"] = df.two + 1
assert len(w) == 0
assert df.three.sum() > df.two.sum()
with catch_warnings(record=True) as w:
# successfully modify column in place
# this should not raise a warning
df.one += 1
assert len(w) == 0
assert df.one.iloc[0] == 2
with catch_warnings(record=True) as w:
# successfully add an attribute to a series
# this should not raise a warning
df.two.not_an_index = [1, 2]
assert len(w) == 0
with tm.assert_produces_warning(UserWarning):
# warn when setting column to nonexistent name
df.four = df.two + 2
assert df.four.sum() > df.two.sum()
| bsd-3-clause |
botlabio/autonomio | autonomio/hyperscan.py | 1 | 6654 | import datetime as dt
import pandas as pd
from hyperparameters import load_parameters
from commands import train
def hyperscan(x,
y,
data,
epochs,
flatten,
dropout,
batch_sizes,
batch_sizes_step,
layers,
layers_step,
activation_out,
neuron_max,
losses,
optimizers,
activations,
shapes):
'''
mode = 'auto' will scan through all supported values;
'selective' will scan through the selected values only.
In selective mode, fix the parameters you don't want to
scan and leave the rest set to 'auto'. Every parameter
left on 'auto' will be scanned.
Each input can be either a string for a single parameter
value, or a list for multiple values.
'''
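# Illustrative call (column names and values are hypothetical; kept as a
# comment so it does not execute inside this function):
#
#   results_df = hyperscan('wins', 'outcome', df, epochs=50, flatten='mean',
#                          dropout=0.2, batch_sizes=[8, 16],
#                          batch_sizes_step=2, layers='auto', layers_step=2,
#                          activation_out='sigmoid', neuron_max=10,
#                          losses='binary_crossentropy', optimizers='auto',
#                          activations='auto', shapes='auto')
#
# Here 'losses' is fixed (the "selective" case for that parameter) while
# every parameter left as 'auto' is scanned.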
temp_list = []
if losses == 'auto':
losses = load_parameters('losses')
elif type(losses) is str:
losses = [losses]
if activations == 'auto':
activations = load_parameters('activations')
elif type(activations) is str:
activations = [activations]
if optimizers == 'auto':
optimizers = load_parameters('optimizers')
elif type(optimizers) is str:
optimizers = [optimizers]
if shapes == 'auto':
shapes = load_parameters('shapes')
elif type(shapes) is str:
shapes = [shapes]
if layers == 'auto':
layers = range(2, 15, layers_step)
elif type(layers) is int:
layers = [layers]
elif type(layers) is list:
layers = range(layers[0], layers[1], layers_step)
if batch_sizes == 'auto':
batch_sizes = range(2, 15, batch_sizes_step)
elif type(batch_sizes) is int:
batch_sizes = [batch_sizes]
elif type(batch_sizes) is list:
batch_sizes = range(batch_sizes[0], batch_sizes[1], batch_sizes_step)
a = len(losses)
b = len(shapes)
c = len(activations)
d = len(optimizers)
e = len(batch_sizes)
f = len(layers)
no_of_tries = a * b * c * d * e * f
start_time = dt.datetime.now()
print("Total tries in this scan: %d" % no_of_tries)
print("Scan started on: %s" % start_time.strftime('%H:%M'))
column_list = ['train_acc', 'train_acc_mean', 'train_acc_min',
'train_acc_max', 'train_acc_std', 'train_loss',
'train_loss_mean', 'train_loss_min', 'train_loss_max',
'train_loss_std', 'test_acc', 'test_acc_mean',
'test_acc_min', 'test_acc_max', 'test_acc_std', 'test_loss',
'test_loss_mean', 'test_loss_min', 'test_loss_max',
'test_loss_std', 'shape', 'activation', 'activation_out',
'loss', 'optimizer', 'epochs', 'layers', 'features',
'dropout', 'batch_size', 'max_neurons', 'network_scale']
counter = 0
for loss in losses:
for activation in activations:
for optimizer in optimizers:
for shape in shapes:
for layer in layers:
for batch_size in batch_sizes:
counter += 1
temp = train(x,
y,
data,
epoch=epochs,
flatten=flatten,
dropout=dropout,
layers=layer,
batch_size=batch_size,
activation_out=activation_out,
neuron_max=neuron_max,
hyperscan=True,
loss=loss,
activation=activation,
optimizer=optimizer,
shape=shape)
out = _data_prep(temp)
temp_list.append(out)
if counter == 1:
try_time = dt.datetime.now()
temp = (try_time - start_time) * no_of_tries
finish_est = temp + start_time
finish_est = finish_est.strftime('%H:%M')
print("Estimated finish: %s" % finish_est)
# creating a backup to a file every 50 tries
if counter % 50 == 0:
backup_to_csv = _to_df(temp_list, column_list)
backup_to_csv.to_csv('hyperscan.csv')
print('tries left: %d' % (no_of_tries - counter))
df = _to_df(temp_list, column_list)
return df
def _to_df(data, cols):
'''Dataframe maker
Takes the input of the scan and puts it in to
a dataframe. This is to avoid having to use
the same code twice.
'''
df = pd.DataFrame(data)
df.columns = cols
return df
def _data_prep(data):
'''
Prepares the data for appending to the dataframe, round by round.
'''
a = data[1][-10:]['train_acc'].median()
b = data[1][-10:]['train_acc'].mean()
c = data[1]['train_acc'].min()
d = data[1]['train_acc'].max()
e = data[1][-10:]['train_acc'].std()
f = data[1][-10:]['train_loss'].median()
g = data[1][-10:]['train_loss'].mean()
h = data[1]['train_loss'].min()
i = data[1]['train_loss'].max()
j = data[1][-10:]['train_loss'].std()
k = data[1][-10:]['test_acc'].median()
l = data[1][-10:]['test_acc'].mean()
m = data[1]['test_acc'].min()
n = data[1]['test_acc'].max()
o = data[1][-10:]['test_acc'].std()
p = data[1][-10:]['test_loss'].median()
q = data[1][-10:]['test_loss'].mean()
r = data[1]['test_loss'].min()
s = data[1]['test_loss'].max()
t = data[1][-10:]['test_loss'].std()
u = data[0]['shape']
v = data[2]['activation']
w = data[2]['activation_out']
x = data[2]['loss']
y = data[2]['optimizer']
z = data[0]['epochs']
aa = data[0]['layers']
ab = data[0]['features']
ac = data[0]['dropout']
ad = data[0]['batch_size']
ae = data[0]['max_neurons']
af = data[0]['network_scale']
out = [a, b, c, d, e, f, g, h, i, j, k, l, m, n,
o, p, q, r, s, t, u, v, w, x, y, z, aa, ab, ac, ad, ae, af]
return out
| mit |
lindsayad/sympy | sympy/plotting/tests/test_plot_implicit.py | 6 | 2827 | import warnings
from sympy import (plot_implicit, cos, Symbol, symbols, Eq, sin, re, And, Or, exp, I,
tan, pi)
from sympy.plotting.plot import unset_show
from tempfile import NamedTemporaryFile
from sympy.utilities.pytest import skip
from sympy.external import import_module
#Set plots not to show
unset_show()
def tmp_file(name=''):
return NamedTemporaryFile(suffix='.png').name
def plot_and_save(expr, *args, **kwargs):
name = kwargs.pop('name', '')
p = plot_implicit(expr, *args, **kwargs)
p.save(tmp_file(name))
# Close the plot to avoid a warning from matplotlib
p._backend.close()
def plot_implicit_tests(name):
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
#implicit plot tests
plot_and_save(Eq(y, cos(x)), (x, -5, 5), (y, -2, 2), name=name)
plot_and_save(Eq(y**2, x**3 - x), (x, -5, 5),
(y, -4, 4), name=name)
plot_and_save(y > 1 / x, (x, -5, 5),
(y, -2, 2), name=name)
plot_and_save(y < 1 / tan(x), (x, -5, 5),
(y, -2, 2), name=name)
plot_and_save(y >= 2 * sin(x) * cos(x), (x, -5, 5),
(y, -2, 2), name=name)
plot_and_save(y <= x**2, (x, -3, 3),
(y, -1, 5), name=name)
#Test all input args for plot_implicit
plot_and_save(Eq(y**2, x**3 - x))
plot_and_save(Eq(y**2, x**3 - x), adaptive=False)
plot_and_save(Eq(y**2, x**3 - x), adaptive=False, points=500)
plot_and_save(y > x, (x, -5, 5))
plot_and_save(And(y > exp(x), y > x + 2))
plot_and_save(Or(y > x, y > -x))
plot_and_save(x**2 - 1, (x, -5, 5))
plot_and_save(x**2 - 1)
plot_and_save(y > x, depth=-5)
plot_and_save(y > x, depth=5)
plot_and_save(y > cos(x), adaptive=False)
plot_and_save(y < cos(x), adaptive=False)
plot_and_save(And(y > cos(x), Or(y > x, Eq(y, x))))
plot_and_save(y - cos(pi / x))
#Test plots which cannot be rendered using the adaptive algorithm
#TODO: catch the warning.
plot_and_save(Eq(y, re(cos(x) + I*sin(x))), name=name)
with warnings.catch_warnings(record=True) as w:
plot_and_save(x**2 - 1, legend='An implicit plot')
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert 'No labelled objects found' in str(w[0].message)
def test_line_color():
x, y = symbols('x, y')
p = plot_implicit(x**2 + y**2 - 1, line_color="green", show=False)
assert p._series[0].line_color == "green"
p = plot_implicit(x**2 + y**2 - 1, line_color='r', show=False)
assert p._series[0].line_color == "r"
def test_matplotlib():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
plot_implicit_tests('test')
test_line_color()
else:
skip("Matplotlib not the default backend")
| bsd-3-clause |
I2Cvb/data_balancing | src/data_loading/__init__.py | 2 | 3790 | """
The :mod:`data_balancing.data_loading` module includes utilities to load datasets
that take advantage of :mod:`sklearn.datasets`.
"""
# from .base import load_diabetes
# from .base import load_digits
# from .base import load_files
# from .base import load_iris
# from .base import load_linnerud
# from .base import load_boston
from sklearn.datasets.base import get_data_home
from sklearn.datasets.base import clear_data_home
# from .base import load_sample_images
# from .base import load_sample_image
# from .covtype import fetch_covtype
# from .mlcomp import load_mlcomp
# from .lfw import load_lfw_pairs
# from .lfw import load_lfw_people
# from .lfw import fetch_lfw_pairs
# from .lfw import fetch_lfw_people
# from .twenty_newsgroups import fetch_20newsgroups
# from .twenty_newsgroups import fetch_20newsgroups_vectorized
from sklearn.datasets.mldata import fetch_mldata, mldata_filename
# from .samples_generator import make_classification
# from .samples_generator import make_multilabel_classification
# from .samples_generator import make_hastie_10_2
# from .samples_generator import make_regression
# from .samples_generator import make_blobs
# from .samples_generator import make_moons
# from .samples_generator import make_circles
# from .samples_generator import make_friedman1
# from .samples_generator import make_friedman2
# from .samples_generator import make_friedman3
# from .samples_generator import make_low_rank_matrix
# from .samples_generator import make_sparse_coded_signal
# from .samples_generator import make_sparse_uncorrelated
# from .samples_generator import make_spd_matrix
# from .samples_generator import make_swiss_roll
# from .samples_generator import make_s_curve
# from .samples_generator import make_sparse_spd_matrix
# from .samples_generator import make_gaussian_quantiles
# from .samples_generator import make_biclusters
# from .samples_generator import make_checkerboard
# from .svmlight_format import load_svmlight_file
# from .svmlight_format import load_svmlight_files
# from .svmlight_format import dump_svmlight_file
# from .olivetti_faces import fetch_olivetti_faces
# from .species_distributions import fetch_species_distributions
# from .california_housing import fetch_california_housing
__all__ = ['clear_data_home',
# 'dump_svmlight_file',
# 'fetch_20newsgroups',
# 'fetch_20newsgroups_vectorized',
# 'fetch_lfw_pairs',
# 'fetch_lfw_people',
'fetch_mldata',
# 'fetch_olivetti_faces',
# 'fetch_species_distributions',
# 'fetch_california_housing',
# 'fetch_covtype',
'get_data_home',
# 'load_boston',
# 'load_diabetes',
# 'load_digits',
# 'load_files',
# 'load_iris',
# 'load_lfw_pairs',
# 'load_lfw_people',
# 'load_linnerud',
# 'load_mlcomp',
# 'load_sample_image',
# 'load_sample_images',
# 'load_svmlight_file',
# 'load_svmlight_files',
# 'make_biclusters',
# 'make_blobs',
# 'make_circles',
# 'make_classification',
# 'make_checkerboard',
# 'make_friedman1',
# 'make_friedman2',
# 'make_friedman3',
# 'make_gaussian_quantiles',
# 'make_hastie_10_2',
# 'make_low_rank_matrix',
# 'make_moons',
# 'make_multilabel_classification',
# 'make_regression',
# 'make_s_curve',
# 'make_sparse_coded_signal',
# 'make_sparse_spd_matrix',
# 'make_sparse_uncorrelated',
# 'make_spd_matrix',
# 'make_swiss_roll',
'mldata_filename',
]
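# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Only utilities re-exported above are used; the dataset name is a hypothetical
# example and fetch_mldata needs network access or an already-populated data home.
def _example_data_home():
    data_home = get_data_home()  # directory where downloaded datasets are cached
    # e.g. bunch = fetch_mldata('iris', data_home=data_home)  # hypothetical dataset name
    return data_home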
| mit |
netsamir/dotfiles | files/vim/bundle/YouCompleteMe/third_party/ycmd/third_party/python-future/tests/test_past/test_translation.py | 9 | 22246 | # -*- coding: utf-8 -*-
"""
Tests for the Py2-to-Py3 autotranslation machinery in :mod:`past.translation`.
"""
from __future__ import absolute_import, division, print_function
import os
import textwrap
import sys
import pprint
import tempfile
import os
import io
from subprocess import Popen, PIPE
from past import utils
from past.builtins import basestring, str as oldstr, unicode
from past.translation import install_hooks, remove_hooks, common_substring
from future.tests.base import (unittest, CodeHandler, skip26,
expectedFailurePY3, expectedFailurePY26)
class TestTranslate(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp() + os.path.sep
# def tearDown(self):
# remove_hooks()
def test_common_substring(self):
s1 = '/home/user/anaconda/envs/future3/lib/python3.3/lib-dynload/math.cpython-33m.so'
s2 = '/home/user/anaconda/envs/future3/lib/python3.3/urllib/__init__.py'
c = '/home/user/anaconda/envs/future3/lib/python3.3'
self.assertEqual(c, common_substring(s1, s2))
s1 = r'/Users/Fred Flintstone/Python3.3/lib/something'
s2 = r'/Users/Fred Flintstone/Python3.3/lib/somethingelse'
c = r'/Users/Fred Flintstone/Python3.3/lib'
self.assertEqual(c, common_substring(s1, s2))
def write_and_import(self, code, modulename='mymodule'):
self.assertTrue('.py' not in modulename)
filename = modulename + '.py'
if isinstance(code, bytes):
code = code.decode('utf-8')
# Be explicit about encoding the temp file as UTF-8 (issue #63):
with io.open(self.tempdir + filename, 'w', encoding='utf-8') as f:
f.write(textwrap.dedent(code).strip() + '\n')
# meta_path_len = len(sys.meta_path)
install_hooks(modulename)
# print('Hooks installed')
# assert len(sys.meta_path) == 1 + meta_path_len
# print('sys.meta_path is: {0}'.format(sys.meta_path))
module = None
sys.path.insert(0, self.tempdir)
try:
module = __import__(modulename)
except SyntaxError:
print('Bombed!')
else:
print('Succeeded!')
finally:
remove_hooks()
# print('Hooks removed')
sys.path.remove(self.tempdir)
return module
def test_print_statement(self):
code = """
print 'Hello from a Python 2-style print statement!'
finished = True
"""
printer = self.write_and_import(code, 'printer')
self.assertTrue(printer.finished)
def test_exec_statement(self):
code = """
exec 'x = 5 + 2'
"""
module = self.write_and_import(code, 'execer')
self.assertEqual(module.x, 7)
def test_div(self):
code = """
x = 3 / 2
"""
module = self.write_and_import(code, 'div')
self.assertEqual(module.x, 1)
def test_import_future_standard_library(self):
"""
Does futurized Py3-like code like this work under autotranslation??
"""
code = """
from future import standard_library
standard_library.install_hooks()
import configparser
"""
module = self.write_and_import(code, 'future_standard_library')
self.assertTrue('configparser' in dir(module))
from future import standard_library
standard_library.remove_hooks()
def test_old_builtin_functions(self):
code = """
# a = raw_input()
import sys
b = open(sys.executable, 'rb')
b.close()
def is_even(x):
return x % 2 == 0
c = filter(is_even, range(10))
def double(x):
return x * 2
d = map(double, c)
e = isinstance('abcd', str)
for g in xrange(10**3):
pass
# super(MyClass, self)
"""
module = self.write_and_import(code, 'test_builtin_functions')
self.assertTrue(hasattr(module.b, 'readlines'))
self.assertTrue(isinstance(module.c, list))
self.assertEqual(module.c, [0, 2, 4, 6, 8])
self.assertEqual(module.d, [0, 4, 8, 12, 16])
self.assertTrue(module.e)
@expectedFailurePY3
def test_import_builtin_types(self):
code = """
s1 = 'abcd'
s2 = u'abcd'
b1 = b'abcd'
b2 = s2.encode('utf-8')
d1 = {}
d2 = dict((i, i**2) for i in range(10))
i1 = 1923482349324234L
i2 = 1923482349324234
"""
module = self.write_and_import(code, 'test_builtin_types')
self.assertTrue(isinstance(module.s1, oldstr))
self.assertTrue(isinstance(module.s2, unicode))
self.assertTrue(isinstance(module.b1, oldstr))
def test_xrange(self):
code = '''
total = 0
for i in xrange(10):
total += i
'''
module = self.write_and_import(code, 'xrange')
self.assertEqual(module.total, 45)
def test_exception_syntax(self):
"""
Test of whether futurize handles the old-style exception syntax
"""
code = """
value = 'string'
try:
value += 10
except TypeError, e: # old exception syntax
value += ': success!'
"""
module = self.write_and_import(code, 'py2_exceptions')
self.assertEqual(module.value, 'string: success!')
# class TestFuturizeSimple(CodeHandler):
# """
# This class contains snippets of Python 2 code (invalid Python 3) and
# tests for whether they can be imported correctly from Python 3 with the
# import hooks.
# """
#
# @unittest.expectedFailure
# def test_problematic_string(self):
# """ This string generates a SyntaxError on Python 3 unless it has
# an r prefix.
# """
# before = r"""
# s = 'The folder is "C:\Users"'.
# """
# after = r"""
# s = r'The folder is "C:\Users"'.
# """
# self.convert_check(before, after)
#
# def test_tobytes(self):
# """
# The --tobytes option converts all UNADORNED string literals 'abcd' to b'abcd'.
# It does apply to multi-line strings but doesn't apply if it's a raw
# string, because ur'abcd' is a SyntaxError on Python 2 and br'abcd' is a
# SyntaxError on Python 3.
# """
# before = r"""
# s0 = '1234'
# s1 = '''5678
# '''
# s2 = "9abc"
# # Unchanged:
# s3 = r'1234'
# s4 = R"defg"
# s5 = u'hijk'
# s6 = u"lmno"
# s7 = b'lmno'
# s8 = b"pqrs"
# """
# after = r"""
# s0 = b'1234'
# s1 = b'''5678
# '''
# s2 = b"9abc"
# # Unchanged:
# s3 = r'1234'
# s4 = R"defg"
# s5 = u'hijk'
# s6 = u"lmno"
# s7 = b'lmno'
# s8 = b"pqrs"
# """
# self.convert_check(before, after, tobytes=True)
#
# @unittest.expectedFailure
# def test_izip(self):
# before = """
# from itertools import izip
# for (a, b) in izip([1, 3, 5], [2, 4, 6]):
# pass
# """
# after = """
# from __future__ import unicode_literals
# from future.builtins import zip
# for (a, b) in zip([1, 3, 5], [2, 4, 6]):
# pass
# """
# self.convert_check(before, after, stages=(1, 2), ignore_imports=False)
#
# @unittest.expectedFailure
# def test_no_unneeded_list_calls(self):
# """
# TODO: get this working
# """
# code = """
# for (a, b) in zip(range(3), range(3, 6)):
# pass
# """
# self.unchanged(code)
#
# def test_xrange(self):
# code = '''
# for i in xrange(10):
# pass
# '''
# self.convert(code)
#
# @unittest.expectedFailure
# def test_source_coding_utf8(self):
# """
# Tests to ensure that the source coding line is not corrupted or
# removed. It must be left as the first line in the file (including
# before any __future__ imports). Also tests whether the unicode
# characters in this encoding are parsed correctly and left alone.
# """
# code = """
# # -*- coding: utf-8 -*-
# icons = [u"◐", u"◓", u"◑", u"◒"]
# """
# self.unchanged(code)
#
# def test_exception_syntax(self):
# """
# Test of whether futurize handles the old-style exception syntax
# """
# before = """
# try:
# pass
# except IOError, e:
# val = e.errno
# """
# after = """
# try:
# pass
# except IOError as e:
# val = e.errno
# """
# self.convert_check(before, after)
#
# def test_super(self):
# """
# This tests whether futurize keeps the old two-argument super() calls the
# same as before. It should, because this still works in Py3.
# """
# code = '''
# class VerboseList(list):
# def append(self, item):
# print('Adding an item')
# super(VerboseList, self).append(item)
# '''
# self.unchanged(code)
#
# @unittest.expectedFailure
# def test_file(self):
# """
# file() as a synonym for open() is obsolete and invalid on Python 3.
# """
# before = '''
# f = file(__file__)
# data = f.read()
# f.close()
# '''
# after = '''
# f = open(__file__)
# data = f.read()
# f.close()
# '''
# self.convert_check(before, after)
#
# def test_apply(self):
# before = '''
# def addup(*x):
# return sum(x)
#
# assert apply(addup, (10,20)) == 30
# '''
# after = """
# def addup(*x):
# return sum(x)
#
# assert addup(*(10,20)) == 30
# """
# self.convert_check(before, after)
#
# @unittest.skip('not implemented yet')
# def test_download_pypi_package_and_test(self, package_name='future'):
# URL = 'http://pypi.python.org/pypi/{0}/json'
#
# import requests
# r = requests.get(URL.format(package_name))
# pprint.pprint(r.json())
#
# download_url = r.json()['urls'][0]['url']
# filename = r.json()['urls'][0]['filename']
# # r2 = requests.get(download_url)
# # with open('/tmp/' + filename, 'w') as tarball:
# # tarball.write(r2.content)
#
# def test_raw_input(self):
# """
# Passes in a string to the waiting input() after futurize
# conversion.
#
# The code is the first snippet from these docs:
# http://docs.python.org/2/library/2to3.html
# """
# before = """
# def greet(name):
# print "Hello, {0}!".format(name)
# print "What's your name?"
# name = raw_input()
# greet(name)
# """
# desired = """
# def greet(name):
# print("Hello, {0}!".format(name))
# print("What's your name?")
# name = input()
# greet(name)
# """
# self.convert_check(before, desired, run=False)
#
# for interpreter in self.interpreters:
# p1 = Popen([interpreter, self.tempdir + 'mytestscript.py'],
# stdout=PIPE, stdin=PIPE, stderr=PIPE)
# (stdout, stderr) = p1.communicate(b'Ed')
# self.assertEqual(stdout, b"What's your name?\nHello, Ed!\n")
#
# def test_literal_prefixes_are_not_stripped(self):
# """
# Tests to ensure that the u'' and b'' prefixes on unicode strings and
# byte strings are not removed by the futurize script. Removing the
# prefixes on Py3.3+ is unnecessary and loses some information -- namely,
# that the strings have explicitly been marked as unicode or bytes,
# rather than just e.g. a guess by some automated tool about what they
# are.
# """
# code = '''
# s = u'unicode string'
# b = b'byte string'
# '''
# self.unchanged(code)
#
# @unittest.expectedFailure
# def test_division(self):
# """
# TODO: implement this!
# """
# before = """
# x = 1 / 2
# """
# after = """
# from future.utils import old_div
# x = old_div(1, 2)
# """
# self.convert_check(before, after, stages=[1])
#
#
# class TestFuturizeRenamedStdlib(CodeHandler):
# def test_renamed_modules(self):
# before = """
# import ConfigParser
# import copy_reg
# import cPickle
# import cStringIO
#
# s = cStringIO.StringIO('blah')
# """
# after = """
# import configparser
# import copyreg
# import pickle
# import io
#
# s = io.StringIO('blah')
# """
# self.convert_check(before, after)
#
# @unittest.expectedFailure
# def test_urllib_refactor(self):
# # Code like this using urllib is refactored by futurize --stage2 to use
# # the new Py3 module names, but ``future`` doesn't support urllib yet.
# before = """
# import urllib
#
# URL = 'http://pypi.python.org/pypi/future/json'
# package_name = 'future'
# r = urllib.urlopen(URL.format(package_name))
# data = r.read()
# """
# after = """
# import urllib.request
#
# URL = 'http://pypi.python.org/pypi/future/json'
# package_name = 'future'
# r = urllib.request.urlopen(URL.format(package_name))
# data = r.read()
# """
# self.convert_check(before, after)
#
# def test_renamed_copy_reg_and_cPickle_modules(self):
# """
# Example from docs.python.org/2/library/copy_reg.html
# """
# before = """
# import copy_reg
# import copy
# import cPickle
# class C(object):
# def __init__(self, a):
# self.a = a
#
# def pickle_c(c):
# print('pickling a C instance...')
# return C, (c.a,)
#
# copy_reg.pickle(C, pickle_c)
# c = C(1)
# d = copy.copy(c)
# p = cPickle.dumps(c)
# """
# after = """
# import copyreg
# import copy
# import pickle
# class C(object):
# def __init__(self, a):
# self.a = a
#
# def pickle_c(c):
# print('pickling a C instance...')
# return C, (c.a,)
#
# copyreg.pickle(C, pickle_c)
# c = C(1)
# d = copy.copy(c)
# p = pickle.dumps(c)
# """
# self.convert_check(before, after)
#
# @unittest.expectedFailure
# def test_Py2_StringIO_module(self):
# """
# Ideally, there would be a fixer for this. For now:
#
# TODO: add the Py3 equivalent for this to the docs
# """
# before = """
# import cStringIO
# s = cStringIO.StringIO('my string')
# assert isinstance(s, cStringIO.InputType)
# """
# after = """
# import io
# s = io.StringIO('my string')
# # assert isinstance(s, io.InputType)
# # There is no io.InputType in Python 3. What should we change this to
# # instead?
# """
# self.convert_check(before, after)
#
#
# class TestFuturizeStage1(CodeHandler):
# # """
# # Tests "stage 1": safe optimizations: modernizing Python 2 code so that it
# # uses print functions, new-style exception syntax, etc.
#
# # The behaviour should not change and this should introduce no dependency on
# # the ``future`` package. It produces more modern Python 2-only code. The
# # goal is to reduce the size of the real porting patch-set by performing
# # the uncontroversial patches first.
# # """
#
# def test_apply(self):
# """
# apply() should be changed by futurize --stage1
# """
# before = '''
# def f(a, b):
# return a + b
#
# args = (1, 2)
# assert apply(f, args) == 3
# assert apply(f, ('a', 'b')) == 'ab'
# '''
# after = '''
# def f(a, b):
# return a + b
#
# args = (1, 2)
# assert f(*args) == 3
# assert f(*('a', 'b')) == 'ab'
# '''
# self.convert_check(before, after, stages=[1])
#
# def test_xrange(self):
# """
# xrange should not be changed by futurize --stage1
# """
# code = '''
# for i in xrange(10):
# pass
# '''
# self.unchanged(code, stages=[1])
#
# @unittest.expectedFailure
# def test_absolute_import_changes(self):
# """
# Implicit relative imports should be converted to absolute or explicit
# relative imports correctly.
#
# Issue #16 (with porting bokeh/bbmodel.py)
# """
# with open('specialmodels.py', 'w') as f:
# f.write('pass')
#
# before = """
# import specialmodels.pandasmodel
# specialmodels.pandasmodel.blah()
# """
# after = """
# from __future__ import absolute_import
# from .specialmodels import pandasmodel
# pandasmodel.blah()
# """
# self.convert_check(before, after, stages=[1])
#
# def test_safe_futurize_imports(self):
# """
# The standard library module names should not be changed until stage 2
# """
# before = """
# import ConfigParser
# import HTMLParser
# import collections
#
# ConfigParser.ConfigParser
# HTMLParser.HTMLParser
# d = collections.OrderedDict()
# """
# self.unchanged(before, stages=[1])
#
# def test_print(self):
# before = """
# print 'Hello'
# """
# after = """
# print('Hello')
# """
# self.convert_check(before, after, stages=[1])
#
# before = """
# import sys
# print >> sys.stderr, 'Hello', 'world'
# """
# after = """
# import sys
# print('Hello', 'world', file=sys.stderr)
# """
# self.convert_check(before, after, stages=[1])
#
# def test_print_already_function(self):
# """
# Running futurize --stage1 should not add a second set of parentheses
# """
# before = """
# print('Hello')
# """
# self.unchanged(before, stages=[1])
#
# @unittest.expectedFailure
# def test_print_already_function_complex(self):
# """
# Running futurize --stage1 does add a second second set of parentheses
# in this case. This is because the underlying lib2to3 has two distinct
# grammars -- with a print statement and with a print function -- and,
# when going forwards (2 to both), futurize assumes print is a statement,
# which raises a ParseError.
# """
# before = """
# import sys
# print('Hello', 'world', file=sys.stderr)
# """
# self.unchanged(before, stages=[1])
#
# def test_exceptions(self):
# before = """
# try:
# raise AttributeError('blah')
# except AttributeError, e:
# pass
# """
# after = """
# try:
# raise AttributeError('blah')
# except AttributeError as e:
# pass
# """
# self.convert_check(before, after, stages=[1])
#
# @unittest.expectedFailure
# def test_string_exceptions(self):
# """
# 2to3 does not convert string exceptions: see
# http://python3porting.com/differences.html.
# """
# before = """
# try:
# raise "old string exception"
# except Exception, e:
# pass
# """
# after = """
# try:
# raise Exception("old string exception")
# except Exception as e:
# pass
# """
# self.convert_check(before, after, stages=[1])
#
# @unittest.expectedFailure
# def test_oldstyle_classes(self):
# """
# We don't convert old-style classes to new-style automatically. Should we?
# """
# before = """
# class Blah:
# pass
# """
# after = """
# class Blah(object):
# pass
# """
# self.convert_check(before, after, stages=[1])
#
#
# def test_octal_literals(self):
# before = """
# mode = 0644
# """
# after = """
# mode = 0o644
# """
# self.convert_check(before, after)
#
# def test_long_int_literals(self):
# before = """
# bignumber = 12345678901234567890L
# """
# after = """
# bignumber = 12345678901234567890
# """
# self.convert_check(before, after)
#
# def test___future___import_position(self):
# """
# Issue #4: __future__ imports inserted too low in file: SyntaxError
# """
# code = """
# # Comments here
# # and here
# __version__=''' $Id$ '''
# __doc__="A Sequencer class counts things. It aids numbering and formatting lists."
# __all__='Sequencer getSequencer setSequencer'.split()
# #
# # another comment
# #
#
# CONSTANTS = [ 0, 01, 011, 0111, 012, 02, 021, 0211, 02111, 013 ]
# _RN_LETTERS = "IVXLCDM"
#
# def my_func(value):
# pass
#
# ''' Docstring-like comment here '''
# """
# self.convert(code)
if __name__ == '__main__':
unittest.main()
| unlicense |
csvelasq/TepUnderScenarios | tepmodel/TepRobustnessAnalysis.py | 1 | 9305 | import Utils
import logging
import tepmodel as tep
import pandas as pd
import numpy as np
from scipy.spatial import ConvexHull
import collections
class TriangularProbabilityDistribution(object):
"""Formulas for calculating the pdf and the cdf of a triangular probability distribution"""
def __init__(self, a=0, b=1, c=0.5):
self.a = a
self.b = b
self.c = c
def eval_pdf(self, x):
# from https://en.wikipedia.org/wiki/Triangular_distribution
if x < self.a:
return 0
elif x < self.c:
return 2 * (x - self.a) / ((self.b - self.a) * (self.c - self.a))
elif x <= self.b:
return 2 * (self.b - x) / ((self.b - self.a) * (self.b - self.c))
return 0
def eval_cdf(self, x):
# from https://en.wikipedia.org/wiki/Triangular_distribution
if x < self.a:
return 0
elif x < self.c:
return (x - self.a) ** 2 / ((self.b - self.a) * (self.c - self.a))
elif x <= self.b:
return 1 - (self.b - x) ** 2 / ((self.b - self.a) * (self.b - self.c))
return 1
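# --- Illustrative check (added for clarity; not part of the original module) ---
# For the default Triangular(a=0, b=1, c=0.5) distribution used by the robustness
# measure below, the weight assigned to the probability interval [0.2, 0.6] is
# F(0.6) - F(0.2) = (1 - 0.4**2 / 0.5) - 0.2**2 / 0.5 = 0.68 - 0.08 = 0.60.
def _example_triangular_weight(lo=0.2, hi=0.6):
    dist = TriangularProbabilityDistribution()
    weight = dist.eval_cdf(hi) - dist.eval_cdf(lo)
    return weight  # 0.60 for the default parameters and the interval [0.2, 0.6]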
class MySimplexForRobustness(object):
"""Represents a facet of the convex pareto front for TEP under scenarios with no probabilities"""
def __init__(self, scenario_probabilities,
vertices_plans, touches_border, neighbour_simplices):
self.scenario_probabilities = scenario_probabilities
self.vertices = vertices_plans
self.touches_border = touches_border # True if this facet touches a border (i.e., one vertex is on the border, or is an optimal solution for some scenario)
self.neighbour_simplices = neighbour_simplices
class SecondOrderRobustnessMeasureCalculator(object):
"""Calculates second order robustness measure for the set of efficient transmission expansion alternatives
To use, just initialize with a given set of (efficient) alternatives.
Results are saved to the list 'plans_with_robustness'
"""
def __init__(self, efficient_alternatives):
# type: (List[TepScenariosModel.StaticTePlan]) -> None
self.efficient_alternatives = efficient_alternatives
# Add the worse possible point, in order to ensure a meaningful convex hull
self.worse_plan_vertex = []
for s in self.efficient_alternatives[0].tep_model.tep_system.scenarios:
self.worse_plan_vertex.append(max(alt.total_costs[s] for alt in self.efficient_alternatives))
self.worse_plan_vertex = np.array(self.worse_plan_vertex)
# Convex hull of pareto front is obtained, in order to efficiently calculate the robustness measure
self.efficient_points = np.array(list(alt.total_costs.values() for alt in self.efficient_alternatives))
# self.efficient_points = np.concatenate((self.efficient_points, self.worse_plan_vertex), axis=0)
self.pareto_chull = ConvexHull(self.efficient_points)
# simplices of the convex hull
self.simplices = dict()
simplices_nonpareto_idx = []
# Create simplices
for idx, simplex in enumerate(self.pareto_chull.simplices):
# calculate scenario probabilities based on the normal to each convex simplex
nd_normal = self.pareto_chull.equations[idx][0:-1] # delete last component which is the offset
if np.all(nd_normal < 0):
nd_normal = np.absolute(nd_normal)
nd_prob = nd_normal / sum(nd_normal)
s = MySimplexForRobustness(nd_prob,
list(self.efficient_alternatives[s] for s in simplex),
False, [])
self.simplices[idx] = s
else:
simplices_nonpareto_idx.append(idx)
# Relate my simplices among them with neighbour data from convex hull
for idx, neighbors_idxs in enumerate(self.pareto_chull.neighbors):
if idx in self.simplices.keys():
for neighbor_idx in [s for s in neighbors_idxs]:
if neighbor_idx in simplices_nonpareto_idx:
self.simplices[idx].touches_border = True
else:
self.simplices[idx].neighbour_simplices.append(self.simplices[neighbor_idx])
# Relate expansion plans with simplices and calculate robustness measure
self.plans_with_robustness = []
list_summary_plans_without_robustness = []
list_summary_plans_with_robustness = []
for idx, plan in enumerate(self.efficient_alternatives):
plan_simplices = list(s for s in self.simplices.itervalues() if plan in s.vertices)
is_in_frontier = len(plan_simplices) > 0
if is_in_frontier:
is_in_border = is_in_frontier and len(plan_simplices) < len(plan.tep_model.tep_system.scenarios)
plan_with_robustness = StaticTePlanForRobustnessCalculation(plan, plan_simplices, is_in_border, idx)
self.plans_with_robustness.append(plan_with_robustness)
df_summary = pd.DataFrame(plan_with_robustness.summary, index=['Plan{0}'.format(idx)])
list_summary_plans_with_robustness.append(df_summary)
else:
list_summary_plans_without_robustness.append(plan)
self.df_summary = pd.concat(list_summary_plans_with_robustness)
logging.info(("{} efficient plans processed, {} plans in convex pareto front, {} in concave front")
.format(len(self.efficient_alternatives),
len(list_summary_plans_with_robustness),
len(list_summary_plans_without_robustness))
)
def to_excel(self, excel_filename, sheetname='AlternativesRobustness'):
writer = pd.ExcelWriter(excel_filename, engine='xlsxwriter')
self.to_excel_sheet(writer, sheetname=sheetname)
writer.save()
def to_excel_sheet(self, writer, sheetname='AlternativesRobustness'):
# TODO Write objective function value for each plan also (and operation and investment costs)
Utils.df_to_excel_sheet_autoformat(self.df_summary, writer, sheetname)
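# --- Hedged usage sketch (not part of the original module) ---
# Assuming `efficient_alternatives` is the list of StaticTePlan objects produced
# by the TEP scenario model (as the type comment above indicates), the class is
# typically driven like this:
#
#   calculator = SecondOrderRobustnessMeasureCalculator(efficient_alternatives)
#   print(calculator.df_summary)                 # per-plan summary table
#   calculator.to_excel('robustness.xlsx')       # same table, exported
#   for p in calculator.plans_with_robustness:
#       print(p.plan_id, p.robustness_measure)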
class StaticTePlanForRobustnessCalculation(object):
"""Calculates the robustness measure for one particular transmission expansion plan,
based on convex hull information on the pareto front of transmission expansion alternatives"""
def __init__(self, plan, simplices, is_in_border, plan_id):
# type: (tepmodel.StaticTePlan, list, bool, int) -> None
self.plan = plan
self.plan_id = plan_id
self.simplices = simplices
self.is_in_border = is_in_border
self.robustness_measure = float('nan')
self.summary = collections.OrderedDict()
# self.summary['Plan ID'] = self.plan_id
if not self.is_in_border:
if len(self.simplices) == 2:
probabilities_first_scenario = sorted(list(s.scenario_probabilities[0] for s in self.simplices))
logging.info("Range of probabilities for plan {}: {}".format(self.plan_id,
probabilities_first_scenario)
)
first_scenario_name = self.plan.tep_model.tep_system.scenarios[0].name
self.summary['Probability range {}'.format(first_scenario_name)] = "[{0:.1%} , {1:.1%}]".format(
probabilities_first_scenario[0], probabilities_first_scenario[1])
# Calculates robustness as a probability of optimality given by the integral of second order pdf,
# which can be expressed in closed form in this case (2-d and triangular pdf)
my_prob_distr = TriangularProbabilityDistribution()
self.robustness_measure = my_prob_distr.eval_cdf(probabilities_first_scenario[1]) \
- my_prob_distr.eval_cdf(probabilities_first_scenario[0])
logging.info("Robustness measure for plan {0}: {1:.1%}".format(self.plan_id, self.robustness_measure))
else:
logging.warning(("Robustness measure cannot (yet) be calculated "
"for more than two scenarios (or two simplices per plan), "
"but plan {} has {} simplices").format(self.plan_id,
len(self.simplices)
)
)
else:
# TODO identify the border touched by bordered expansion alternatives and calculate robustness measure for these alternatives also
logging.warning(("Robustness measure cannot (yet) be calculated "
"for plans in the borders of the trade-off curve, "
"such as plan {}").format(self.plan_id)
)
self.summary['Robustness Measure [%]'] = self.robustness_measure
self.summary['Is in border?'] = self.is_in_border
# self.df_summary = pd.DataFrame(self.summary, index=['Plan{0}'.format(self.plan_id)])
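# --- Hedged illustration (not part of the original module) ---
# The robustness measure above is the mass that a triangular second-order pdf
# assigns to the interval of first-scenario probabilities spanned by the plan's
# two simplices. The actual TriangularProbabilityDistribution class is defined
# elsewhere in this project; the sketch below assumes a symmetric triangular
# density on [0, 1] with mode 0.5, purely to make the closed form concrete.
def _triangular_cdf_sketch(x, a=0.0, c=0.5, b=1.0):
    """CDF of a triangular distribution with support [a, b] and mode c."""
    if x <= a:
        return 0.0
    if x <= c:
        return (x - a) ** 2 / ((b - a) * (c - a))
    if x < b:
        return 1.0 - (b - x) ** 2 / ((b - a) * (b - c))
    return 1.0
# robustness ~= _triangular_cdf_sketch(p_hi) - _triangular_cdf_sketch(p_lo),
# where [p_lo, p_hi] is the plan's probability range for the first scenario.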
| mit |
Clyde-fare/scikit-learn | sklearn/cluster/tests/test_spectral.py | 262 | 7954 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
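# S is block structured: samples 0-2 and samples 3-6 form two internally
# fully-connected groups joined only by weak 0.2 links, so every solver below
# is expected to recover the partition [1, 1, 1, 0, 0, 0, 0] up to label swap.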
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.random_integers(0, n_class, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
thilbern/scikit-learn | sklearn/setup.py | 24 | 2991 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for hmm
config.add_extension(
'_hmmc',
sources=['_hmmc.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs needs cblas, fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
michigraber/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 127 | 37672 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset.
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate(
(None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
err_msg='pred_%d doesnt match last pred_%d for loss %r and subsample %r. '
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
# Check consistency on dataset iris.
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict(rng.rand(2))
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict(rng.rand(2)))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multilcass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one line per iteration for iterations 1-10, then one line every 10 iterations (20, 30, ..., 100): 10 + 9 lines
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
# Regression test for issue #4447
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
| bsd-3-clause |
stefanhenneking/mxnet | example/ssd/detect/detector.py | 7 | 7047 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
import numpy as np
from timeit import default_timer as timer
from dataset.testdb import TestDB
from dataset.iterator import DetIter
class Detector(object):
"""
SSD detector which hold a detection network and wraps detection API
Parameters:
----------
symbol : mx.Symbol
detection network Symbol
model_prefix : str
name prefix of trained model
epoch : int
load epoch of trained model
data_shape : int
input data resize shape
mean_pixels : tuple of float
(mean_r, mean_g, mean_b)
batch_size : int
run detection with batch size
ctx : mx.ctx
device to use, if None, use mx.cpu() as default context
"""
def __init__(self, symbol, model_prefix, epoch, data_shape, mean_pixels, \
batch_size=1, ctx=None):
self.ctx = ctx
if self.ctx is None:
self.ctx = mx.cpu()
load_symbol, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)
if symbol is None:
symbol = load_symbol
self.mod = mx.mod.Module(symbol, label_names=None, context=ctx)
self.data_shape = data_shape
self.mod.bind(data_shapes=[('data', (batch_size, 3, data_shape, data_shape))])
self.mod.set_params(args, auxs)
self.data_shape = data_shape
self.mean_pixels = mean_pixels
def detect(self, det_iter, show_timer=False):
"""
detect all images in iterator
Parameters:
----------
det_iter : DetIter
iterator for all testing images
show_timer : Boolean
whether to print out detection exec time
Returns:
----------
list of detection results
"""
num_images = det_iter._size
if not isinstance(det_iter, mx.io.PrefetchingIter):
det_iter = mx.io.PrefetchingIter(det_iter)
start = timer()
detections = self.mod.predict(det_iter).asnumpy()
time_elapsed = timer() - start
if show_timer:
print("Detection time for {} images: {:.4f} sec".format(
num_images, time_elapsed))
result = []
for i in range(detections.shape[0]):
det = detections[i, :, :]
res = det[np.where(det[:, 0] >= 0)[0]]
result.append(res)
return result
def im_detect(self, im_list, root_dir=None, extension=None, show_timer=False):
"""
wrapper for detecting multiple images
Parameters:
----------
im_list : list of str
image path or list of image paths
root_dir : str
directory of input images, optional if image path already
has full directory information
extension : str
image extension, eg. ".jpg", optional
Returns:
----------
list of detection results in format [det0, det1...], det is in
format np.array([id, score, xmin, ymin, xmax, ymax]...)
"""
test_db = TestDB(im_list, root_dir=root_dir, extension=extension)
test_iter = DetIter(test_db, 1, self.data_shape, self.mean_pixels,
is_train=False)
return self.detect(test_iter, show_timer)
def visualize_detection(self, img, dets, classes=[], thresh=0.6):
"""
visualize detections in one image
Parameters:
----------
img : numpy.array
image, in bgr format
dets : numpy.array
ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...])
each row is one object
classes : tuple or list of str
class names
thresh : float
score threshold
"""
import matplotlib.pyplot as plt
import random
plt.imshow(img)
height = img.shape[0]
width = img.shape[1]
colors = dict()
for i in range(dets.shape[0]):
cls_id = int(dets[i, 0])
if cls_id >= 0:
score = dets[i, 1]
if score > thresh:
if cls_id not in colors:
colors[cls_id] = (random.random(), random.random(), random.random())
xmin = int(dets[i, 2] * width)
ymin = int(dets[i, 3] * height)
xmax = int(dets[i, 4] * width)
ymax = int(dets[i, 5] * height)
rect = plt.Rectangle((xmin, ymin), xmax - xmin,
ymax - ymin, fill=False,
edgecolor=colors[cls_id],
linewidth=3.5)
plt.gca().add_patch(rect)
class_name = str(cls_id)
if classes and len(classes) > cls_id:
class_name = classes[cls_id]
plt.gca().text(xmin, ymin - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor=colors[cls_id], alpha=0.5),
fontsize=12, color='white')
plt.show()
def detect_and_visualize(self, im_list, root_dir=None, extension=None,
classes=[], thresh=0.6, show_timer=False):
"""
wrapper for im_detect and visualize_detection
Parameters:
----------
im_list : list of str or str
image path or list of image paths
root_dir : str or None
directory of input images, optional if image path already
has full directory information
extension : str or None
image extension, eg. ".jpg", optional
Returns:
----------
"""
import cv2
dets = self.im_detect(im_list, root_dir, extension, show_timer=show_timer)
if not isinstance(im_list, list):
im_list = [im_list]
assert len(dets) == len(im_list)
for k, det in enumerate(dets):
img = cv2.imread(im_list[k])
img[:, :, (0, 1, 2)] = img[:, :, (2, 1, 0)]
self.visualize_detection(img, det, classes, thresh)
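# --- Hedged usage sketch (not part of the original module) ---
# With a trained SSD checkpoint on disk (the prefix, epoch, class names and
# image path below are placeholders, not values shipped with the example):
#
#   detector = Detector(None, 'model/ssd_300', epoch=0, data_shape=300,
#                       mean_pixels=(123, 117, 104), ctx=mx.gpu(0))
#   dets = detector.im_detect(['street.jpg'], show_timer=True)
#   detector.detect_and_visualize(['street.jpg'],
#                                 classes=['person', 'car'], thresh=0.5)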
| apache-2.0 |
IamJeffG/geopandas | setup.py | 1 | 2575 | #!/usr/bin/env python
"""Installation script
Version handling borrowed from pandas project.
"""
import sys
import os
import warnings
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
LONG_DESCRIPTION = """GeoPandas is a project to add support for geographic data to
`pandas`_ objects.
The goal of GeoPandas is to make working with geospatial data in
python easier. It combines the capabilities of `pandas`_ and `shapely`_,
providing geospatial operations in pandas and a high-level interface
to multiple geometries to shapely. GeoPandas enables you to easily do
operations in python that would otherwise require a spatial database
such as PostGIS.
.. _pandas: http://pandas.pydata.org
.. _shapely: http://toblerity.github.io/shapely
"""
MAJOR = 0
MINOR = 1
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
if os.environ.get('READTHEDOCS', False) == 'True':
INSTALL_REQUIRES = []
else:
INSTALL_REQUIRES = ['pandas', 'shapely', 'fiona', 'descartes', 'pyproj']
FULLVERSION = VERSION
if not ISRELEASED:
FULLVERSION += '.dev'
try:
import subprocess
try:
pipe = subprocess.Popen(["git", "rev-parse", "--short", "HEAD"],
stdout=subprocess.PIPE).stdout
except OSError:
# msysgit compatibility
pipe = subprocess.Popen(
["git.cmd", "describe", "HEAD"],
stdout=subprocess.PIPE).stdout
rev = pipe.read().strip()
# makes distutils blow up on Python 2.7
if sys.version_info[0] >= 3:
rev = rev.decode('ascii')
FULLVERSION = '%d.%d.%d.dev-%s' % (MAJOR, MINOR, MICRO, rev)
except:
warnings.warn("WARNING: Couldn't get git revision")
else:
FULLVERSION += QUALIFIER
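# e.g. with MAJOR.MINOR.MICRO = 0.1.0 as above and a git short hash of
# "1a2b3c4" (an illustrative value), an unreleased checkout yields
# FULLVERSION == "0.1.0.dev-1a2b3c4"; a released build is simply "0.1.0".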
def write_version_py(filename=None):
cnt = """\
version = '%s'
short_version = '%s'
"""
if not filename:
filename = os.path.join(
os.path.dirname(__file__), 'geopandas', 'version.py')
a = open(filename, 'w')
try:
a.write(cnt % (FULLVERSION, VERSION))
finally:
a.close()
write_version_py()
setup(name='geopandas',
version=FULLVERSION,
description='Geographic pandas extensions',
license='BSD',
author='Kelsey Jordahl',
author_email='[email protected]',
url='http://geopandas.org',
long_description=LONG_DESCRIPTION,
packages=['geopandas', 'geopandas.io', 'geopandas.tools'],
install_requires=INSTALL_REQUIRES)
| bsd-3-clause |
pczerkas/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | Chapter2_MorePyMC/separation_plot.py | 86 | 1494 | # separation plot
# Author: Cameron Davidson-Pilon,2013
# see http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
import matplotlib.pyplot as plt
import numpy as np
def separation_plot( p, y, **kwargs ):
"""
This function creates a separation plot for logistic and probit classification.
See http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
p: the proportions/probabilities; can be an n x M matrix representing M models.
y: the 0-1 response variables.
"""
assert p.shape[0] == y.shape[0], "p.shape[0] != y.shape[0]"
n = p.shape[0]
try:
M = p.shape[1]
except:
p = p.reshape( n, 1 )
M = p.shape[1]
#colors = np.array( ["#fdf2db", "#e44a32"] )
colors_bmh = np.array( ["#eeeeee", "#348ABD"] )
fig = plt.figure( )#figsize = (8, 1.3*M) )
for i in range(M):
ax = fig.add_subplot(M, 1, i+1)
ix = np.argsort( p[:,i] )
#plot the different bars
bars = ax.bar( np.arange(n), np.ones(n), width=1.,
color = colors_bmh[ y[ix].astype(int) ],
edgecolor = 'none')
ax.plot( np.arange(n+1), np.append(p[ix,i], p[ix,i][-1]), "k",
linewidth = 1.,drawstyle="steps-post" )
#create expected value bar.
ax.vlines( [(1-p[ix,i]).sum()], [0], [1] )
#ax.grid(False)
#ax.axis('off')
plt.xlim( 0, n)
plt.tight_layout()
return
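# --- Hedged usage sketch (not part of the original module) ---
# A minimal, self-contained way to exercise the function on fake data:
if __name__ == "__main__":
    _rng = np.random.RandomState(0)
    _probs = _rng.uniform(size=500)          # predicted probabilities
    _outcomes = _rng.binomial(1, _probs)     # 0/1 outcomes drawn from them
    separation_plot(_probs, _outcomes)
    plt.show()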
| mit |
ZimingLu/HandPoseEstimation | training/NoiseRandomNew.py | 1 | 7377 | import numpy as np
import random
from skimage import draw,data
import h5py
import cv2
import scipy.io as sio
import sys
import os
import math
import matplotlib.pyplot as plt
paths = {}
with open('../path.config', 'r') as f:
for line in f:
name, path = line.split(': ')
print name, path
paths[name] = path
# the data path for train and test img
dataset_path = '/home/gtayx/handPose/NYUdata'
# the NYU annotation contains 36 joints per frame; only the 14 indexed by joint_id below are used
J = 14
img_size = 128
joint_id = np.array([0, 3, 6, 9, 12, 15, 18, 21, 24, 25, 27, 30, 31, 32])
fx = 588.03 #640/1.08836710
fy = 587.07 #480/0.8176
fu = 320.
fv = 240.
##############################################################################
# input the image and the position of palm_5(uvd)
def CropImage(image, com):
# u v d info of palm
u, v, d = com
d = d + np.random.choice(np.arange(-30,30))
urand = np.random.choice(np.arange(-30,30))
vrand = np.random.choice(np.arange(-30,30))
u = u + urand
v = v + vrand
# z is equal to d
zstart = d - cube_size_d / 2.
zend = d + cube_size_d / 2.
# compute the pixel-space (u, v) bounds of a metric cube of side cube_size centred at the palm
xstart = int(math.floor((u * d / fx - cube_size / 2.) / d * fx))
xend = int(math.floor((u * d / fx + cube_size / 2.) / d * fx))
ystart = int(math.floor((v * d / fy - cube_size / 2.) / d * fy))
yend = int(math.floor((v * d / fy + cube_size / 2.) / d * fy))
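# e.g. (ignoring the random jitter above) a palm at u = 320, v = 240, d = 800 mm
# with cube_size = 450 mm gives 225 / 800 * 588.03 ~ 165 pixels of half-width,
# i.e. roughly xstart ~ 155 and xend ~ 485: a ~330-pixel window whose pixel size
# shrinks as the hand moves farther from the camera. (Values are illustrative.)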
# select the cropped domain, if box is beyond the boundary just pad it
cropped = depth[max(ystart, 0):min(yend, depth.shape[0]), max(xstart, 0):min(xend, depth.shape[1])].copy()
cropped = np.pad(cropped, ((abs(ystart)-max(ystart, 0), abs(yend)-min(yend, depth.shape[0])),
(abs(xstart)-max(xstart, 0), abs(xend)-min(xend, depth.shape[1]))), mode='constant', constant_values=0)
# for the depth beyond box just use the value on the box
msk1 = np.bitwise_and(cropped < zstart, cropped != 0)
msk2 = np.bitwise_and(cropped > zend, cropped != 0)
msk3 = np.bitwise_and(cropped < zstart, cropped == 0)
cropped[msk1] = zstart
cropped[msk2] = zend
# set the 0 to zend
cropped[msk3] = zend
# dsize is 128*128
dsize = (img_size, img_size)
wb = (xend - xstart)
hb = (yend - ystart)
# resize the (variable-size) crop to the 128*128 network input, preserving aspect ratio
if wb > hb:
sz = (dsize[0], hb * dsize[0] / wb)
else:
sz = (wb * dsize[1] / hb, dsize[1])
rz = cv2.resize(cropped, sz)
ret = np.ones(dsize, np.float32) * zend
xstart = int(math.floor(dsize[0] / 2 - rz.shape[1] / 2))
xend = int(xstart + rz.shape[1])
ystart = int(math.floor(dsize[1] / 2 - rz.shape[0] / 2))
yend = int(ystart + rz.shape[0])
ret[ystart:yend, xstart:xend] = rz
# randomly choose between a rectangular patch and an elliptical patch of background-depth noise
randChoice = int(10 * random.random())
if randChoice % 2 == 0:
rand = int(100*random.random()) + 40
noise_x_start = wb / 2 - urand + np.random.choice(np.arange(-50,50))- rand/2
noise_y_start = hb / 2 - vrand + np.random.choice(np.arange(-120,60)) - rand/2
noise_x_end = noise_x_start+ rand
noise_y_end = noise_y_start+ rand
cropped[noise_y_start:noise_y_end, noise_x_start:noise_x_end] = int(zend)
else:
noise_x_start = wb / 2 - urand + np.random.choice(np.arange(-50,50))
noise_y_start = hb / 2 - vrand + np.random.choice(np.arange(-120,60))
rr, cc = draw.ellipse(noise_y_start, noise_x_start, np.random.choice(np.arange(20,50)), np.random.choice(np.arange(20,50)))
draw.set_color(cropped, [rr, cc], int(zend))
# resize the (variable-size) crop to the 128*128 network input, preserving aspect ratio
if wb > hb:
sz = (dsize[0], hb * dsize[0] / wb)
else:
sz = (wb * dsize[1] / hb, dsize[1])
rz = cv2.resize(cropped, sz)
ret = np.ones(dsize, np.float32) * zend
xstart = int(math.floor(dsize[0] / 2 - rz.shape[1] / 2))
xend = int(xstart + rz.shape[1])
ystart = int(math.floor(dsize[1] / 2 - rz.shape[0] / 2))
yend = int(ystart + rz.shape[0])
ret[ystart:yend, xstart:xend] = rz
return ret
##############################################################################
for data_type in ['test', 'train']:
# data_type = 'test'
trainCount = 0
idComb = [[0,20000],[20000,40000],[40000,60000],[60000,72000]]
# select the cube size of the hand
cube_size = 450
cube_size_d = 300
for trainCount in xrange(1,5):
if data_type == 'test' and trainCount > 1:
break
if data_type == 'train':
id_start = idComb[trainCount-1][0]
id_end = idComb[trainCount-1][1]
else:
id_start = 0
id_end = 8000
if data_type == 'train':
data_path = dataset_path + '/train/train'
label_path = dataset_path + '/train/joint_data.mat'
else:
data_path = dataset_path + '/test/'
label_path = dataset_path + '/test/joint_data.mat'
labels = sio.loadmat(label_path)
# save the joint info to joint_uvd and joint_xyz
joint_uvd = labels['joint_uvd'][0]
joint_xyz = labels['joint_xyz'][0]
cnt = 0
depth_h5, joint_h5, com_h5, inds_h5 = [], [], [], []
for id in range(id_start, id_end):
img_path = '{}/depth_1_{:07d}.png'.format(data_path, id + 1)
print 'depth_1_{:07d}.png'.format(id + 1)
        # check whether the path exists
if not os.path.exists(img_path):
print '{} Not Exists!'.format(img_path)
continue
img = cv2.imread(img_path)
# get depth in each pixel
depth = np.asarray(img[:, :, 0] + img[:, :, 1] * 256)
        # crop the depth image around the palm joint (index 35) using the function above
depth = CropImage(depth, joint_uvd[id, 35]) # + np.random.choice(np.arange(-10,10))
com3D = joint_xyz[id, 35]
        # transform only the joints of interest
joint = joint_xyz[id][joint_id] - com3D
# print joint.shape
depth = ((depth - com3D[2]) / (cube_size_d / 2)).reshape(img_size, img_size)
depth = np.asarray(depth[np.newaxis,...])
# normalize the joint position
joint = np.clip(joint / (cube_size / 2), -1, 1)
joint = joint.reshape(3 * J)
joint = np.asarray(joint[np.newaxis, np.newaxis, ...])
# joint = np.asarray(joint)
# print joint.shape
depth_h5.append(depth.copy())
# print np.asarray(depth_h5).shape
        # xyz coordinates are stored in one array, so reshape to 3*J
joint_h5.append(joint.copy())
com_h5.append(com3D.copy())
inds_h5.append(id)
cnt += 1
if id > id_end-2:
            # keep the original order for test; shuffle the dataset for train
rng = np.arange(cnt) if data_type == 'test' else np.random.choice(np.arange(cnt), cnt, replace = False)
if data_type == 'test':
dset = h5py.File(dataset_path + '/PalmRandomWithNoise/{}_normal.h5'.format(data_type), 'w')
elif data_type == 'train' and id_start == 0:
dset = h5py.File(dataset_path + '/PalmRandomWithNoise/{}_normal_1.h5'.format(data_type), 'w')
elif data_type == 'train' and id_start == 20000:
dset = h5py.File(dataset_path + '/PalmRandomWithNoise/{}_normal_2.h5'.format(data_type), 'w')
elif data_type == 'train' and id_start == 40000:
dset = h5py.File(dataset_path + '/PalmRandomWithNoise/{}_normal_3.h5'.format(data_type), 'w')
elif data_type == 'train' and id_start == 60000:
dset = h5py.File(dataset_path + '/PalmRandomWithNoise/{}_normal_4.h5'.format(data_type), 'w')
dset['depth'] = np.asarray(depth_h5)[rng]
dset['joint'] = np.asarray(joint_h5)[rng]
dset.close()
depth_h5, joint_h5, com_h5, inds_h5 = [], [], [], []
cnt = 0
break
| gpl-3.0 |
prabhjyotsingh/zeppelin | python/src/main/resources/python/bootstrap_sql.py | 60 | 1189 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Setup SQL over Pandas DataFrames
# It requires next dependencies to be installed:
# - pandas
# - pandasql
from __future__ import print_function
try:
from pandasql import sqldf
pysqldf = lambda q: sqldf(q, globals())
except ImportError:
    pysqldf = lambda q: print("Can not run SQL over Pandas DataFrame. " +
                              "Make sure 'pandas' and 'pandasql' libraries are installed")
| apache-2.0 |
slundberg/shap | shap/plots/_waterfall.py | 1 | 26069 | import numpy as np
import warnings
try:
import matplotlib.pyplot as pl
import matplotlib
except ImportError:
warnings.warn("matplotlib could not be loaded!")
pass
from ._labels import labels
from ..utils import safe_isinstance, format_value
from . import colors
# TODO: If we make a JS version of this plot then we could let users click on a bar and then see the dependence
# plot that is associated with that feature get overlayed on the plot...it would quickly allow users to answer
# why a feature is pushing down or up. Perhaps the best way to do this would be with an ICE plot hanging off
# of the bar...
def waterfall(shap_values, max_display=10, show=True):
""" Plots an explantion of a single prediction as a waterfall plot.
The SHAP value of a feature represents the impact of the evidence provided by that feature on the model's
output. The waterfall plot is designed to visually display how the SHAP values (evidence) of each feature
move the model output from our prior expectation under the background data distribution, to the final model
prediction given the evidence of all the features. Features are sorted by the magnitude of their SHAP values
with the smallest magnitude features grouped together at the bottom of the plot when the number of features
in the models exceeds the max_display parameter.
Parameters
----------
shap_values : Explanation
A one-dimensional Explanation object that contains the feature values and SHAP values to plot.
    max_display : int
The maximum number of features to plot.
show : bool
Whether matplotlib.pyplot.show() is called before returning. Setting this to False allows the plot
to be customized further after it has been created.
"""
base_values = shap_values.base_values
features = shap_values.data
feature_names = shap_values.feature_names
lower_bounds = getattr(shap_values, "lower_bounds", None)
upper_bounds = getattr(shap_values, "upper_bounds", None)
values = shap_values.values
# make sure we only have a single output to explain
if (type(base_values) == np.ndarray and len(base_values) > 0) or type(base_values) == list:
raise Exception("waterfall_plot requires a scalar base_values of the model output as the first " \
"parameter, but you have passed an array as the first parameter! " \
"Try shap.waterfall_plot(explainer.base_values[0], values[0], X[0]) or " \
"for multi-output models try " \
"shap.waterfall_plot(explainer.base_values[0], values[0][0], X[0]).")
# make sure we only have a single explanation to plot
if len(values.shape) == 2:
raise Exception("The waterfall_plot can currently only plot a single explanation but a matrix of explanations was passed!")
# unwrap pandas series
if safe_isinstance(features, "pandas.core.series.Series"):
if feature_names is None:
feature_names = list(features.index)
features = features.values
# fallback feature names
if feature_names is None:
feature_names = np.array([labels['FEATURE'] % str(i) for i in range(len(values))])
# init variables we use for tracking the plot locations
num_features = min(max_display, len(values))
row_height = 0.5
rng = range(num_features - 1, -1, -1)
order = np.argsort(-np.abs(values))
pos_lefts = []
pos_inds = []
pos_widths = []
pos_low = []
pos_high = []
neg_lefts = []
neg_inds = []
neg_widths = []
neg_low = []
neg_high = []
loc = base_values + values.sum()
yticklabels = ["" for i in range(num_features + 1)]
# size the plot based on how many features we are plotting
pl.gcf().set_size_inches(8, num_features * row_height + 1.5)
# see how many individual (vs. grouped at the end) features we are plotting
if num_features == len(values):
num_individual = num_features
else:
num_individual = num_features - 1
# compute the locations of the individual features and plot the dashed connecting lines
for i in range(num_individual):
sval = values[order[i]]
loc -= sval
if sval >= 0:
pos_inds.append(rng[i])
pos_widths.append(sval)
if lower_bounds is not None:
pos_low.append(lower_bounds[order[i]])
pos_high.append(upper_bounds[order[i]])
pos_lefts.append(loc)
else:
neg_inds.append(rng[i])
neg_widths.append(sval)
if lower_bounds is not None:
neg_low.append(lower_bounds[order[i]])
neg_high.append(upper_bounds[order[i]])
neg_lefts.append(loc)
if num_individual != num_features or i + 4 < num_individual:
pl.plot([loc, loc], [rng[i] -1 - 0.4, rng[i] + 0.4], color="#bbbbbb", linestyle="--", linewidth=0.5, zorder=-1)
if features is None:
yticklabels[rng[i]] = feature_names[order[i]]
else:
yticklabels[rng[i]] = format_value(features[order[i]], "%0.03f") + " = " + feature_names[order[i]]
# add a last grouped feature to represent the impact of all the features we didn't show
if num_features < len(values):
yticklabels[0] = "%d other features" % (len(values) - num_features + 1)
remaining_impact = base_values - loc
if remaining_impact < 0:
pos_inds.append(0)
pos_widths.append(-remaining_impact)
pos_lefts.append(loc + remaining_impact)
c = colors.red_rgb
else:
neg_inds.append(0)
neg_widths.append(-remaining_impact)
neg_lefts.append(loc + remaining_impact)
c = colors.blue_rgb
points = pos_lefts + list(np.array(pos_lefts) + np.array(pos_widths)) + neg_lefts + list(np.array(neg_lefts) + np.array(neg_widths))
dataw = np.max(points) - np.min(points)
# draw invisible bars just for sizing the axes
label_padding = np.array([0.1*dataw if w < 1 else 0 for w in pos_widths])
pl.barh(pos_inds, np.array(pos_widths) + label_padding + 0.02*dataw, left=np.array(pos_lefts) - 0.01*dataw, color=colors.red_rgb, alpha=0)
label_padding = np.array([-0.1*dataw if -w < 1 else 0 for w in neg_widths])
pl.barh(neg_inds, np.array(neg_widths) + label_padding - 0.02*dataw, left=np.array(neg_lefts) + 0.01*dataw, color=colors.blue_rgb, alpha=0)
# define variable we need for plotting the arrows
head_length = 0.08
bar_width = 0.8
xlen = pl.xlim()[1] - pl.xlim()[0]
fig = pl.gcf()
ax = pl.gca()
xticks = ax.get_xticks()
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
bbox_to_xscale = xlen/width
hl_scaled = bbox_to_xscale * head_length
renderer = fig.canvas.get_renderer()
# draw the positive arrows
for i in range(len(pos_inds)):
dist = pos_widths[i]
arrow_obj = pl.arrow(
pos_lefts[i], pos_inds[i], max(dist-hl_scaled, 0.000001), 0,
head_length=min(dist, hl_scaled),
color=colors.red_rgb, width=bar_width,
head_width=bar_width
)
if pos_low is not None and i < len(pos_low):
pl.errorbar(
pos_lefts[i] + pos_widths[i], pos_inds[i],
xerr=np.array([[pos_widths[i] - pos_low[i]], [pos_high[i] - pos_widths[i]]]),
ecolor=colors.light_red_rgb
)
txt_obj = pl.text(
pos_lefts[i] + 0.5*dist, pos_inds[i], format_value(pos_widths[i], '%+0.02f'),
horizontalalignment='center', verticalalignment='center', color="white",
fontsize=12
)
text_bbox = txt_obj.get_window_extent(renderer=renderer)
arrow_bbox = arrow_obj.get_window_extent(renderer=renderer)
# if the text overflows the arrow then draw it after the arrow
if text_bbox.width > arrow_bbox.width:
txt_obj.remove()
txt_obj = pl.text(
pos_lefts[i] + (5/72)*bbox_to_xscale + dist, pos_inds[i], format_value(pos_widths[i], '%+0.02f'),
horizontalalignment='left', verticalalignment='center', color=colors.red_rgb,
fontsize=12
)
# draw the negative arrows
for i in range(len(neg_inds)):
dist = neg_widths[i]
arrow_obj = pl.arrow(
neg_lefts[i], neg_inds[i], -max(-dist-hl_scaled, 0.000001), 0,
head_length=min(-dist, hl_scaled),
color=colors.blue_rgb, width=bar_width,
head_width=bar_width
)
if neg_low is not None and i < len(neg_low):
pl.errorbar(
neg_lefts[i] + neg_widths[i], neg_inds[i],
xerr=np.array([[neg_widths[i] - neg_low[i]], [neg_high[i] - neg_widths[i]]]),
ecolor=colors.light_blue_rgb
)
txt_obj = pl.text(
neg_lefts[i] + 0.5*dist, neg_inds[i], format_value(neg_widths[i], '%+0.02f'),
horizontalalignment='center', verticalalignment='center', color="white",
fontsize=12
)
text_bbox = txt_obj.get_window_extent(renderer=renderer)
arrow_bbox = arrow_obj.get_window_extent(renderer=renderer)
# if the text overflows the arrow then draw it after the arrow
if text_bbox.width > arrow_bbox.width:
txt_obj.remove()
txt_obj = pl.text(
neg_lefts[i] - (5/72)*bbox_to_xscale + dist, neg_inds[i], format_value(neg_widths[i], '%+0.02f'),
horizontalalignment='right', verticalalignment='center', color=colors.blue_rgb,
fontsize=12
)
# draw the y-ticks twice, once in gray and then again with just the feature names in black
ytick_pos = list(range(num_features)) + list(np.arange(num_features)+1e-8) # The 1e-8 is so matplotlib 3.3 doesn't try and collapse the ticks
pl.yticks(ytick_pos, yticklabels[:-1] + [l.split('=')[-1] for l in yticklabels[:-1]], fontsize=13)
# put horizontal lines for each feature row
for i in range(num_features):
pl.axhline(i, color="#cccccc", lw=0.5, dashes=(1, 5), zorder=-1)
# mark the prior expected value and the model prediction
pl.axvline(base_values, 0, 1/num_features, color="#bbbbbb", linestyle="--", linewidth=0.5, zorder=-1)
fx = base_values + values.sum()
pl.axvline(fx, 0, 1, color="#bbbbbb", linestyle="--", linewidth=0.5, zorder=-1)
# clean up the main axis
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('none')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
pl.gca().spines['left'].set_visible(False)
ax.tick_params(labelsize=13)
#pl.xlabel("\nModel output", fontsize=12)
# draw the E[f(X)] tick mark
xmin,xmax = ax.get_xlim()
ax2=ax.twiny()
ax2.set_xlim(xmin,xmax)
ax2.set_xticks([base_values, base_values+1e-8]) # The 1e-8 is so matplotlib 3.3 doesn't try and collapse the ticks
ax2.set_xticklabels(["\n$E[f(X)]$","\n$ = "+format_value(base_values, "%0.03f")+"$"], fontsize=12, ha="left")
ax2.spines['right'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax2.spines['left'].set_visible(False)
# draw the f(x) tick mark
ax3=ax2.twiny()
ax3.set_xlim(xmin,xmax)
ax3.set_xticks([base_values + values.sum(), base_values + values.sum() + 1e-8]) # The 1e-8 is so matplotlib 3.3 doesn't try and collapse the ticks
ax3.set_xticklabels(["$f(x)$","$ = "+format_value(fx, "%0.03f")+"$"], fontsize=12, ha="left")
tick_labels = ax3.xaxis.get_majorticklabels()
tick_labels[0].set_transform(tick_labels[0].get_transform() + matplotlib.transforms.ScaledTranslation(-10/72., 0, fig.dpi_scale_trans))
tick_labels[1].set_transform(tick_labels[1].get_transform() + matplotlib.transforms.ScaledTranslation(12/72., 0, fig.dpi_scale_trans))
tick_labels[1].set_color("#999999")
ax3.spines['right'].set_visible(False)
ax3.spines['top'].set_visible(False)
ax3.spines['left'].set_visible(False)
# adjust the position of the E[f(X)] = x.xx label
tick_labels = ax2.xaxis.get_majorticklabels()
tick_labels[0].set_transform(tick_labels[0].get_transform() + matplotlib.transforms.ScaledTranslation(-20/72., 0, fig.dpi_scale_trans))
tick_labels[1].set_transform(tick_labels[1].get_transform() + matplotlib.transforms.ScaledTranslation(22/72., -1/72., fig.dpi_scale_trans))
tick_labels[1].set_color("#999999")
# color the y tick labels that have the feature values as gray
# (these fall behind the black ones with just the feature name)
tick_labels = ax.yaxis.get_majorticklabels()
for i in range(num_features):
tick_labels[i].set_color("#999999")
if show:
pl.show()
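# Illustrative usage of `waterfall` (a sketch; `model` and `X` are placeholders for a
# fitted model and its feature matrix, not part of this module):
#   explainer = shap.Explainer(model, X)
#   shap_values = explainer(X)
#   shap.plots.waterfall(shap_values[0])   # plot the explanation for the first sample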
def waterfall_legacy(expected_value, shap_values=None, features=None, feature_names=None, max_display=10, show=True):
""" Plots an explantion of a single prediction as a waterfall plot.
The SHAP value of a feature represents the impact of the evidence provided by that feature on the model's
output. The waterfall plot is designed to visually display how the SHAP values (evidence) of each feature
move the model output from our prior expectation under the background data distribution, to the final model
prediction given the evidence of all the features. Features are sorted by the magnitude of their SHAP values
with the smallest magnitude features grouped together at the bottom of the plot when the number of features
in the models exceeds the max_display parameter.
Parameters
----------
expected_value : float
This is the reference value that the feature contributions start from. For SHAP values it should
be the value of explainer.expected_value.
shap_values : numpy.array
One dimensional array of SHAP values.
features : numpy.array
One dimensional array of feature values. This provides the values of all the
features, and should be the same shape as the shap_values argument.
feature_names : list
List of feature names (# features).
    max_display : int
The maximum number of features to plot.
show : bool
Whether matplotlib.pyplot.show() is called before returning. Setting this to False allows the plot
to be customized further after it has been created.
"""
# support passing an explanation object
upper_bounds = None
lower_bounds = None
if str(type(expected_value)).endswith("Explanation'>"):
shap_exp = expected_value
expected_value = shap_exp.expected_value
shap_values = shap_exp.values
features = shap_exp.data
feature_names = shap_exp.feature_names
lower_bounds = getattr(shap_exp, "lower_bounds", None)
upper_bounds = getattr(shap_exp, "upper_bounds", None)
# make sure we only have a single output to explain
if (type(expected_value) == np.ndarray and len(expected_value) > 0) or type(expected_value) == list:
raise Exception("waterfall_plot requires a scalar expected_value of the model output as the first " \
"parameter, but you have passed an array as the first parameter! " \
"Try shap.waterfall_plot(explainer.expected_value[0], shap_values[0], X[0]) or " \
"for multi-output models try " \
"shap.waterfall_plot(explainer.expected_value[0], shap_values[0][0], X[0]).")
# make sure we only have a single explanation to plot
if len(shap_values.shape) == 2:
raise Exception("The waterfall_plot can currently only plot a single explanation but a matrix of explanations was passed!")
# unwrap pandas series
if safe_isinstance(features, "pandas.core.series.Series"):
if feature_names is None:
feature_names = list(features.index)
features = features.values
# fallback feature names
if feature_names is None:
feature_names = np.array([labels['FEATURE'] % str(i) for i in range(len(shap_values))])
# init variables we use for tracking the plot locations
num_features = min(max_display, len(shap_values))
row_height = 0.5
rng = range(num_features - 1, -1, -1)
order = np.argsort(-np.abs(shap_values))
pos_lefts = []
pos_inds = []
pos_widths = []
pos_low = []
pos_high = []
neg_lefts = []
neg_inds = []
neg_widths = []
neg_low = []
neg_high = []
loc = expected_value + shap_values.sum()
yticklabels = ["" for i in range(num_features + 1)]
# size the plot based on how many features we are plotting
pl.gcf().set_size_inches(8, num_features * row_height + 1.5)
# see how many individual (vs. grouped at the end) features we are plotting
if num_features == len(shap_values):
num_individual = num_features
else:
num_individual = num_features - 1
# compute the locations of the individual features and plot the dashed connecting lines
for i in range(num_individual):
sval = shap_values[order[i]]
loc -= sval
if sval >= 0:
pos_inds.append(rng[i])
pos_widths.append(sval)
if lower_bounds is not None:
pos_low.append(lower_bounds[order[i]])
pos_high.append(upper_bounds[order[i]])
pos_lefts.append(loc)
else:
neg_inds.append(rng[i])
neg_widths.append(sval)
if lower_bounds is not None:
neg_low.append(lower_bounds[order[i]])
neg_high.append(upper_bounds[order[i]])
neg_lefts.append(loc)
if num_individual != num_features or i + 4 < num_individual:
pl.plot([loc, loc], [rng[i] -1 - 0.4, rng[i] + 0.4], color="#bbbbbb", linestyle="--", linewidth=0.5, zorder=-1)
if features is None:
yticklabels[rng[i]] = feature_names[order[i]]
else:
yticklabels[rng[i]] = format_value(features[order[i]], "%0.03f") + " = " + feature_names[order[i]]
# add a last grouped feature to represent the impact of all the features we didn't show
if num_features < len(shap_values):
yticklabels[0] = "%d other features" % (len(shap_values) - num_features + 1)
remaining_impact = expected_value - loc
if remaining_impact < 0:
pos_inds.append(0)
pos_widths.append(-remaining_impact)
pos_lefts.append(loc + remaining_impact)
c = colors.red_rgb
else:
neg_inds.append(0)
neg_widths.append(-remaining_impact)
neg_lefts.append(loc + remaining_impact)
c = colors.blue_rgb
points = pos_lefts + list(np.array(pos_lefts) + np.array(pos_widths)) + neg_lefts + list(np.array(neg_lefts) + np.array(neg_widths))
dataw = np.max(points) - np.min(points)
# draw invisible bars just for sizing the axes
label_padding = np.array([0.1*dataw if w < 1 else 0 for w in pos_widths])
pl.barh(pos_inds, np.array(pos_widths) + label_padding + 0.02*dataw, left=np.array(pos_lefts) - 0.01*dataw, color=colors.red_rgb, alpha=0)
label_padding = np.array([-0.1*dataw if -w < 1 else 0 for w in neg_widths])
pl.barh(neg_inds, np.array(neg_widths) + label_padding - 0.02*dataw, left=np.array(neg_lefts) + 0.01*dataw, color=colors.blue_rgb, alpha=0)
# define variable we need for plotting the arrows
head_length = 0.08
bar_width = 0.8
xlen = pl.xlim()[1] - pl.xlim()[0]
fig = pl.gcf()
ax = pl.gca()
xticks = ax.get_xticks()
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
bbox_to_xscale = xlen/width
hl_scaled = bbox_to_xscale * head_length
renderer = fig.canvas.get_renderer()
# draw the positive arrows
for i in range(len(pos_inds)):
dist = pos_widths[i]
arrow_obj = pl.arrow(
pos_lefts[i], pos_inds[i], max(dist-hl_scaled, 0.000001), 0,
head_length=min(dist, hl_scaled),
color=colors.red_rgb, width=bar_width,
head_width=bar_width
)
if pos_low is not None and i < len(pos_low):
pl.errorbar(
pos_lefts[i] + pos_widths[i], pos_inds[i],
xerr=np.array([[pos_widths[i] - pos_low[i]], [pos_high[i] - pos_widths[i]]]),
ecolor=colors.light_red_rgb
)
txt_obj = pl.text(
pos_lefts[i] + 0.5*dist, pos_inds[i], format_value(pos_widths[i], '%+0.02f'),
horizontalalignment='center', verticalalignment='center', color="white",
fontsize=12
)
text_bbox = txt_obj.get_window_extent(renderer=renderer)
arrow_bbox = arrow_obj.get_window_extent(renderer=renderer)
# if the text overflows the arrow then draw it after the arrow
if text_bbox.width > arrow_bbox.width:
txt_obj.remove()
txt_obj = pl.text(
pos_lefts[i] + (5/72)*bbox_to_xscale + dist, pos_inds[i], format_value(pos_widths[i], '%+0.02f'),
horizontalalignment='left', verticalalignment='center', color=colors.red_rgb,
fontsize=12
)
# draw the negative arrows
for i in range(len(neg_inds)):
dist = neg_widths[i]
arrow_obj = pl.arrow(
neg_lefts[i], neg_inds[i], -max(-dist-hl_scaled, 0.000001), 0,
head_length=min(-dist, hl_scaled),
color=colors.blue_rgb, width=bar_width,
head_width=bar_width
)
if neg_low is not None and i < len(neg_low):
pl.errorbar(
neg_lefts[i] + neg_widths[i], neg_inds[i],
xerr=np.array([[neg_widths[i] - neg_low[i]], [neg_high[i] - neg_widths[i]]]),
ecolor=colors.light_blue_rgb
)
txt_obj = pl.text(
neg_lefts[i] + 0.5*dist, neg_inds[i], format_value(neg_widths[i], '%+0.02f'),
horizontalalignment='center', verticalalignment='center', color="white",
fontsize=12
)
text_bbox = txt_obj.get_window_extent(renderer=renderer)
arrow_bbox = arrow_obj.get_window_extent(renderer=renderer)
# if the text overflows the arrow then draw it after the arrow
if text_bbox.width > arrow_bbox.width:
txt_obj.remove()
txt_obj = pl.text(
neg_lefts[i] - (5/72)*bbox_to_xscale + dist, neg_inds[i], format_value(neg_widths[i], '%+0.02f'),
horizontalalignment='right', verticalalignment='center', color=colors.blue_rgb,
fontsize=12
)
# draw the y-ticks twice, once in gray and then again with just the feature names in black
pl.yticks(list(range(num_features))*2, yticklabels[:-1] + [l.split('=')[-1] for l in yticklabels[:-1]], fontsize=13)
# put horizontal lines for each feature row
for i in range(num_features):
pl.axhline(i, color="#cccccc", lw=0.5, dashes=(1, 5), zorder=-1)
# mark the prior expected value and the model prediction
pl.axvline(expected_value, 0, 1/num_features, color="#bbbbbb", linestyle="--", linewidth=0.5, zorder=-1)
fx = expected_value + shap_values.sum()
pl.axvline(fx, 0, 1, color="#bbbbbb", linestyle="--", linewidth=0.5, zorder=-1)
# clean up the main axis
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('none')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
pl.gca().spines['left'].set_visible(False)
ax.tick_params(labelsize=13)
#pl.xlabel("\nModel output", fontsize=12)
# draw the E[f(X)] tick mark
xmin,xmax = ax.get_xlim()
ax2=ax.twiny()
ax2.set_xlim(xmin,xmax)
ax2.set_xticks([expected_value, expected_value])
ax2.set_xticklabels(["\n$E[f(X)]$","\n$ = "+format_value(expected_value, "%0.03f")+"$"], fontsize=12, ha="left")
ax2.spines['right'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax2.spines['left'].set_visible(False)
# draw the f(x) tick mark
ax3=ax2.twiny()
ax3.set_xlim(xmin,xmax)
ax3.set_xticks([expected_value + shap_values.sum()] * 2)
ax3.set_xticklabels(["$f(x)$","$ = "+format_value(fx, "%0.03f")+"$"], fontsize=12, ha="left")
tick_labels = ax3.xaxis.get_majorticklabels()
tick_labels[0].set_transform(tick_labels[0].get_transform() + matplotlib.transforms.ScaledTranslation(-10/72., 0, fig.dpi_scale_trans))
tick_labels[1].set_transform(tick_labels[1].get_transform() + matplotlib.transforms.ScaledTranslation(12/72., 0, fig.dpi_scale_trans))
tick_labels[1].set_color("#999999")
ax3.spines['right'].set_visible(False)
ax3.spines['top'].set_visible(False)
ax3.spines['left'].set_visible(False)
# adjust the position of the E[f(X)] = x.xx label
tick_labels = ax2.xaxis.get_majorticklabels()
tick_labels[0].set_transform(tick_labels[0].get_transform() + matplotlib.transforms.ScaledTranslation(-20/72., 0, fig.dpi_scale_trans))
tick_labels[1].set_transform(tick_labels[1].get_transform() + matplotlib.transforms.ScaledTranslation(22/72., -1/72., fig.dpi_scale_trans))
tick_labels[1].set_color("#999999")
# color the y tick labels that have the feature values as gray
# (these fall behind the black ones with just the feature name)
tick_labels = ax.yaxis.get_majorticklabels()
for i in range(num_features):
tick_labels[i].set_color("#999999")
if show:
pl.show() | mit |
giorgiop/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 55 | 7386 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two South American
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.org/basemap>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
   Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
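        # Z was filled with pred.min() as a background value; land cells now hold the
        # decision-function values (sea cells are masked to -9999 just below).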
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
dongjoon-hyun/spark | python/pyspark/testing/sqlutils.py | 23 | 7740 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import shutil
import tempfile
from contextlib import contextmanager
from pyspark.sql import SparkSession
from pyspark.sql.types import ArrayType, DoubleType, UserDefinedType, Row
from pyspark.testing.utils import ReusedPySparkTestCase
pandas_requirement_message = None
try:
from pyspark.sql.pandas.utils import require_minimum_pandas_version
require_minimum_pandas_version()
except ImportError as e:
# If Pandas version requirement is not satisfied, skip related tests.
pandas_requirement_message = str(e)
pyarrow_requirement_message = None
try:
from pyspark.sql.pandas.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
except ImportError as e:
# If Arrow version requirement is not satisfied, skip related tests.
pyarrow_requirement_message = str(e)
test_not_compiled_message = None
try:
from pyspark.sql.utils import require_test_compiled
require_test_compiled()
except Exception as e:
test_not_compiled_message = str(e)
have_pandas = pandas_requirement_message is None
have_pyarrow = pyarrow_requirement_message is None
test_compiled = test_not_compiled_message is None
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
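# Illustrative usage (a sketch, assuming an active SparkSession `spark`): ExamplePoint
# values round-trip through ExamplePointUDT when placed in a DataFrame, e.g.
#   df = spark.createDataFrame([(ExamplePoint(1.0, 2.0),)], ["point"])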
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
An example class to demonstrate UDT in only Python
"""
__UDT__ = PythonOnlyUDT() # type: ignore
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class SQLTestUtils(object):
"""
    This util assumes the instance using it has a 'spark' attribute that holds a Spark session.
    It is usually used together with the 'ReusedSQLTestCase' class, but it can be used with any
    class that provides such a 'spark' attribute.
"""
@contextmanager
def sql_conf(self, pairs):
"""
A convenient context manager to test some configuration specific logic. This sets
`value` to the configuration `key` and then restores it back when it exits.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
keys = pairs.keys()
new_values = pairs.values()
old_values = [self.spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
self.spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
self.spark.conf.unset(key)
else:
self.spark.conf.set(key, old_value)
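    # Illustrative usage inside a test (the config key below is just an example):
    #   with self.sql_conf({"spark.sql.shuffle.partitions": "4"}):
    #       ...  # code under test sees the temporary setting; it is restored afterwards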
@contextmanager
def database(self, *databases):
"""
        A convenient context manager to test with some specific databases. This drops the given
        databases if they exist and sets the current database back to "default" when it exits.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for db in databases:
self.spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db)
self.spark.catalog.setCurrentDatabase("default")
@contextmanager
def table(self, *tables):
"""
        A convenient context manager to test with some specific tables. This drops the given tables
        if they exist when it exits.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for t in tables:
self.spark.sql("DROP TABLE IF EXISTS %s" % t)
@contextmanager
def tempView(self, *views):
"""
        A convenient context manager to test with some specific views. This drops the given views
        if they exist when it exits.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for v in views:
self.spark.catalog.dropTempView(v)
@contextmanager
def function(self, *functions):
"""
        A convenient context manager to test with some specific functions. This drops the given
        functions if they exist when it exits.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for f in functions:
self.spark.sql("DROP FUNCTION IF EXISTS %s" % f)
class ReusedSQLTestCase(ReusedPySparkTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
super(ReusedSQLTestCase, cls).setUpClass()
cls.spark = SparkSession(cls.sc)
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
super(ReusedSQLTestCase, cls).tearDownClass()
cls.spark.stop()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
| apache-2.0 |
pianomania/scikit-learn | examples/linear_model/plot_sgd_iris.py | 58 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
dstndstn/astrometry.net | solver/ver.py | 2 | 4869 | # This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
import math
try:
import pyfits
except ImportError:
try:
from astropy.io import fits as pyfits
except ImportError:
raise ImportError("Cannot import either pyfits or astropy.io.fits")
from numpy import *
from matplotlib.pylab import figure, plot, xlabel, ylabel, loglog, clf
from matplotlib.pylab import semilogy, show, find, legend, hist, axis
def plotDvsR(ix, iy, fx, fy, R):
IR = argsort(R)
mR = []
mD = []
mI = []
# Look at index stars in order of R.
for ii in range(len(IR)):
i = IR[ii]
x = ix[i]
y = iy[i]
D = sqrt((fx - x)**2 + (fy - y)**2)
# Grab field stars within a matching radius.
r = 5
I = find( D < r )
for j in I:
mR.append(R[i])
mD.append(D[j])
mI.append(ii)
plot(mR, mD, 'ro')
xlabel('Distance from quad center')
ylabel('Match distance')
def plotIvsR(ix, iy, fx, fy, cx, cy):
RF = sqrt((fx - cx)**2 + (fy - cy)**2)
mD2 = []
mI2 = []
for i in range(len(RF)):
r = 5
D = sqrt((ix - fx[i])**2 + (iy - fy[i])**2)
I = find( D < r )
for j in I:
mD2.append(D[j])
mI2.append(i)
#plot(mI, mD, 'ro', mI2, mD2, 'bo')
plot(mI2, mD2, 'bo')
xlabel('Index star number')
ylabel('Match distance')
#legend(('Sorted by dist from quad center', 'Sorted by brightness'))
if __name__ == '__main__':
# Index stars
ixy = pyfits.open('ver/index.xy.fits')
ixy = ixy[1].data
ix = ixy.field(0)
iy = ixy.field(1)
NI = len(ix)
# Field stars
fxy = pyfits.open('ver/field.xy.fits')
fxy = fxy[1].data
fx = fxy.field(0)
fy = fxy.field(1)
NF = len(fx)
# The matched quad.
mf = pyfits.open('ver/match.fits')
mf = mf[1].data
quad = mf.field('quadpix')[0]
quad = quad[0:8].reshape(4,2)
qx = quad[:,0]
qy = quad[:,1]
# Quad center.
cx = mean(qx)
cy = mean(qy)
# Grab index stars that are within the field.
iok = find( (ix > min(fx)) *
(ix < max(fx)) *
(iy > min(fy)) *
(iy < max(fy)) )
ix = [ix[i] for i in iok]
iy = [iy[i] for i in iok]
figure(1)
clf()
I = [0, 2, 1, 3, 0]
plot(
[cx], [cy], 'ro',
qx[I], qy[I], 'r-',
ix, iy, 'rx',
fx, fy, 'b+',
)
# RMS quad radius
RQ = sqrt(sum((qx - cx)**2 + (qy - cy)**2) / 4)
# Distance from quad center.
RI = sqrt((ix - cx)**2 + (iy - cy)**2)
RF = sqrt((fx - cx)**2 + (fy - cy)**2)
# Angle from quad center.
AI = array([math.atan2(y - cy, x - cx) for (x,y) in zip(ix,iy)])
AF = array([math.atan2(y - cy, x - cx) for (x,y) in zip(fx,fy)])
# Look at index stars in order of R.
IR = argsort(RI)
allD = array([])
allDR = array([])
allDA = array([])
allR = array([])
allDist = array([])
for i in IR:
# regularizer...
reg = RQ
#
Distscale = 1
DRscale = 1 / RQ
dR = ((RI[i] + reg) / (RF + reg)) - 1.0
dA = AI[i] - AF
# handle wrap-around
absdA = abs(dA)
absdA = vstack((absdA, abs(absdA - 2*math.pi))).min(axis=0)
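        # absdA is now min(|dA|, 2*pi - |dA|): the shorter angular separation on the circle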
#D = sqrt(dR**2 + dA**2)
D = sqrt(dR**2 + absdA**2)
D = D / DRscale
Dist = sqrt((ix[i] - fx)**2 + (iy[i] - fy)**2)
Dist = Dist / Distscale
iSmall = array(find((D < 1) + (Dist < 1)))
allD = hstack((allD, D[iSmall]))
allDR = hstack((allDR, dR[iSmall]))
allDA = hstack((allDA, dA[iSmall]))
allR = hstack((allR, repeat(RI[i], len(iSmall))))
allDist = hstack((allDist, Dist[iSmall]))
figure(2)
clf()
plot(allDR, allDA, 'ro', ms=1)
xlabel('DR')
ylabel('DA')
figure(3)
clf()
#plot(allR/RQ, allD, 'r.')
#plot(allR/RQ, vstack((allD, allDist)).min(axis=0), 'r.')
plot(
#allR/RQ, vstack((allD, allDist)).min(axis=0), 'mo',
allR/RQ, allD, 'r+',
allR/RQ, allDist, 'bx'
)
xlabel('R (quad radiuses)')
#ylabel('min( Dist, D(R+A) )')
ylabel('Dist, D(R,A)')
a = axis()
axis([a[0], a[1], 0, 2.0])
legend(('D(R,A)', 'D'))
#allDist = array([])
#allRDist = array([])
#for i in IR:
# Dist = sqrt((ix[i] - fx)**2 + (iy[i] - fy)**2)
# iSmall = array(find(Dist < 5))
# allDist = hstack((allDist, Dist[iSmall]))
# allRDist = hstack((allRDist, repeat(RI[i], len(iSmall))))
#figure(4)
#clf()
#plot(allRDist/RQ, allDist, 'r.')
#xlabel('R')
#ylabel('Dist')
#figure(2)
#clf()
#plotDvsR(ix, iy, RI, fx, fy)
#figure(3)
#clf()
#plotDvsI(ix, iy, fx, fy, cx, cy)
| bsd-3-clause |
lewisli/gemsflowpy | core/tf_struct.py | 1 | 2601 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 14 14:33:11 2016
@author: lewisli
"""
import numpy as np
import matplotlib.pyplot as plt
class DataSet(object):
def __init__(self, images, labels=None):
"""Construct a DataSet for use with TensorFlow
Args:
images: 3D np array containing (2D) images.
labels: labels corresponding to images (optional)
"""
self._num_dims = images.ndim - 1
self._num_examples = images.shape[self._num_dims]
self._num_rows = images.shape[0]
self._num_cols = images.shape[1]
# Check to see if labels is set
if labels is None:
self._supervised = False
labels = np.zeros(self._num_examples)
else:
assert self._num_examples == labels.shape[0], (
'images.shape: %s labels.shape: %s' % (images.shape,
labels.shape))
self._supervised = True
# Convert shape from [rows, columns, num_examples]
# to [num examples,rows*columns,]
        images = images.reshape(self._num_rows * self._num_cols, self._num_examples)
# Do we need to normalize images???
images = images.astype(np.float32).transpose()
images = (images-images.min())/(images.max() - images.min())
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size):
"""Return the next `batch_size` examples from this data set."""
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._images = self._images[perm]
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
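    # Illustrative usage (a sketch; `volume` stands for a (rows, cols, n_examples) ndarray):
    #   ds = DataSet(volume)
    #   batch_x, batch_y = ds.next_batch(32)   # batch_x: (32, rows*cols), scaled to [0, 1]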
def display_image(self,index,save_path=None):
fig, ax = plt.subplots(facecolor='white')
ax.imshow(self._images[index,:].reshape(self._num_rows,\
self._num_rows),origin='lower')
        if save_path is not None:
            # assumed intent: persist the rendered figure when a path is supplied
            fig.savefig(save_path)
plt.show()
| mit |
EmreAtes/spack | var/spack/repos/builtin/packages/py-wcsaxes/package.py | 5 | 1820 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyWcsaxes(PythonPackage):
"""WCSAxes is a framework for making plots of Astronomical data
in Matplotlib."""
homepage = "http://wcsaxes.readthedocs.io/en/latest/index.html"
url = "https://github.com/astrofrog/wcsaxes/archive/v0.8.tar.gz"
version('0.8', 'de1c60fdae4c330bf5ddb9f1ab5ab920')
extends('python', ignore=r'bin/')
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-astropy', type=('build', 'run'))
| lgpl-2.1 |
hsiaoyi0504/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
DGrady/pandas | pandas/tests/series/test_alter_axes.py | 5 | 9995 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from datetime import datetime
import numpy as np
import pandas as pd
from pandas import Index, Series
from pandas.core.index import MultiIndex, RangeIndex
from pandas.compat import lrange, range, zip
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesAlterAxes(TestData):
def test_setindex(self):
# wrong type
series = self.series.copy()
pytest.raises(TypeError, setattr, series, 'index', None)
# wrong length
series = self.series.copy()
pytest.raises(Exception, setattr, series, 'index',
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
assert isinstance(series.index, Index)
def test_rename(self):
renamer = lambda x: x.strftime('%Y%m%d')
renamed = self.ts.rename(renamer)
assert renamed.index[0] == renamer(self.ts.index[0])
# dict
rename_dict = dict(zip(self.ts.index, renamed.index))
renamed2 = self.ts.rename(rename_dict)
assert_series_equal(renamed, renamed2)
# partial dict
s = Series(np.arange(4), index=['a', 'b', 'c', 'd'], dtype='int64')
renamed = s.rename({'b': 'foo', 'd': 'bar'})
tm.assert_index_equal(renamed.index, Index(['a', 'foo', 'c', 'bar']))
# index with name
renamer = Series(np.arange(4),
index=Index(['a', 'b', 'c', 'd'], name='name'),
dtype='int64')
renamed = renamer.rename({})
assert renamed.index.name == renamer.index.name
def test_rename_by_series(self):
s = Series(range(5), name='foo')
renamer = Series({1: 10, 2: 20})
result = s.rename(renamer)
expected = Series(range(5), index=[0, 10, 20, 3, 4], name='foo')
tm.assert_series_equal(result, expected)
def test_rename_set_name(self):
s = Series(range(4), index=list('abcd'))
for name in ['foo', 123, 123., datetime(2001, 11, 11), ('foo',)]:
result = s.rename(name)
assert result.name == name
tm.assert_numpy_array_equal(result.index.values, s.index.values)
assert s.name is None
def test_rename_set_name_inplace(self):
s = Series(range(3), index=list('abc'))
for name in ['foo', 123, 123., datetime(2001, 11, 11), ('foo',)]:
s.rename(name, inplace=True)
assert s.name == name
exp = np.array(['a', 'b', 'c'], dtype=np.object_)
tm.assert_numpy_array_equal(s.index.values, exp)
def test_set_name_attribute(self):
s = Series([1, 2, 3])
s2 = Series([1, 2, 3], name='bar')
for name in [7, 7., 'name', datetime(2001, 1, 1), (1,), u"\u05D0"]:
s.name = name
assert s.name == name
s2.name = name
assert s2.name == name
def test_set_name(self):
s = Series([1, 2, 3])
s2 = s._set_name('foo')
assert s2.name == 'foo'
assert s.name is None
assert s is not s2
def test_rename_inplace(self):
renamer = lambda x: x.strftime('%Y%m%d')
expected = renamer(self.ts.index[0])
self.ts.rename(renamer, inplace=True)
assert self.ts.index[0] == expected
def test_set_index_makes_timeseries(self):
idx = tm.makeDateIndex(10)
s = Series(lrange(10))
s.index = idx
assert s.index.is_all_dates
def test_reset_index(self):
df = tm.makeDataFrame()[:5]
ser = df.stack()
ser.index.names = ['hash', 'category']
ser.name = 'value'
df = ser.reset_index()
assert 'value' in df
df = ser.reset_index(name='value2')
assert 'value2' in df
# check inplace
s = ser.reset_index(drop=True)
s2 = ser
s2.reset_index(drop=True, inplace=True)
assert_series_equal(s, s2)
# level
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
rs = s.reset_index(level=1)
assert len(rs.columns) == 2
rs = s.reset_index(level=[0, 2], drop=True)
tm.assert_index_equal(rs.index, Index(index.get_level_values(1)))
assert isinstance(rs, Series)
def test_reset_index_level(self):
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]],
columns=['A', 'B', 'C'])
for levels in ['A', 'B'], [0, 1]:
# With MultiIndex
s = df.set_index(['A', 'B'])['C']
result = s.reset_index(level=levels[0])
tm.assert_frame_equal(result, df.set_index('B'))
result = s.reset_index(level=levels[:1])
tm.assert_frame_equal(result, df.set_index('B'))
result = s.reset_index(level=levels)
tm.assert_frame_equal(result, df)
result = df.set_index(['A', 'B']).reset_index(level=levels,
drop=True)
tm.assert_frame_equal(result, df[['C']])
with tm.assert_raises_regex(KeyError, 'Level E '):
s.reset_index(level=['A', 'E'])
# With single-level Index
s = df.set_index('A')['B']
result = s.reset_index(level=levels[0])
tm.assert_frame_equal(result, df[['A', 'B']])
result = s.reset_index(level=levels[:1])
tm.assert_frame_equal(result, df[['A', 'B']])
result = s.reset_index(level=levels[0], drop=True)
tm.assert_series_equal(result, df['B'])
with tm.assert_raises_regex(IndexError, 'Too many levels'):
s.reset_index(level=[0, 1, 2])
def test_reset_index_range(self):
# GH 12071
s = pd.Series(range(2), name='A', dtype='int64')
series_result = s.reset_index()
assert isinstance(series_result.index, RangeIndex)
series_expected = pd.DataFrame([[0, 0], [1, 1]],
columns=['index', 'A'],
index=RangeIndex(stop=2))
assert_frame_equal(series_result, series_expected)
def test_reorder_levels(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]],
names=['L0', 'L1', 'L2'])
s = Series(np.arange(6), index=index)
# no change, position
result = s.reorder_levels([0, 1, 2])
assert_series_equal(s, result)
# no change, labels
result = s.reorder_levels(['L0', 'L1', 'L2'])
assert_series_equal(s, result)
# rotate, position
result = s.reorder_levels([1, 2, 0])
e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
labels=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0]],
names=['L1', 'L2', 'L0'])
expected = Series(np.arange(6), index=e_idx)
assert_series_equal(result, expected)
result = s.reorder_levels([0, 0, 0])
e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
labels=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
names=['L0', 'L0', 'L0'])
expected = Series(range(6), index=e_idx)
assert_series_equal(result, expected)
result = s.reorder_levels(['L0', 'L0', 'L0'])
assert_series_equal(result, expected)
def test_rename_axis_inplace(self):
# GH 15704
series = self.ts.copy()
expected = series.rename_axis('foo')
result = series.copy()
no_return = result.rename_axis('foo', inplace=True)
assert no_return is None
assert_series_equal(result, expected)
def test_set_axis_inplace(self):
# GH14636
s = Series(np.arange(4), index=[1, 3, 5, 7], dtype='int64')
expected = s.copy()
expected.index = list('abcd')
for axis in 0, 'index':
# inplace=True
# The FutureWarning comes from the fact that we would like to have
# inplace default to False some day
for inplace, warn in (None, FutureWarning), (True, None):
result = s.copy()
kwargs = {'inplace': inplace}
with tm.assert_produces_warning(warn):
result.set_axis(list('abcd'), axis=axis, **kwargs)
tm.assert_series_equal(result, expected)
# inplace=False
result = s.set_axis(list('abcd'), axis=0, inplace=False)
tm.assert_series_equal(expected, result)
# omitting the "axis" parameter
with tm.assert_produces_warning(None):
result = s.set_axis(list('abcd'), inplace=False)
tm.assert_series_equal(result, expected)
# wrong values for the "axis" parameter
for axis in 2, 'foo':
with tm.assert_raises_regex(ValueError, 'No axis named'):
s.set_axis(list('abcd'), axis=axis, inplace=False)
def test_set_axis_prior_to_deprecation_signature(self):
s = Series(np.arange(4), index=[1, 3, 5, 7], dtype='int64')
expected = s.copy()
expected.index = list('abcd')
for axis in 0, 'index':
with tm.assert_produces_warning(FutureWarning):
result = s.set_axis(0, list('abcd'), inplace=False)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
demianw/dipy | doc/examples/reconst_dsi_metrics.py | 13 | 4539 | """
===============================
Calculate DSI-based scalar maps
===============================
We show how to calculate two DSI-based scalar maps: return to origin
probability (rtop) [Descoteaux2011]_ and mean square displacement (msd)
[Wu2007]_, [Wu2008]_ on your dataset.
First import the necessary modules:
"""
import numpy as np
import matplotlib.pyplot as plt
from dipy.data import fetch_taiwan_ntu_dsi, read_taiwan_ntu_dsi
from dipy.reconst.dsi import DiffusionSpectrumModel
"""
Download and read the data for this tutorial.
"""
fetch_taiwan_ntu_dsi()
img, gtab = read_taiwan_ntu_dsi()
"""
img contains a nibabel Nifti1Image object (data) and gtab contains a GradientTable
object (gradient information, e.g. b-values). For example, to read the b-values
you can write print(gtab.bvals).
Load the raw diffusion data and the affine.
"""
data = img.get_data()
affine = img.get_affine()
print('data.shape (%d, %d, %d, %d)' % data.shape)
"""
Instantiate the Model and apply it to the data.
"""
dsmodel = DiffusionSpectrumModel(gtab, qgrid_size=35, filter_width=18.5)
"""
Let's use only one slice of the data.
"""
dataslice = data[30:70, 20:80, data.shape[2] // 2]
"""
Normalize the signal by the b0
"""
dataslice = dataslice / (dataslice[..., 0, None]).astype(np.float)
"""
Calculate the return to origin probability on the signal, which corresponds
to the integral of the signal.
"""
print('Calculating... rtop_signal')
rtop_signal = dsmodel.fit(dataslice).rtop_signal()
"""
Now we calculate the return to origin probability on the propagator, which
corresponds to its central value. By default the propagator is divided by its
sum in order to obtain a properly normalized pdf; however, this normalization
changes the values of rtop. Therefore, in order to compare it with the rtop
previously calculated on the signal, we set the normalized parameter to False.
"""
print('Calculating... rtop_pdf')
rtop_pdf = dsmodel.fit(dataslice).rtop_pdf(normalized=False)
"""
In theory these two measures should be equal; to verify this we calculate the
mean squared error between them.
"""
mse = np.sum((rtop_signal - rtop_pdf) ** 2) / rtop_signal.size
print("mse = %f" % mse)
"""
mse = 0.000000
Leaving the normalized parameter at its default changes the values of rtop but
not the contrast between voxels.
"""
print('Calculating... rtop_pdf_norm')
rtop_pdf_norm = dsmodel.fit(dataslice).rtop_pdf()
"""
Let's calculate the mean square displacement on the normalized propagator.
"""
print('Calculating... msd_norm')
msd_norm = dsmodel.fit(dataslice).msd_discrete()
"""
Turning the normalized parameter to false makes it possible to calculate
the mean square displacement on the propagator without normalization.
"""
print('Calculating... msd')
msd = dsmodel.fit(dataslice).msd_discrete(normalized=False)
"""
Show the rtop images and save them in rtop.png.
"""
fig = plt.figure(figsize=(6, 6))
ax1 = fig.add_subplot(2, 2, 1, title='rtop_signal')
ax1.set_axis_off()
ind = ax1.imshow(rtop_signal.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax2 = fig.add_subplot(2, 2, 2, title='rtop_pdf_norm')
ax2.set_axis_off()
ind = ax2.imshow(rtop_pdf_norm.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax3 = fig.add_subplot(2, 2, 3, title='rtop_pdf')
ax3.set_axis_off()
ind = ax3.imshow(rtop_pdf.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
plt.savefig('rtop.png')
"""
.. figure:: rtop.png
:align: center
**Return to origin probability**.
Show the msd images and save them in msd.png.
"""
fig = plt.figure(figsize=(7, 3))
ax1 = fig.add_subplot(1, 2, 1, title='msd_norm')
ax1.set_axis_off()
ind = ax1.imshow(msd_norm.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax2 = fig.add_subplot(1, 2, 2, title='msd')
ax2.set_axis_off()
ind = ax2.imshow(msd.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
plt.savefig('msd.png')
"""
.. figure:: msd.png
:align: center
**Mean square displacement**.
.. [Descoteaux2011] Descoteaux M. et. al , "Multiple q-shell diffusion
propagator imaging", Medical Image Analysis, vol 15,
No. 4, p. 603-621, 2011.
.. [Wu2007] Wu Y. et al., "Hybrid diffusion imaging", NeuroImage, vol 36,
p. 617-629, 2007.
.. [Wu2008] Wu Y. et al., "Computation of Diffusion Function Measures
in q -Space Using Magnetic Resonance Hybrid Diffusion Imaging",
IEEE TRANSACTIONS ON MEDICAL IMAGING, vol. 27, No. 6, p. 858-865,
2008
.. include:: ../links_names.inc
"""
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/linear_model/tests/test_coordinate_descent.py | 3 | 21287 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path
from sklearn.linear_model import LassoLarsCV, lars_path
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
"""Check that the lasso can handle zero data without crashing"""
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
"""
Test Lasso on a toy example for various values of alpha.
    When validating this against glmnet, note that glmnet divides the
    objective by nobs (the number of observations).
"""
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
"""
Test ElasticNet for various parameters of alpha and l1_ratio.
    Actually, the parameter alpha = 0 should not be allowed. However,
we test it as a border case.
ElasticNet is tested with and without precomputed Gram matrix
"""
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Compute the lasso_path
f = ignore_warnings
coef_path = [e.coef_ for e in f(lasso_path)(X, y, alphas=alphas,
return_models=True,
fit_intercept=False)]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
    # to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
fit_intercept=False,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
np.testing.assert_array_almost_equal(coef_path_cont_lasso(alphas),
np.asarray(coef_path).T, decimal=1)
np.testing.assert_array_almost_equal(coef_path_cont_lasso(alphas),
coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
#Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
"""Test that both random and cyclic selection give the same results.
Ensure that the test models fully converge and check a wide
range of conditions.
"""
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
MechCoder/scikit-learn | examples/linear_model/plot_logistic_multinomial.py | 81 | 2525 | """
====================================================
Plot multinomial and One-vs-Rest Logistic Regression
====================================================
Plot decision surface of multinomial and One-vs-Rest Logistic Regression.
The hyperplanes corresponding to the three One-vs-Rest (OVR) classifiers
are represented by the dashed lines.
"""
print(__doc__)
# Authors: Tom Dupre la Tour <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)
for multi_class in ('multinomial', 'ovr'):
clf = LogisticRegression(solver='sag', max_iter=100, random_state=42,
multi_class=multi_class).fit(X, y)
# print the training scores
print("training score : %.3f (%s)" % (clf.score(X, y), multi_class))
# create a mesh to plot in
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
plt.axis('tight')
# Plot also the training points
colors = "bry"
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired,
edgecolor='black', s=20)
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
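            # Solve coef[c, 0]*x0 + coef[c, 1]*x1 + intercept[c] = 0 for x1
            # to trace the one-vs-rest decision boundary of class c.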
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.show()
| bsd-3-clause |
OpenDrift/opendrift | examples/example_oil_entrainment_rate.py | 1 | 1339 | #!/usr/bin/env python
"""
Oil entrainment rate
====================
"""
import numpy as np
import matplotlib.pyplot as plt
from opendrift.models.physics_methods import oil_wave_entrainment_rate_li2017
#%%
# Viscosities from 0 to 20 Pa*s / kg/ms
vis = np.linspace(0, 20, 100)
colors = ['b', 'g', 'r']
lstyles = ['-', '-.']
#%%
# Calculating and plotting the entrainment rate as function of viscosity
# for 3 wind speeds
fig, ax = plt.subplots()
for wind, color in zip([6, 10, 15], colors):
# Entrainment rate from Li (2017)
for ift, ls in zip([.003, 10], lstyles):
r = oil_wave_entrainment_rate_li2017(
dynamic_viscosity=vis, oil_density=950, interfacial_tension=ift,
wind_speed=wind)
# from instantaneous rate (s-1) we calculate the probability of entrainment within one hour:
p1h = 1-np.exp(-3600*r)
ax.plot(vis, p1h, color+ls, label='Li(2017), %s m/s wind, IFT: %s' % (wind, ift))
plt.legend()
ax.set_xlim(vis.min(), vis.max())
ax.set_ylim(0, 1.05)
# Make second x-axis showing viscosity in Centipoise
ax2 = ax.twiny()
x1, x2 = ax.get_xlim()
ax2.set_xlim(1000*x1, 1000*x2)
ax2.figure.canvas.draw()
ax2.set_xlabel('Dynamic viscosity [Centipoise]')
ax.set_ylabel('Fraction entrained in 1 hour')
ax.set_xlabel('Dynamic viscosity [Pa*s] / [kg/ms]')
plt.show()
| gpl-2.0 |
JanetMatsen/meta4_bins_janalysis | compare_bins/support_files/filter_aggregated_data.py | 1 | 1369 | import pandas as pd
def load_percent_identies_result():
    return pd.read_csv("percent_identities.tsv", sep='\t')
def subset_given_colnames(name_list, dataframe=None):
# Copied from compare_fauzi_bins/
# 160601_ANI_improvements--use_percent_coverage.ipynb
for name in name_list:
assert " " not in name, \
"bin names don't have spaces! Fix {}".format(name)
if dataframe is None:
full_data = load_percent_identies_result()
else:
full_data = dataframe
assert isinstance(full_data, pd.DataFrame)
all_names = full_data['query name'].unique()
# build a list of names to pick out.
plot_names = []
for org_name in name_list:
found_names = [n for n in all_names if org_name in n]
if len(found_names) == 0:
print("WARNING: no bin names found for string {}".format(
org_name
))
plot_names += found_names
assert(len(plot_names) > 0 ), \
"didn't find any organism names based on name_list"
# reduce to the desired organisms.
selected_data = full_data.copy()
selected_data = selected_data[selected_data['query name'].isin(plot_names)]
selected_data = selected_data[selected_data['ref name'].isin(plot_names)]
print("num rows selected: {}".format(selected_data.shape[0]))
return selected_data | bsd-2-clause |
anntzer/scipy | scipy/signal/waveforms.py | 18 | 20287 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
import numpy as np
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly',
'unit_impulse']
def sawtooth(t, width=1):
"""
Return a periodic sawtooth or triangle waveform.
The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
Time.
width : array_like, optional
Width of the rising ramp as a proportion of the total cycle.
Default is 1, producing a rising ramp, while 0 produces a falling
ramp. `width` = 0.5 produces a triangle wave.
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500)
>>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
"""
t, w = asarray(t), asarray(width)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# take t modulo 2*pi
tmod = mod(t, 2 * pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
tsub = extract(mask2, tmod)
wsub = extract(mask2, w)
place(y, mask2, tsub / (pi * wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
tsub = extract(mask3, tmod)
wsub = extract(mask3, w)
place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
return y
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period ``2*pi``, has value +1 from 0 to
``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
the interval [0,1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
The input time array.
duty : array_like, optional
Duty cycle. Default is 0.5 (50% duty cycle).
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the square waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500, endpoint=False)
>>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
>>> plt.ylim(-2, 2)
A pulse-width modulated sine wave:
>>> plt.figure()
>>> sig = np.sin(2 * np.pi * t)
>>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
>>> plt.subplot(2, 1, 1)
>>> plt.plot(t, sig)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(t, pwm)
>>> plt.ylim(-1.5, 1.5)
"""
t, w = asarray(t), asarray(duty)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# on the interval 0 to duty*2*pi function is 1
tmod = mod(t, 2 * pi)
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
place(y, mask2, 1)
# on the interval duty*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
place(y, mask3, -1)
return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
retenv=False):
"""
Return a Gaussian modulated sinusoid:
``exp(-a t^2) exp(1j*2*pi*fc*t).``
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray or the string 'cutoff'
Input array.
fc : float, optional
Center frequency (e.g. Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (e.g. Hz).
Default is 0.5.
bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
Returns
-------
yI : ndarray
Real part of signal. Always returned.
yQ : ndarray
Imaginary part of signal. Only returned if `retquad` is True.
yenv : ndarray
Envelope of signal. Only returned if `retenv` is True.
See Also
--------
scipy.signal.morlet
Examples
--------
Plot real component, imaginary component, and envelope for a 5 Hz pulse,
sampled at 100 Hz for 2 seconds:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
>>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
>>> plt.plot(t, i, t, q, t, e, '--')
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
# pi^2/a * fc^2 * bw^2 /4=-log(ref)
a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))
if isinstance(t, str):
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must "
"be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref) / a)
else:
raise ValueError("If `t` is a string, it must be 'cutoff'")
yenv = exp(-a * t * t)
yI = yenv * cos(2 * pi * fc * t)
yQ = yenv * sin(2 * pi * fc * t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per unit';
there is no requirement here that the unit is one second. The
important distinction is that the units of rotation are cycles, not
radians. Likewise, `t` could be a measurement of space instead of time.
Parameters
----------
t : array_like
Times at which to evaluate the waveform.
f0 : float
Frequency (e.g. Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (e.g. Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
y : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral
(from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below.
See Also
--------
sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f0 and f1 must be nonzero.
Examples
--------
The following will be used in the examples:
>>> from scipy.signal import chirp, spectrogram
>>> import matplotlib.pyplot as plt
For the first example, we'll plot the waveform for a linear chirp
from 6 Hz to 1 Hz over 10 seconds:
>>> t = np.linspace(0, 10, 1500)
>>> w = chirp(t, f0=6, f1=1, t1=10, method='linear')
>>> plt.plot(t, w)
>>> plt.title("Linear Chirp, f(0)=6, f(10)=1")
>>> plt.xlabel('t (sec)')
>>> plt.show()
For the remaining examples, we'll use higher frequency ranges,
and demonstrate the result using `scipy.signal.spectrogram`.
We'll use a 4 second interval sampled at 7200 Hz.
>>> fs = 7200
>>> T = 4
>>> t = np.arange(0, int(T*fs)) / fs
We'll use this function to plot the spectrogram in each example.
>>> def plot_spectrogram(title, w, fs):
... ff, tt, Sxx = spectrogram(w, fs=fs, nperseg=256, nfft=576)
... plt.pcolormesh(tt, ff[:145], Sxx[:145], cmap='gray_r', shading='gouraud')
... plt.title(title)
... plt.xlabel('t (sec)')
... plt.ylabel('Frequency (Hz)')
... plt.grid()
...
Quadratic chirp from 1500 Hz to 250 Hz
(vertex of the parabolic curve of the frequency is at t=0):
>>> w = chirp(t, f0=1500, f1=250, t1=T, method='quadratic')
>>> plot_spectrogram(f'Quadratic Chirp, f(0)=1500, f({T})=250', w, fs)
>>> plt.show()
Quadratic chirp from 1500 Hz to 250 Hz
(vertex of the parabolic curve of the frequency is at t=T):
>>> w = chirp(t, f0=1500, f1=250, t1=T, method='quadratic',
... vertex_zero=False)
>>> plot_spectrogram(f'Quadratic Chirp, f(0)=1500, f({T})=250\\n' +
... '(vertex_zero=False)', w, fs)
>>> plt.show()
Logarithmic chirp from 1500 Hz to 250 Hz:
>>> w = chirp(t, f0=1500, f1=250, t1=T, method='logarithmic')
>>> plot_spectrogram(f'Logarithmic Chirp, f(0)=1500, f({T})=250', w, fs)
>>> plt.show()
Hyperbolic chirp from 1500 Hz to 250 Hz:
>>> w = chirp(t, f0=1500, f1=250, t1=T, method='hyperbolic')
>>> plot_spectrogram(f'Hyperbolic Chirp, f(0)=1500, f({T})=250', w, fs)
>>> plt.show()
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by `chirp` to generate its output.
See `chirp` for a description of the arguments.
"""
t = asarray(t)
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f0 == 0 or f1 == 0:
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
"nonzero.")
if f0 == f1:
# Degenerate case: constant frequency.
phase = 2 * pi * f0 * t
else:
# Singular point: the instantaneous frequency blows up
# when t == sing.
sing = -f1 * t1 / (f0 - f1)
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given."
% method)
return phase
def sweep_poly(t, poly, phi=0):
"""
Frequency-swept cosine generator, with a time-dependent frequency.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1-D array_like or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
Phase offset, in degrees, Default: 0.
Returns
-------
sweep_poly : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral
(from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above.
See Also
--------
chirp
Notes
-----
.. versionadded:: 0.8.0
If `poly` is a list or ndarray of length `n`, then the elements of
`poly` are the coefficients of the polynomial, and the instantaneous
frequency is:
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of `numpy.poly1d`, then the instantaneous
frequency is:
``f(t) = poly(t)``
Finally, the output `s` is:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``,
``f(t)`` as defined above.
Examples
--------
Compute the waveform with instantaneous frequency::
f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2
over the interval 0 <= t <= 10.
>>> from scipy.signal import sweep_poly
>>> p = np.poly1d([0.025, -0.36, 1.25, 2.0])
>>> t = np.linspace(0, 10, 5001)
>>> w = sweep_poly(t, p)
Plot it:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(2, 1, 1)
>>> plt.plot(t, w)
>>> plt.title("Sweep Poly\\nwith frequency " +
... "$f(t) = 0.025t^3 - 0.36t^2 + 1.25t + 2$")
>>> plt.subplot(2, 1, 2)
>>> plt.plot(t, p(t), 'r', label='f(t)')
>>> plt.legend()
>>> plt.xlabel('t')
>>> plt.tight_layout()
>>> plt.show()
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
def unit_impulse(shape, idx=None, dtype=float):
"""
Unit impulse signal (discrete delta function) or unit basis vector.
Parameters
----------
shape : int or tuple of int
Number of samples in the output (1-D), or a tuple that represents the
shape of the output (N-D).
idx : None or int or tuple of int or 'mid', optional
Index at which the value is 1. If None, defaults to the 0th element.
If ``idx='mid'``, the impulse will be centered at ``shape // 2`` in
all dimensions. If an int, the impulse will be at `idx` in all
dimensions.
dtype : data-type, optional
The desired data-type for the array, e.g., ``numpy.int8``. Default is
``numpy.float64``.
Returns
-------
y : ndarray
Output array containing an impulse signal.
Notes
-----
The 1D case is also known as the Kronecker delta.
.. versionadded:: 0.19.0
Examples
--------
An impulse at the 0th element (:math:`\\delta[n]`):
>>> from scipy import signal
>>> signal.unit_impulse(8)
array([ 1., 0., 0., 0., 0., 0., 0., 0.])
Impulse offset by 2 samples (:math:`\\delta[n-2]`):
>>> signal.unit_impulse(7, 2)
array([ 0., 0., 1., 0., 0., 0., 0.])
2-dimensional impulse, centered:
>>> signal.unit_impulse((3, 3), 'mid')
array([[ 0., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 0.]])
Impulse at (2, 2), using broadcasting:
>>> signal.unit_impulse((4, 4), 2)
array([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 1., 0.],
[ 0., 0., 0., 0.]])
Plot the impulse response of a 4th-order Butterworth lowpass filter:
>>> imp = signal.unit_impulse(100, 'mid')
>>> b, a = signal.butter(4, 0.2)
>>> response = signal.lfilter(b, a, imp)
>>> import matplotlib.pyplot as plt
>>> plt.plot(np.arange(-50, 50), imp)
>>> plt.plot(np.arange(-50, 50), response)
>>> plt.margins(0.1, 0.1)
>>> plt.xlabel('Time [samples]')
>>> plt.ylabel('Amplitude')
>>> plt.grid(True)
>>> plt.show()
"""
out = zeros(shape, dtype)
shape = np.atleast_1d(shape)
if idx is None:
idx = (0,) * len(shape)
elif idx == 'mid':
idx = tuple(shape // 2)
elif not hasattr(idx, "__iter__"):
idx = (idx,) * len(shape)
out[idx] = 1
return out
| bsd-3-clause |
gfolego/vangogh | src/analysis/classify.py | 1 | 7049 | #!/usr/bin/python
# classify.py
# Copyright 2016
# Guilherme Folego ([email protected])
# Otavio Gomes ([email protected])
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
============================================================
Classify
============================================================
Classify using different aggregation methods
"""
import sys
import argparse
from multiprocessing import cpu_count
import numpy as np
from sklearn import svm, grid_search, cross_validation, metrics
import cPickle as pickle
from common import CACHE_SIZE, C_RANGE, GAMMA_RANGE, CLASS_WEIGHTS, N_ITER, K_FOLD, \
iter_type, dir_type, print_verbose, set_verbose_level, get_verbose_level, get_n_cores, set_n_cores
from gather_data import gen_data, parse_class
def parse_args(argv):
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-d', '--dir', type=dir_type, required=True,
help='data directory')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='verbosity level')
parser.add_argument('-c', '--cores', default=get_n_cores(), type=int,
choices=xrange(1, cpu_count()+1),
help='number of cores to be used (default: %d)' % get_n_cores())
parser.add_argument('-m', '--model', type=str, required=True,
help='path to import the classifier model')
parser.add_argument('-a', '--aggregation',
choices=['mode','sum','mean','median','far'],
default='far',
help='aggregation method (default: far)')
parser.add_argument('-g', '--gtruth', action='store_true',
help='ground truth class is available (default: False)')
args = parser.parse_args(args=argv)
return args
def eval_perf(classification):
y_true = []
y_pred = []
for (key, value) in classification.iteritems():
y_true.extend([parse_class(key)])
y_pred.extend([value])
print_verbose("Classification pair: %s" % str((key, value)), 4)
print_verbose("True classes: %s" % str(y_true), 5)
print_verbose("Predicted classes: %s" % str(y_pred), 5)
# Print results
print_verbose("True classes: %s" % str(y_true), 2)
print_verbose("Predicted classes: %s" % str(y_pred), 2)
# Print metrics
print_verbose("Confusion Matrix:", 0)
print_verbose(metrics.confusion_matrix(y_true, y_pred), 0)
print_verbose("Classification Report:", 0)
print_verbose(metrics.classification_report(y_true, y_pred), 0)
def agg_pred_mode(pred):
print_verbose('Aggregating using mode ...', 0)
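    # np.bincount tallies the votes for each (non-negative integer) predicted
    # label; argmax then returns the majority class.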
counts = np.bincount(pred)
return np.argmax(counts)
def agg_pred_dist_sumall(pred, classes):
print_verbose('Aggregating by summing all distances ...', 0)
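    # pred holds signed decision_function distances: positive values vote for
    # classes[1], negative values for classes[0] (binary classifier convention).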
tot = np.sum(pred)
cl = classes[1] if (tot > 0) else classes[0]
return cl
def agg_pred_dist_far(pred, classes):
print_verbose('Aggregating by using the farthest point class ...', 0)
arr_pos = pred[pred >= 0]
max_pos = np.max(arr_pos) if (arr_pos.size > 0) else 0
arr_neg = pred[pred <= 0]
max_neg = np.abs(np.min(arr_neg)) if (arr_neg.size > 0) else 0
cl = classes[1] if (max_pos > max_neg) else classes[0]
return cl
def agg_pred_dist_meangroup(pred, classes):
print_verbose('Aggregating by comparing distance groups means ...', 0)
arr_pos = pred[pred >= 0]
avg_pos = np.mean(arr_pos) if (arr_pos.size > 0) else 0
arr_neg = pred[pred <= 0]
avg_neg = np.abs(np.mean(arr_neg)) if (arr_neg.size > 0) else 0
cl = classes[1] if (avg_pos > avg_neg) else classes[0]
return cl
def agg_pred_dist_mediangroup(pred, classes):
print_verbose('Aggregating by comparing distance groups medians ...', 0)
arr_pos = pred[pred >= 0]
med_pos = np.median(arr_pos) if (arr_pos.size > 0) else 0
arr_neg = pred[pred <= 0]
med_neg = np.abs(np.median(arr_neg)) if (arr_neg.size > 0) else 0
cl = classes[1] if (med_pos > med_neg) else classes[0]
return cl
def classify(data, labels, args):
classification = {}
# Read model
with open(args.model, "rb") as f:
model = pickle.load(f)
print_verbose("Model [%0.2f%%]: %s" % (model.best_score_*100, str(model.best_estimator_)), 4)
# Classify each label
lolo = cross_validation.LeaveOneLabelOut(labels)
print_verbose("LeaveOneOut: %s" % str(lolo), 5)
for train_index, test_index in lolo:
print_verbose("Test index: %s" % str(test_index), 5)
print_verbose("Classifying label: %s" % str(labels[test_index[0]]), 4)
# Classify
if args.aggregation == 'mode':
pred = model.predict(data[test_index])
else:
pred = model.decision_function(data[test_index])
print_verbose("Patch prediction: %s" % str(pred), 4)
# Aggregate
if args.aggregation == 'mode':
res = agg_pred_mode(pred)
elif args.aggregation == 'sum':
res = agg_pred_dist_sumall(pred, model.best_estimator_.classes_)
elif args.aggregation == 'far':
res = agg_pred_dist_far(pred, model.best_estimator_.classes_)
elif args.aggregation == 'mean':
res = agg_pred_dist_meangroup(pred, model.best_estimator_.classes_)
elif args.aggregation == 'median':
res = agg_pred_dist_mediangroup(pred, model.best_estimator_.classes_)
print_verbose("Aggregate result: %s" % str(res), 4)
# Append to final result
classification[labels[test_index[0]]] = res
print_verbose("Classification: %s" % str(classification), 5)
return classification
def main(argv):
# Parse arguments
args = parse_args(argv)
set_verbose_level(args.verbose)
set_n_cores(args.cores)
print_verbose("Args: %s" % str(args), 1)
# Some tests
data, labels = gen_data(args.dir, False)
print_verbose('Data: %s' % str(data), 5)
print_verbose('Labels: %s' % str(labels), 4)
print_verbose('Data shape: %s' % str(data.shape), 2)
print_verbose('Labels shape: %s' % str(labels.shape), 2)
classification = classify(data, labels, args)
print_verbose('Final classification: %s' % str(classification), 0)
# Evaluate performance
if args.gtruth:
eval_perf(classification)
if __name__ == "__main__":
main(sys.argv[1:])
| apache-2.0 |
albu5/deepGroup | scene-activity/scene_lstm_test.py | 1 | 1960 | from keras.layers import Input, LSTM, Dense, Masking, merge, Dropout
from keras.models import Model, load_model
from keras.optimizers import adam
from keras.utils import to_categorical
import numpy as np
from numpy import genfromtxt, savetxt
from matplotlib import pyplot as plt
data_dir = './split2/scene_activity_data_test_none'
model_path = './scenelstm/scenelstm-split1.h5'
batch_size = 128
trainX1 = (genfromtxt(data_dir + '/' + 'trainX1.csv', delimiter=','))
trainX2 = np.reshape(genfromtxt(data_dir + '/' + 'trainX2.csv', delimiter=','), newshape=(-1, 10, 2048))
trainY = to_categorical(genfromtxt(data_dir + '/' + 'trainY.csv', delimiter=',') - 1)
testX1 = (genfromtxt(data_dir + '/' + 'testX1.csv', delimiter=','))
testX2 = np.reshape(genfromtxt(data_dir + '/' + 'testX2.csv', delimiter=','), newshape=(-1, 10, 2048))
testY = to_categorical(genfromtxt(data_dir + '/' + 'testY.csv', delimiter=',') - 1)
print(trainX1.shape, trainX2.shape, trainY.shape, testX1.shape, testX2.shape, testY.shape)
# freq_layer = Input(shape=(4,))
# context_layer = Input(shape=(10, 2048))
# masked = Masking()(context_layer)
# lstm1 = LSTM(256, activation='sigmoid', recurrent_activation='tanh')(masked)
# drop1 = Dropout(rate=0.95)(lstm1)
# fc_context = Dense(16, activation='tanh')(drop1)
# fc_freq = Dense(16, activation='tanh')(freq_layer)
# merged = merge(inputs=[fc_context, fc_freq], mode='concat', concat_axis=1)
# fc2 = Dense(5, activation='softmax')(merged)
# scene_net = Model(inputs=[freq_layer, context_layer], outputs=fc2)
# print(scene_net.summary())
#
# optm = adam(lr=0.001)
# scene_net.compile(optimizer=optm, loss='categorical_crossentropy', metrics=['accuracy'])
scene_net = load_model(model_path)
print(testY.shape, trainY.shape)
scores = scene_net.evaluate(x=[testX1, testX2], y=testY, batch_size=batch_size)
y_fit = scene_net.predict(x=[testX1, testX2])
savetxt(data_dir + '/' + 'scene_res_none.txt', np.hstack((y_fit, testY)))
print(scores)
| mit |
ecotox/pacfm | pacfm/model/tools/pathway/pathway_analyzer.py | 1 | 7119 | import pickle
import os
import subprocess
from pandas import DataFrame
from pacfm.model import file_provider
class PathwayAnalyzer(object):
def __init__(self, biodb_selector, input_builder): # hierarchical_abundance_data,
self.biodb_selector= biodb_selector
self.input_builder= input_builder
### ideograms should be copied
self.ideograms= input_builder.assembler.ideograms
#self.hier_df_fantom= hierarchical_abundance_data
self.update_dataframe()
#def filter_pathways_by_level(self, pathway_list, level):
# self.fam_hier[level]
def update_dataframe(self):
self.data_frame= self.input_builder.assembler.to_data_frame()
def get_dataframe(self):
return self.data_frame
def filter_pathways_by_key_leaf_features(self, key_type, nPathways=1000, pathway_key_ids= None, remove_feature= False):
"""
        key_type:
0: any
1: all
2: manual (key enzyme selection)
3: association
        pathway_key_ids: dictionary with pathways as keys and coordinate lists
        as values
        nPathways: the maximum number of associations that a key enzyme is
        restricted by.
"""
ide= self.ideograms[-1]
for pathway in ide.chromosomes:
key_feature_ids= None
if pathway_key_ids is not None:
if pathway.name in pathway_key_ids:
key_feature_ids= pathway_key_ids[pathway.name]
pathway.filter_coordinates_by_key_features(key_type, nPathways, key_feature_ids, remove_feature)
if remove_feature:
filtered_link_features= pathway.get_filtered()
else:
filtered_link_features= pathway.get_null_features()
for feature in filtered_link_features:
ide.filter_links_by_feature(feature)
#remove the link coordinates from the ide
self.update_dataframe()
### pathway object should have an attribute called
# db_features: including total sequence length and total number of proteins
# pathway_container object should potentially keep the dump
def set_pathway_info(self):
for ide in self.ideograms:
ide.load_db_data()
self.update_dataframe()
def update_ideograms_by_dataframe(self, dataframe):
for ide in self.ideograms:
ide.update_by_dataframe(dataframe)
def update_ideograms_by_pathways(self, pathways):
for ide in self.ideograms:
ide.update_by_pathways(pathways)
def normalize_by_pathway(self, pathway_feature, level=3):
"""
        pathway_feature can be n_protein or sequence_length
"""
self.set_pathway_info()
pathway_factor= {c.name: c.db_data[pathway_feature] for c in self.ideograms[level-1].chromosomes}
nLevels= self.biodb_selector.getLevelCount()
df_lengths= DataFrame(pathway_factor.items(), index= range(len(pathway_factor)), columns=["Level %s" % level,"Length"])
#return df_lengths
        ## by merging according to the lengths dataframe, we also
## filter the main dataframe in the meanwhile
df_merged= df_lengths.merge(self.data_frame, on= "Level %s" % level)
### columns have one extra level for accession and one more
### for the lengths in df_merged
df_merged[self.data_frame.columns[nLevels+1:]]= df_merged[self.data_frame.columns[nLevels+1:]].divide(df_merged['Length'].values, axis= 0)
df_normalized= df_merged[self.data_frame.columns]
self.update_ideograms_by_dataframe(df_normalized)
self.update_dataframe()
def normalize_by_algorithm(self, algorithm):
"""
        Filter pathway links using the given algorithm; currently only 'minpath' (for the KEGG database) is supported.
"""
if algorithm == 'minpath':
if not self.biodb_selector.db_name.lower().startswith('kegg'):
raise Exception('MinPath algorithm is only designed for the KEGG database!')
minpath_file= file_provider['minpath']['input']
with open(minpath_file, 'w') as fIn:
accessions= set(self.data_frame['Accession'])
for k,v in zip(range(len(accessions)), accessions):
fIn.write('%s\t%s\n'%(k,v))
minpath_program= file_provider['minpath']['bin']
minpath_output= file_provider['minpath']['output']
cmd= 'python %s -ko %s -report %s' % (minpath_program, minpath_file, minpath_output)
run_cmd(cmd)
Pathways={}
Discarded= {}
with open(minpath_output) as minOut:
for line in minOut:
cols= line.rstrip('\n').split()
minp= cols[6]
test= int(cols[7])
name= ' '.join(cols[13:]).lower()
accession= cols[1].strip()
try:
pathway_id= self.biodb_selector.getFeatureByAccession(unicode(accession)).id
except:
print accession, "not found"
continue
if test:
Pathways[pathway_id]=1
else:
Discarded[pathway_id]=1
pathways= Pathways.keys()
discarded= Discarded.keys()
self.update_ideograms_by_pathways(pathways)
self.update_dataframe()
def run_cmd(cmd):
p = subprocess.Popen(cmd, shell= True, stdout=subprocess.PIPE)
#ret_code = p.wait()
output = p.communicate()[0]
#print output
return output
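# Illustrative usage of run_cmd (added for clarity, not part of the original
# module): it blocks until the shell command finishes and returns its stdout.
# out = run_cmd('echo minpath')  # out == 'minpath\n'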
#write ide updater functions for the normalization functions
trash= """
def normalize_by_orthologous_sequence_length(self, Orth_sequence_lengths):
'''
!!! skip this!!!
!!! - instead of dividing by the enzyme length of each ko
!!! get the total sequence length of enzymes required in a
!!! pathway!!!
'''
nLevels= self.biodb_selector.getLevelCount()
df_lengths= DataFrame(Orth_sequence_lengths.items(), index= range(len(Orth_sequence_lengths)), columns=["Accession","Length"])
        ## by merging according to the lengths dataframe, we also
## filter the main dataframe in the meanwhile
df_merged= df_lengths.merge(self.data_frame, on= "Accession")
### columns have one extra level for accession and one more
### for the lengths in df_merged
df_merged[self.data_frame.columns[nLevels+1:]]= df_merged[self.data_frame.columns[nLevels+1:]].divide(df_merged['Length'].values, axis= 0)
return df_merged[self.data_frame.columns]
"""
| mit |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/sparse/tests/test_indexing.py | 7 | 38977 | # pylint: disable-msg=E1101,W0612
import nose # noqa
import numpy as np
import pandas as pd
import pandas.util.testing as tm
class TestSparseSeriesIndexing(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
self.sparse = self.orig.to_sparse()
def test_getitem(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse[0], 1)
self.assertTrue(np.isnan(sparse[1]))
self.assertEqual(sparse[3], 3)
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse())
tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse())
tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse())
tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse())
def test_getitem_int_dtype(self):
# GH 8292
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6], name='xxx')
tm.assert_sp_series_equal(res, exp)
self.assertEqual(res.dtype, np.int64)
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], fill_value=0, name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6],
fill_value=0, name='xxx')
tm.assert_sp_series_equal(res, exp)
self.assertEqual(res.dtype, np.int64)
def test_getitem_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse[0], 1)
self.assertTrue(np.isnan(sparse[1]))
self.assertEqual(sparse[2], 0)
self.assertEqual(sparse[3], 3)
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_ellipsis(self):
# GH 9467
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan])
tm.assert_sp_series_equal(s[...], s)
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan], fill_value=0)
tm.assert_sp_series_equal(s[...], s)
def test_getitem_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse[:2],
orig[:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[4:2],
orig[4:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[::2],
orig[::2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[-5:],
orig[-5:].to_sparse(fill_value=0))
def test_loc(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse.loc[0], 1)
self.assertTrue(np.isnan(sparse.loc[1]))
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.loc[[1, 3, 4, 5]]
exp = orig.loc[[1, 3, 4, 5]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# padded with NaN
self.assertTrue(np.isnan(result[-1]))
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list('ABCDE'))
sparse = orig.to_sparse()
self.assertEqual(sparse.loc['A'], 1)
self.assertTrue(np.isnan(sparse.loc['B']))
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.loc['A'], 1)
self.assertTrue(np.isnan(sparse.loc['B']))
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_loc_slice_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc['C':],
orig.loc['C':].to_sparse(fill_value=0))
def test_loc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc[2:],
orig.loc[2:].to_sparse(fill_value=0))
def test_iloc(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse.iloc[3], 3)
self.assertTrue(np.isnan(sparse.iloc[2]))
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
result = sparse.iloc[[1, -2, -4]]
exp = orig.iloc[[1, -2, -4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
with tm.assertRaises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.iloc[3], 3)
self.assertTrue(np.isnan(sparse.iloc[1]))
self.assertEqual(sparse.iloc[4], 0)
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_iloc_slice(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_iloc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.iloc[2:],
orig.iloc[2:].to_sparse(fill_value=0))
def test_at(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
self.assertEqual(sparse.at[0], orig.at[0])
self.assertTrue(np.isnan(sparse.at[1]))
self.assertTrue(np.isnan(sparse.at[2]))
self.assertEqual(sparse.at[3], orig.at[3])
self.assertTrue(np.isnan(sparse.at[4]))
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('abcde'))
sparse = orig.to_sparse()
self.assertEqual(sparse.at['a'], orig.at['a'])
self.assertTrue(np.isnan(sparse.at['b']))
self.assertTrue(np.isnan(sparse.at['c']))
self.assertEqual(sparse.at['d'], orig.at['d'])
self.assertTrue(np.isnan(sparse.at['e']))
def test_at_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('abcde'))
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.at['a'], orig.at['a'])
self.assertTrue(np.isnan(sparse.at['b']))
self.assertEqual(sparse.at['c'], orig.at['c'])
self.assertEqual(sparse.at['d'], orig.at['d'])
self.assertEqual(sparse.at['e'], orig.at['e'])
def test_iat(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse.iat[0], orig.iat[0])
self.assertTrue(np.isnan(sparse.iat[1]))
self.assertTrue(np.isnan(sparse.iat[2]))
self.assertEqual(sparse.iat[3], orig.iat[3])
self.assertTrue(np.isnan(sparse.iat[4]))
self.assertTrue(np.isnan(sparse.iat[-1]))
self.assertEqual(sparse.iat[-5], orig.iat[-5])
def test_iat_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse()
self.assertEqual(sparse.iat[0], orig.iat[0])
self.assertTrue(np.isnan(sparse.iat[1]))
self.assertEqual(sparse.iat[2], orig.iat[2])
self.assertEqual(sparse.iat[3], orig.iat[3])
self.assertEqual(sparse.iat[4], orig.iat[4])
self.assertEqual(sparse.iat[-1], orig.iat[-1])
self.assertEqual(sparse.iat[-5], orig.iat[-5])
def test_get(self):
s = pd.SparseSeries([1, np.nan, np.nan, 3, np.nan])
self.assertEqual(s.get(0), 1)
self.assertTrue(np.isnan(s.get(1)))
self.assertIsNone(s.get(5))
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'))
self.assertEqual(s.get('A'), 1)
self.assertTrue(np.isnan(s.get('B')))
self.assertEqual(s.get('C'), 0)
self.assertIsNone(s.get('XX'))
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'),
fill_value=0)
self.assertEqual(s.get('A'), 1)
self.assertTrue(np.isnan(s.get('B')))
self.assertEqual(s.get('C'), 0)
self.assertIsNone(s.get('XX'))
def test_take(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse())
tm.assert_sp_series_equal(sparse.take([0, 1, 3]),
orig.take([0, 1, 3]).to_sparse())
tm.assert_sp_series_equal(sparse.take([-1, -2]),
orig.take([-1, -2]).to_sparse())
def test_take_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse(fill_value=0))
exp = orig.take([0, 1, 3]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0, 1, 3]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
# all missing & fill_value
res = sparse.reindex(['B', 'E', 'C'])
exp = orig.reindex(['B', 'E', 'C']).to_sparse()
tm.assert_sp_series_equal(res, exp)
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
def test_reindex_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# includes missing and fill_value
res = sparse.reindex(['A', 'B', 'C'])
exp = orig.reindex(['A', 'B', 'C']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all missing
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all fill_value
orig = pd.Series([0., 0., 0., 0., 0.],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
def tests_indexing_with_sparse(self):
# GH 13985
for kind in ['integer', 'block']:
for fill in [True, False, np.nan]:
arr = pd.SparseArray([1, 2, 3], kind=kind)
indexer = pd.SparseArray([True, False, True], fill_value=fill,
dtype=bool)
tm.assert_sp_array_equal(pd.SparseArray([1, 3], kind=kind),
arr[indexer])
s = pd.SparseSeries(arr, index=['a', 'b', 'c'],
dtype=np.float64)
exp = pd.SparseSeries([1, 3], index=['a', 'c'],
dtype=np.float64, kind=kind)
tm.assert_sp_series_equal(s[indexer], exp)
tm.assert_sp_series_equal(s.loc[indexer], exp)
tm.assert_sp_series_equal(s.iloc[indexer], exp)
indexer = pd.SparseSeries(indexer, index=['a', 'b', 'c'])
tm.assert_sp_series_equal(s[indexer], exp)
tm.assert_sp_series_equal(s.loc[indexer], exp)
msg = ("iLocation based boolean indexing cannot use an "
"indexable as a mask")
with tm.assertRaisesRegexp(ValueError, msg):
s.iloc[indexer]
class TestSparseSeriesMultiIndexing(TestSparseSeriesIndexing):
_multiprocess_can_split_ = True
def setUp(self):
        # MultiIndex with duplicated values
idx = pd.MultiIndex.from_tuples([('A', 0), ('A', 1), ('B', 0),
('C', 0), ('C', 1)])
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=idx)
self.sparse = self.orig.to_sparse()
def test_getitem_multi(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse[0], orig[0])
self.assertTrue(np.isnan(sparse[1]))
self.assertEqual(sparse[3], orig[3])
tm.assert_sp_series_equal(sparse['A'], orig['A'].to_sparse())
tm.assert_sp_series_equal(sparse['B'], orig['B'].to_sparse())
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_multi_tuple(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse['C', 0], orig['C', 0])
self.assertTrue(np.isnan(sparse['A', 1]))
self.assertTrue(np.isnan(sparse['B', 0]))
def test_getitems_slice_multi(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[2:], orig[2:].to_sparse())
tm.assert_sp_series_equal(sparse.loc['B':], orig.loc['B':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['C':], orig.loc['C':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['A':'B'],
orig.loc['A':'B'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:'B'], orig.loc[:'B'].to_sparse())
def test_loc(self):
# need to be override to use different label
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc['A'],
orig.loc['A'].to_sparse())
tm.assert_sp_series_equal(sparse.loc['B'],
orig.loc['B'].to_sparse())
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.loc[[1, 3, 4, 5]]
exp = orig.loc[[1, 3, 4, 5]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_multi_tuple(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse.loc['C', 0], orig.loc['C', 0])
self.assertTrue(np.isnan(sparse.loc['A', 1]))
self.assertTrue(np.isnan(sparse.loc['B', 0]))
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc['A':], orig.loc['A':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['B':], orig.loc['B':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['C':], orig.loc['C':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['A':'B'],
orig.loc['A':'B'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:'B'], orig.loc[:'B'].to_sparse())
class TestSparseDataFrameIndexing(tm.TestCase):
_multiprocess_can_split_ = True
def test_getitem(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse['x'], orig['x'].to_sparse())
tm.assert_sp_frame_equal(sparse[['x']], orig[['x']].to_sparse())
tm.assert_sp_frame_equal(sparse[['z', 'x']],
orig[['z', 'x']].to_sparse())
tm.assert_sp_frame_equal(sparse[[True, False, True, True]],
orig[[True, False, True, True]].to_sparse())
tm.assert_sp_frame_equal(sparse[[1, 2]],
orig[[1, 2]].to_sparse())
def test_getitem_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse['y'],
orig['y'].to_sparse(fill_value=0))
exp = orig[['x']].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[['x']], exp)
exp = orig[['z', 'x']].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[['z', 'x']], exp)
indexer = [True, False, True, True]
exp = orig[indexer].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[indexer], exp)
exp = orig[[1, 2]].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[[1, 2]], exp)
def test_loc(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
columns=list('xyz'))
sparse = orig.to_sparse()
self.assertEqual(sparse.loc[0, 'x'], 1)
self.assertTrue(np.isnan(sparse.loc[1, 'z']))
self.assertEqual(sparse.loc[2, 'z'], 4)
tm.assert_sp_series_equal(sparse.loc[0], orig.loc[0].to_sparse())
tm.assert_sp_series_equal(sparse.loc[1], orig.loc[1].to_sparse())
tm.assert_sp_series_equal(sparse.loc[2, :],
orig.loc[2, :].to_sparse())
tm.assert_sp_series_equal(sparse.loc[2, :],
orig.loc[2, :].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, 'y'],
orig.loc[:, 'y'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, 'y'],
orig.loc[:, 'y'].to_sparse())
result = sparse.loc[[1, 2]]
exp = orig.loc[[1, 2]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[[1, 2], :]
exp = orig.loc[[1, 2], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[:, ['x', 'z']]
exp = orig.loc[:, ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[[0, 2], ['x', 'z']]
exp = orig.loc[[0, 2], ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# exceeds the bounds
result = sparse.loc[[1, 3, 4, 5]]
exp = orig.loc[[1, 3, 4, 5]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# dense array
result = sparse.loc[orig.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse.loc[sparse.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
tm.assert_sp_frame_equal(result, exp)
def test_loc_index(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
index=list('abc'), columns=list('xyz'))
sparse = orig.to_sparse()
self.assertEqual(sparse.loc['a', 'x'], 1)
self.assertTrue(np.isnan(sparse.loc['b', 'z']))
self.assertEqual(sparse.loc['c', 'z'], 4)
tm.assert_sp_series_equal(sparse.loc['a'], orig.loc['a'].to_sparse())
tm.assert_sp_series_equal(sparse.loc['b'], orig.loc['b'].to_sparse())
tm.assert_sp_series_equal(sparse.loc['b', :],
orig.loc['b', :].to_sparse())
tm.assert_sp_series_equal(sparse.loc['b', :],
orig.loc['b', :].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, 'z'],
orig.loc[:, 'z'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, 'z'],
orig.loc[:, 'z'].to_sparse())
result = sparse.loc[['a', 'b']]
exp = orig.loc[['a', 'b']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[['a', 'b'], :]
exp = orig.loc[['a', 'b'], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[:, ['x', 'z']]
exp = orig.loc[:, ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[['c', 'a'], ['x', 'z']]
exp = orig.loc[['c', 'a'], ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# dense array
result = sparse.loc[orig.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse.loc[sparse.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
tm.assert_sp_frame_equal(result, exp)
def test_loc_slice(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_iloc(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]])
sparse = orig.to_sparse()
self.assertEqual(sparse.iloc[1, 1], 3)
self.assertTrue(np.isnan(sparse.iloc[2, 0]))
tm.assert_sp_series_equal(sparse.iloc[0], orig.loc[0].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[1], orig.loc[1].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[2, :],
orig.iloc[2, :].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[2, :],
orig.iloc[2, :].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[:, 1],
orig.iloc[:, 1].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[:, 1],
orig.iloc[:, 1].to_sparse())
result = sparse.iloc[[1, 2]]
exp = orig.iloc[[1, 2]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[[1, 2], :]
exp = orig.iloc[[1, 2], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[:, [1, 0]]
exp = orig.iloc[:, [1, 0]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[[2], [1, 0]]
exp = orig.iloc[[2], [1, 0]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
with tm.assertRaises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_slice(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_at(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
self.assertEqual(sparse.at['A', 'x'], orig.at['A', 'x'])
self.assertTrue(np.isnan(sparse.at['B', 'z']))
self.assertTrue(np.isnan(sparse.at['C', 'y']))
self.assertEqual(sparse.at['D', 'x'], orig.at['D', 'x'])
def test_at_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.at['A', 'x'], orig.at['A', 'x'])
self.assertTrue(np.isnan(sparse.at['B', 'z']))
self.assertTrue(np.isnan(sparse.at['C', 'y']))
self.assertEqual(sparse.at['D', 'x'], orig.at['D', 'x'])
def test_iat(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
self.assertEqual(sparse.iat[0, 0], orig.iat[0, 0])
self.assertTrue(np.isnan(sparse.iat[1, 2]))
self.assertTrue(np.isnan(sparse.iat[2, 1]))
self.assertEqual(sparse.iat[2, 0], orig.iat[2, 0])
self.assertTrue(np.isnan(sparse.iat[-1, -2]))
self.assertEqual(sparse.iat[-1, -1], orig.iat[-1, -1])
def test_iat_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.iat[0, 0], orig.iat[0, 0])
self.assertTrue(np.isnan(sparse.iat[1, 2]))
self.assertTrue(np.isnan(sparse.iat[2, 1]))
self.assertEqual(sparse.iat[2, 0], orig.iat[2, 0])
self.assertTrue(np.isnan(sparse.iat[-1, -2]))
self.assertEqual(sparse.iat[-1, -1], orig.iat[-1, -1])
def test_take(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.take([0]),
orig.take([0]).to_sparse())
tm.assert_sp_frame_equal(sparse.take([0, 1]),
orig.take([0, 1]).to_sparse())
tm.assert_sp_frame_equal(sparse.take([-1, -2]),
orig.take([-1, -2]).to_sparse())
def test_take_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
exp = orig.take([0]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([0]), exp)
exp = orig.take([0, 1]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([0, 1]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse()
tm.assert_sp_frame_equal(res, exp)
orig = pd.DataFrame([[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse()
tm.assert_sp_frame_equal(res, exp)
def test_reindex_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
# all missing
orig = pd.DataFrame([[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
# all fill_value
orig = pd.DataFrame([[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
class TestMultitype(tm.TestCase):
def setUp(self):
self.cols = ['string', 'int', 'float', 'object']
self.string_series = pd.SparseSeries(['a', 'b', 'c'])
self.int_series = pd.SparseSeries([1, 2, 3])
self.float_series = pd.SparseSeries([1.1, 1.2, 1.3])
self.object_series = pd.SparseSeries([[], {}, set()])
self.sdf = pd.SparseDataFrame({
'string': self.string_series,
'int': self.int_series,
'float': self.float_series,
'object': self.object_series,
})
self.sdf = self.sdf[self.cols]
self.ss = pd.SparseSeries(['a', 1, 1.1, []], index=self.cols)
def test_frame_basic_dtypes(self):
for _, row in self.sdf.iterrows():
self.assertEqual(row.dtype, object)
tm.assert_sp_series_equal(self.sdf['string'], self.string_series,
check_names=False)
tm.assert_sp_series_equal(self.sdf['int'], self.int_series,
check_names=False)
tm.assert_sp_series_equal(self.sdf['float'], self.float_series,
check_names=False)
tm.assert_sp_series_equal(self.sdf['object'], self.object_series,
check_names=False)
def test_frame_indexing_single(self):
tm.assert_sp_series_equal(self.sdf.iloc[0],
pd.SparseSeries(['a', 1, 1.1, []],
index=self.cols),
check_names=False)
tm.assert_sp_series_equal(self.sdf.iloc[1],
pd.SparseSeries(['b', 2, 1.2, {}],
index=self.cols),
check_names=False)
tm.assert_sp_series_equal(self.sdf.iloc[2],
pd.SparseSeries(['c', 3, 1.3, set()],
index=self.cols),
check_names=False)
def test_frame_indexing_multiple(self):
tm.assert_sp_frame_equal(self.sdf, self.sdf[:])
tm.assert_sp_frame_equal(self.sdf, self.sdf.loc[:])
tm.assert_sp_frame_equal(self.sdf.iloc[[1, 2]],
pd.SparseDataFrame({
'string': self.string_series.iloc[[1, 2]],
'int': self.int_series.iloc[[1, 2]],
'float': self.float_series.iloc[[1, 2]],
'object': self.object_series.iloc[[1, 2]]
}, index=[1, 2])[self.cols])
tm.assert_sp_frame_equal(self.sdf[['int', 'string']],
pd.SparseDataFrame({
'int': self.int_series,
'string': self.string_series,
}))
def test_series_indexing_single(self):
for i, idx in enumerate(self.cols):
self.assertEqual(self.ss.iloc[i], self.ss[idx])
self.assertEqual(type(self.ss.iloc[i]),
type(self.ss[idx]))
self.assertEqual(self.ss['string'], 'a')
self.assertEqual(self.ss['int'], 1)
self.assertEqual(self.ss['float'], 1.1)
self.assertEqual(self.ss['object'], [])
def test_series_indexing_multiple(self):
tm.assert_sp_series_equal(self.ss.loc[['string', 'int']],
pd.SparseSeries(['a', 1],
index=['string', 'int']))
tm.assert_sp_series_equal(self.ss.loc[['string', 'object']],
pd.SparseSeries(['a', []],
index=['string', 'object']))
| mit |
conradfriedrich/termspec | termspec/helpers.py | 1 | 6311 | # -*- coding: utf-8 -*-
import string
import pickle
import pandas as pd
from nltk import FreqDist
from nltk.stem import SnowballStemmer
from nltk.corpus import stopwords as StopWords
from nltk.tokenize import sent_tokenize, word_tokenize
import numpy as np
def normalize(words, language = 'english'):
# removes stopwords, lowercases, removes non-alphanumerics and stems (snowball)
# words: list of strings
# assert not isinstance(lst, basestring)
def ispunctuation(word):
punctuation = string.punctuation + "„“”—–"
for letter in word:
if not letter in punctuation:
return False
return True
stopwords = StopWords.words(language)
stemmer = SnowballStemmer(language)
#lowercase all terms
words = [w.lower() for w in words]
#remove stopwords
words = [w for w in words if not w in stopwords]
# stem (snowball)
words = [stemmer.stem(w) for w in words]
# remove all numerical terms
words = [w for w in words if not w.isnumeric()]
# remove pure punctuations
words = [w for w in words if not ispunctuation(w)]
#remove short words
words = [w for w in words if not len(w) < 3]
return words
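# Illustrative example (not part of the original module); exact output depends on
# the installed NLTK data, but with the English Snowball stemmer roughly:
# >>> normalize(['The', 'cats', 'are', 'running', '!!', '42'])
# ['cat', 'run']
# 'The'/'are' are stopwords, '!!' is punctuation, '42' is numeric, and the
# surviving tokens are lowercased and stemmed; tokens shorter than 3 characters
# would be dropped as well.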
def frequency_threshold(tokens, fqt = 10):
"""Return only those WORDS (i.e. unique wordforms) that appear more frequent than @fqt"""
fq = FreqDist(tokens)
fqt = fqt - 1
words = list( filter( lambda x: x[1] > fqt, fq.items() ) )
words = [item[0] for item in words]
return words
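# Illustrative example (assumed, not from the original module):
# >>> frequency_threshold(['often'] * 3 + ['rare'], fqt=2)
# ['often']
# only wordforms occurring at least fqt times are kept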
def remove_word_pairs_not_in_corpus(word_pairs, words, language = 'english'):
"""Only return those word pairs in @word_pairs of which both words are in @words
Expects @words to already be normalized"""
if not (word_pairs and words):
raise ValueError('Cannot remove word_pairs, array empty')
return_word_pairs = []
for pair in word_pairs:
pair_in_words = True
for word in pair:
word = normalize([word], language)[0]
if word not in words:
pair_in_words = False
if pair_in_words:
return_word_pairs.append(pair)
return return_word_pairs
def remove_words_not_in_list(word_list, words, language = 'english'):
"""Only return those strings of @word_list that are also in @words
Expects @words to already be normalized
"""
if not (word_list and words):
        raise ValueError('Cannot remove words, array empty')
word_list = [w for w in word_list if normalize([w], language)[0] in words]
return word_list
def printprettymatrix(M, rns = None, cns = None, filename = None):
"""Prints a Matrix with row and columns labels
Matrix should be dense.
Arguments:
M -- Matrix to print
rns -- Row labels
    cns -- Column labels
    filename -- If given, the table is written to this CSV file instead of printed
"""
df = pd.DataFrame(M, columns=cns, index=rns)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.expand_frame_repr', False)
if filename:
df.to_csv(filename, encoding = 'utf-8')
else:
print(df.round(2))
def flatten_documents_to_strings(docs):
"""Flattens the given documents in nested form to a string representation:
[ #docs
[ #document1
['word1','word2','word3'], #sentence1
['word1','word2','word3'], #sentence2
],
[ #document2
['word1','word2','word3'], #sentence1
['word1','word2','word3'], #sentence2
]
]
becomes
[ #docs
's1_word1 s1_word2 s1_word3 s2_word1 s2_word2 s2_word3', #document1
's1_word1 s1_word2 s1_word3 s2_word1 s2_word2 s2_word3', #document2
]
"""
strdocs = []
for doc in docs:
strdoc = [' '.join(sent) for sent in doc]
strdoc = ' '.join(strdoc)
strdocs.append(strdoc)
return strdocs
def flatten_documents_to_sentence_strings(docs):
"""Flattens the given documents in nested form to a string representation where each sentence is a new document (useful for sentence-wise cooccurrence measuring)
[ #docs
[ #document1
['word1','word2','word3'], #sentence1
['word1','word2','word3'], #sentence2
],
[ #document2
['word1','word2','word3'], #sentence1
['word1','word2','word3'], #sentence2
]
]
becomes
[ #docs
's1_word1 s1_word2 s1_word3', #document1_sentence1
's2_word1 s2_word2 s2_word3', #document1_sentence2
's1_word1 s1_word2 s1_word3', #document2_sentence1
        's2_word1 s2_word2 s2_word3', #document2_sentence2
]
"""
strsents = []
for doc in docs:
strsents.extend([' '.join(sent) for sent in doc])
return strsents
def write_to_file(filename, data):
"""Writes the file at @filename. Does not catch errors by design, i want them kill the script."""
filehandler = open(filename,"wb")
pickle.dump(data,filehandler)
filehandler.close()
def read_from_file(filename):
"""Reads the file at @filename. Does not throw FileNotFoundError """
data = None
try:
file = open(filename,'rb')
data = pickle.load(file)
file.close()
except FileNotFoundError as error:
# print(error)
# print('Returning empty data...')
        pass
return data
def mc_indices(context_vector, fns, mc = 50):
"""Return the indices of the @mc highest values of context_vector.
@fns is just for reference. Not really optimized.
"""
# If the context vector has more nonzero elements than mc, only take the mc occurrences!
if len(np.flatnonzero(context_vector)) > mc:
fns_index_values = []
for i, coeff in enumerate(context_vector):
fns_index_values.append((fns[i], i, coeff))
# Remove zero Cooccurrence Coefficient
fns_index_values = [fiv for fiv in fns_index_values if not fiv[2] == 0]
fns_index_values = sorted(fns_index_values, key=lambda tuple: tuple[2], reverse=True)
indices = [fiv[1] for fiv in fns_index_values]
indices = np.array(indices)
indices = indices[:mc]
else:
indices = np.flatnonzero(context_vector)
return indices
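# Illustrative example for mc_indices (assumed, not from the original module):
# >>> mc_indices(np.array([0.0, 3.0, 0.0, 1.0, 5.0]), fns=['a', 'b', 'c', 'd', 'e'], mc=2)
# array([4, 1])
# i.e. the indices of the two largest nonzero coefficients, highest first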
| mit |
vlimant/IntelROCCS | CUADRnT/src/python/cuadrnt/data_analysis/rankings/generic.py | 4 | 4117 | #!/usr/bin/env python2.7
"""
File : generic.py
Author : Bjorn Barrefors <bjorn dot peter dot barrefors AT cern dot ch>
Description: Generic class for all ranking algorithms
"""
# system modules
import logging
import datetime
import json
import math
import numpy as np
from sklearn.externals import joblib
# package modules
from cuadrnt.data_management.tools.sites import SiteManager
from cuadrnt.data_management.tools.datasets import DatasetManager
from cuadrnt.data_management.tools.popularity import PopularityManager
from cuadrnt.data_management.core.storage import StorageManager
class GenericRanking(object):
"""
Generic Ranking class
"""
def __init__(self, config=dict()):
self.logger = logging.getLogger(__name__)
self.config = config
self.sites = SiteManager(self.config)
self.datasets = DatasetManager(self.config)
self.popularity = PopularityManager(self.config)
self.storage = StorageManager(self.config)
self.max_replicas = int(config['rocker_board']['max_replicas'])
self.name = 'generic'
self.data_path = self.config['paths']['data']
self.data_tiers = config['tools']['valid_tiers'].split(',')
self.preprocessed_data = dict()
self.clf_trend = dict()
self.clf_avg = dict()
def predict_trend(self, features, data_tier):
"""
Predict trend based on features
"""
prediction = self.clf_trend[data_tier].predict(features)
return prediction[0]
def predict_avg(self, features, data_tier):
"""
        Predict average based on features
"""
prediction = self.clf_avg[data_tier].predict(features)
return prediction[0]
def train(self):
"""
Training classifier and regressor
"""
for data_tier in self.data_tiers:
fd = open(self.data_path + '/training_data_' + data_tier + '.json', 'r')
self.preprocessed_data[data_tier] = json.load(fd)
fd.close()
tot = len(self.preprocessed_data[data_tier]['features'])
p = int(math.ceil(tot*0.8))
training_features = np.array(self.preprocessed_data[data_tier]['features'][:p])
trend_training_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][:p])
avg_training_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][:p])
t1 = datetime.datetime.utcnow()
self.clf_trend[data_tier].fit(training_features, trend_training_classifications)
self.clf_avg[data_tier].fit(training_features, avg_training_classifications)
t2 = datetime.datetime.utcnow()
td = t2 - t1
self.logger.info('Training %s for data tier %s took %s', self.name, data_tier, str(td))
joblib.dump(self.clf_trend[data_tier], self.data_path + '/' + self.name + '_trend_' + data_tier + '.pkl')
joblib.dump(self.clf_avg[data_tier], self.data_path + '/' + self.name + '_avg_' + data_tier + '.pkl')
def test(self):
"""
Test accuracy/score of classifier and regressor
"""
for data_tier in self.data_tiers:
tot = len(self.preprocessed_data[data_tier]['features'])
            p = int(math.ceil(tot*0.8))
test_features = np.array(self.preprocessed_data[data_tier]['features'][p:])
trend_test_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][p:])
avg_test_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][p:])
accuracy_trend = self.clf_trend[data_tier].score(test_features, trend_test_classifications)
accuracy_avg = self.clf_avg[data_tier].score(test_features, avg_test_classifications)
self.logger.info('The accuracy of %s trend classifier for data tier %s is %.3f', self.name, data_tier, accuracy_trend)
self.logger.info('The accuracy of %s avg regressor for data tier %s is %.3f', self.name, data_tier, accuracy_avg)
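# Minimal sketch of how a concrete ranking could build on this class (added for
# illustration only; the class and estimator choices below are assumptions, not
# part of the original package). A subclass is expected to fill self.clf_trend
# and self.clf_avg with one scikit-learn estimator per data tier before
# train()/test() are called:
#
# from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
#
# class ExampleRanking(GenericRanking):
#     def __init__(self, config=dict()):
#         super(ExampleRanking, self).__init__(config)
#         self.name = 'example'
#         for data_tier in self.data_tiers:
#             self.clf_trend[data_tier] = RandomForestClassifier()
#             self.clf_avg[data_tier] = RandomForestRegressor()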
| mit |
crichardson17/emgtemp | Grains_Sims/Grains_u_sims/u_-3.5_-0.5_grains_0.5_5_sim.py | 1 | 18964 | import matplotlib.pyplot as plt
import numpy as np
import urllib
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.cm as cm
Low_Temp_Color = 'k'
Mid_Temp_Color = 'g'
High_Temp_Color = 'r'
#Temp_Color = 0.5
Cloudy_Sim_Color = 'cyan'
markersize = 40
SDSS_File = '/Users/Sam/Documents/emgtemp/data/4363_gr_5_0_err_dered.csv'
SDSS_Data = np.genfromtxt(SDSS_File,skip_header=1, delimiter = ',',dtype=float,unpack=True,names=True)
NII_6584 = SDSS_Data['Flux_NII_6583']
Ha_6562 = SDSS_Data['Flux_Ha_6562']
OI_6300 = SDSS_Data['Flux_OI_6300']
OIII_5006 = SDSS_Data['Flux_OIII_5006']
Hb_4861 = SDSS_Data['Flux_Hb_4861']
OIII_4363 = SDSS_Data['Flux_OIII_4363']
SII_6716 = SDSS_Data['Flux_SII_6716']
SII_6731 = SDSS_Data['Flux_SII_6730']
OII_3727 = SDSS_Data['Flux_OII_3726'] + SDSS_Data['Flux_OII_3728']
OIII_Hb = np.log10(OIII_5006/Hb_4861)
NII_Ha = np.log10(NII_6584/Ha_6562)
Temp_Ratio = np.log10(OIII_5006/OIII_4363)
S_Ratio = np.log10(SII_6716/SII_6731)
NO_Ratio = np.log10(NII_6584/OII_3727)
OI_Ratio = np.log10(OI_6300/Ha_6562)
O_Ratio = np.log10(OIII_5006/OII_3727)
S_Ha_Ratio = np.log10((SII_6716+SII_6731)/Ha_6562)
Cloudy_File = '/Users/Sam/Documents/emgtemp/Grains_Sims/Grains_u_sims/u_-3.5_-0.5_grains_0.5_5_sim.pun'
Cloudy_Data = np.genfromtxt(Cloudy_File, delimiter = '\t',dtype=float,unpack=True,names=True)
Cloudy_NII_6584 = Cloudy_Data['N__2__6584A']
Cloudy_Ha_6562 = Cloudy_Data['H__1__6563A']
Cloudy_OIII_5006 = Cloudy_Data['O__3__5007A']
Cloudy_Hb_4861 = Cloudy_Data['TOTL__4861A']
Cloudy_OIII_4363 = Cloudy_Data['TOTL__4363A']
Cloudy_SII_6716 = Cloudy_Data['S_II__6716A']
Cloudy_SII_6731 = Cloudy_Data['S_II__6731A']
Cloudy_OII_3727 = Cloudy_Data['TOTL__3727A']
Cloudy_OI_6300 = Cloudy_Data['O__1__6300A']
Cloudy_OIII_Hb = np.log10(Cloudy_OIII_5006/Cloudy_Hb_4861)
Cloudy_NII_Ha = np.log10(Cloudy_NII_6584/Cloudy_Ha_6562)
Cloudy_Temp_Ratio = np.log10(Cloudy_OIII_5006/Cloudy_OIII_4363)
Cloudy_S_Ratio = np.log10(Cloudy_SII_6716/Cloudy_SII_6731)
Cloudy_NO_Ratio = np.log10(Cloudy_NII_6584/Cloudy_OII_3727)
Cloudy_OI_Ratio = np.log10(Cloudy_OI_6300/Cloudy_Ha_6562)
Cloudy_O_Ratio = np.log10(Cloudy_OIII_5006/Cloudy_OII_3727)
Cloudy_S_Ha_Ratio = np.log10((Cloudy_SII_6716+Cloudy_SII_6731)/Cloudy_Ha_6562)
Grid_File = '/Users/Sam/Documents/emgtemp/Grains_Sims/Grains_u_sims/u_-3.5_-0.5_grains_0.5_5_sim.grd'
Grid_Data = np.genfromtxt(Grid_File,skip_header=1,delimiter = '\t',dtype=float,unpack=True)
Cloudy_Metals = Grid_Data[8,:]
Cloudy_Den = Grid_Data[6,:]
Cloudy_NII_Ha_array = np.reshape(Cloudy_NII_Ha,(7,-1))
Cloudy_OI_Ratio_array = np.reshape(Cloudy_OI_Ratio,(7,-1))
Cloudy_OIII_Hb_array = np.reshape(Cloudy_OIII_Hb,(7,-1))
Cloudy_Temp_Ratio_array = np.reshape(Cloudy_Temp_Ratio,(7,-1))
Cloudy_S_Ratio_array = np.reshape(Cloudy_S_Ratio,(7,-1))
Cloudy_NO_Ratio_array = np.reshape(Cloudy_NO_Ratio,(7,-1))
Cloudy_O_Ratio_array = np.reshape(Cloudy_O_Ratio,(7,-1))
Cloudy_S_Ha_Ratio_array = np.reshape(Cloudy_S_Ha_Ratio,(7,-1))
Cloudy_NII_Ha_transpose = np.transpose(Cloudy_NII_Ha_array)
Cloudy_OI_Ratio_transpose = np.transpose(Cloudy_OI_Ratio_array)
Cloudy_OIII_Hb_transpose = np.transpose(Cloudy_OIII_Hb_array)
Cloudy_Temp_Ratio_transpose = np.transpose(Cloudy_Temp_Ratio_array)
Cloudy_S_Ratio_transpose = np.transpose(Cloudy_S_Ratio_array)
Cloudy_NO_Ratio_transpose = np.transpose(Cloudy_NO_Ratio_array)
Cloudy_O_Ratio_transpose = np.transpose(Cloudy_O_Ratio_array)
Cloudy_S_Ha_Ratio_transpose = np.transpose(Cloudy_S_Ha_Ratio_array)
#cold_data_colors = [plt.cm.Blues(i) for i in np.linspace(0,1,len(SDSS_Data['z']))]
#mid_data_colors = [plt.cm.Greens(i) for i in np.linspace(0,1,len(SDSS_Data['z']))]
#hot_data_colors = [plt.cm.Reds(i) for i in np.linspace(0,1,len(SDSS_Data['z']))]
grains_colors = [plt.cm.Reds(i) for i in np.linspace(0.25,1,10)]
u_colors = [plt.cm.Greens(i) for i in np.linspace(0.25,1,6)]
def truncate_colormap(cmap, minval=0.15, maxval=1.0, n=100):
new_cmap = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
grains_colors_map = truncate_colormap(cm.Reds)
u_colors_map = truncate_colormap(cm.Greens)
#This is bad^ 3 and 7 are the number of densities and ionization parameters used, but ideally this wouldn't be hardcoded.
#sf_count = 0.0
#comp_count = 0.0
#agn_count = 0.0
#liner_count = 0.0
#amb_count = 0.0
shape = ['v']
#####################################################################################################
def getShape(NII_Ha, OIII_Hb, S_Ha_Ratio, OI_Ratio):
# Star forming
if OIII_Hb < 0.61/(NII_Ha-0.05)+1.3 and \
OIII_Hb < 0.72/(S_Ha_Ratio-0.32)+1.30 and \
OIII_Hb < 0.73/(OI_Ratio+0.59)+1.33:
shape = 'x'
#sf_count = sf_count+1
# Composite
elif 0.61/(NII_Ha-0.05)+1.3 < OIII_Hb and \
0.61/(NII_Ha-0.47)+1.19 > OIII_Hb:
shape = '+'
#comp_count = comp_count+1
# AGN
elif 0.61/(NII_Ha-0.47)+1.19 < OIII_Hb and \
0.72/(S_Ha_Ratio-0.32)+1.30 < OIII_Hb and \
0.73/(OI_Ratio+0.59)+1.33 < OIII_Hb and \
(1.89*S_Ha_Ratio)+0.76 < OIII_Hb and \
(1.18*OI_Ratio)+1.30 < OIII_Hb:
shape = 'D'
#agn_count = agn_count+1
# LINERs
elif 0.61/(NII_Ha-0.47)+1.19 < OIII_Hb and \
0.72/(S_Ha_Ratio-0.32)+1.30 < OIII_Hb and \
OIII_Hb < (1.89*S_Ha_Ratio)+0.76 and \
0.73/(OI_Ratio+0.59)+1.33 < OIII_Hb and \
OIII_Hb < (1.18*OI_Ratio)+1.30:
shape = 's'
#liner_count = liner_count+1
else:
# Ambiguous
shape = '*'
#amb_count = amb_count+1
return shape
#####################################################################################################
#####################################################################################################
def getColor(OIII_5006, OIII_4363):
Temp_Color = 'k'
if OIII_5006/OIII_4363<50:
#Temp_Color = '0.25'
Temp_Color = plt.cm.gray(0.2)
#red = red + 1
elif OIII_5006/OIII_4363>50 and OIII_5006/OIII_4363<100:
#Temp_Color = '0.5'
Temp_Color = plt.cm.gray(0.5)
#green = green + 1
elif OIII_5006/OIII_4363>100:
#Temp_Color = '0.75'
Temp_Color = plt.cm.gray(0.75)
#black = black + 1
else:
print ("error")
return Temp_Color
#####################################################################################################
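# Worked example of the two helpers above (illustrative values, not SDSS data):
# a point with log([NII]/Ha) = -0.5, log([OIII]/Hb) = 1.0, log([SII]/Ha) = -0.5 and
# log([OI]/Ha) = -1.5 fails the star-forming and composite cuts but satisfies every
# AGN criterion, so getShape(-0.5, 1.0, -0.5, -1.5) returns 'D'; likewise a galaxy
# with OIII_5006/OIII_4363 = 75 falls in the middle temperature bin, so
# getColor(75.0, 1.0) returns plt.cm.gray(0.5).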
agn_count = 0
liner_count = 0
sf_count = 0
comp_count = 0
amb_count = 0
fig = plt.figure(131)
fig.subplots_adjust(wspace=0.4,hspace=0.4)
sp1 = plt.subplot(221)
for i in range(0,len(SDSS_Data['z'])):
shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
if shape == 'D':
agn_count = agn_count +1
elif shape == 's':
liner_count = liner_count +1
elif shape == '+':
comp_count = comp_count +1
elif shape == 'x':
sf_count = sf_count+1
elif shape == '*':
amb_count = amb_count +1
Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
#print(Temp_Color)
plt.scatter(NII_Ha[i],OIII_Hb[i],s = markersize, marker = shape, color = Temp_Color, edgecolor = 'none')
#print (Temp_Color)
#print(sf_count)
#print(comp_count)
#print(agn_count)
#print(liner_count)
#print(amb_count)
#print(red)
#print(green)
#print(black)
#print(counter)
plt.xlim(-2.5,0.5)
plt.ylim(-1,1.3)
plt.ylabel(r"log([OIII] $\lambda$5007/H$\beta$)")
plt.xlabel(r"log ([NII] $\lambda$6584/H$\alpha$)")
plt.title("BPT Diagram")
#plt.scatter(Cloudy_NII_Ha,Cloudy_OIII_Hb,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp1.set_color_cycle(grains_colors)
plt.plot(Cloudy_NII_Ha_array,Cloudy_OIII_Hb_array, lw = '2')
sp1.set_color_cycle(u_colors)
plt.plot(Cloudy_NII_Ha_transpose,Cloudy_OIII_Hb_transpose, lw = '2',linestyle = '--')
plt.legend([plt.scatter([],[],color='0.75', s = markersize), plt.scatter([],[],color='0.5', s = markersize), plt.scatter([],[],color='0.25', s = markersize)], (r"T$_e$<1.17*10$^4$",r"1.17*10$^4$<T$_e$<1.54*10$^4$",r"T$_e$>1.54*10$^4$"),scatterpoints = 1, loc = 'lower left',fontsize =8)
x=np.linspace(-1.5,0.3,50)
y=((.61/(x-.47))+1.19)
plt.plot(x,y,color=Low_Temp_Color)
x3=np.linspace(-1,-0.2,50)
y3=((.61/(x3-.05)+1.3))
plt.plot(x3,y3,linestyle='--',color='k')
#counter=0
sm = plt.cm.ScalarMappable(norm=colors.Normalize(vmin=0.5, vmax=5.0),cmap=grains_colors_map)
sm._A = []
smaxes = inset_axes(sp1, width=0.06, height=0.4, loc=3, bbox_to_anchor=(0.14, 0.1), bbox_transform=sp1.figure.transFigure)
#smaxes = inset_axes(sp1, width="3%", height="20%", loc=3, bbox_to_anchor=(0.1, 0.1), bbox_transform=ax.figure.transFigure)
cbar = plt.colorbar(sm,cax=smaxes)
cbar.ax.set_title('Grains',fontsize=8)
cbar.set_ticks([0.5,5.0])
cbar.set_ticklabels([0.5,5.0])
cbar.ax.tick_params(labelsize=8)
sp2 = plt.subplot(222)
for i in range(0,len(SDSS_Data['z'])):
shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
plt.scatter(NII_Ha[i],Temp_Ratio[i], s = markersize, marker = shape, color = Temp_Color, edgecolor = 'none')
#print(counter)
plt.ylabel(r"log([OIII] $\lambda$5007/4363)")
plt.xlabel(r"log ([NII] $\lambda$6584/H$\alpha$)")
plt.title("Temperature")
plt.ylim(0,3)
plt.xlim(-2.5,0.5)
#plt.scatter(Cloudy_NII_Ha,Cloudy_Temp_Ratio,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp2.set_color_cycle(grains_colors)
plt.plot(Cloudy_NII_Ha_array,Cloudy_Temp_Ratio_array, lw = '2')
sp2.set_color_cycle(u_colors)
plt.plot(Cloudy_NII_Ha_transpose,Cloudy_Temp_Ratio_transpose, lw = '2',linestyle = '--')
plt.legend([plt.scatter([],[],color='.75', s = markersize, marker = 'x', edgecolor = 'none'),plt.scatter([],[],color='0.75', s = markersize, marker = '+', edgecolor = 'none'), plt.scatter([],[],color='.75', s = markersize, marker = 'D', edgecolor = 'none'), plt.scatter([],[],color='.75', s = markersize, marker = 's', edgecolor = 'none'), plt.scatter([],[],color='.75', s = markersize, marker = '*', edgecolor = 'none')], ("Star-Forming","Composite","AGN","LINER","Ambiguous"),scatterpoints = 1, loc = 'lower left',fontsize =8)
sm = plt.cm.ScalarMappable(norm=colors.Normalize(vmin=0.5, vmax=2.0),cmap=u_colors_map)
sm._A = []
smaxes = inset_axes(sp2, width=0.06, height=0.4, loc=3, bbox_to_anchor=(0.6, .3), bbox_transform=sp2.figure.transFigure)
cbar = plt.colorbar(sm,cax=smaxes)
cbar.ax.set_title('U',fontsize=8)
cbar.set_ticks([0.5,2.0])
cbar.set_ticklabels([0.5,2.0])
cbar.ax.tick_params(labelsize=8)
sp3 = plt.subplot(223)
for i in range(0,len(SDSS_Data['z'])):
shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
plt.scatter(NII_Ha[i],S_Ratio[i], s = markersize, marker = shape, c = Temp_Color, edgecolor = 'none')
plt.ylabel(r"log([SII] $\lambda$6717/6731)")
plt.xlabel(r"log ([NII] $\lambda$6584/H$\alpha$)")
plt.ylim(-1.0,1.0)
plt.xlim(-2.5,0.5)
plt.title("Density")
#plt.scatter(Cloudy_NII_Ha,Cloudy_S_Ratio,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp3.set_color_cycle(grains_colors)
plt.plot(Cloudy_NII_Ha_array,Cloudy_S_Ratio_array, lw = '2')
sp3.set_color_cycle(u_colors)
plt.plot(Cloudy_NII_Ha_transpose,Cloudy_S_Ratio_transpose, lw = '2',linestyle = '--')
#plt.legend([plt.scatter([],[],color=Low_Temp_Color, s = markersize), plt.scatter([],[],color=Mid_Temp_Color, s = markersize), plt.scatter([],[],color=High_Temp_Color, s = markersize),plt.scatter([],[],c=Cloudy_Sim_Color, s = markersize, edgecolor = 'none')], (r"$\frac{OIII[5007]}{OIII[4363]}$<50.0",r"$50.0<\frac{OIII[5007]}{OIII[4363]}<100.0$",r"$\frac{OIII[5007]}{OIII[4363]}$>100.0","Cloudy Simulation"),scatterpoints = 1, loc = 'lower left',fontsize =8)
sp4 = plt.subplot(224)
for i in range(0,len(SDSS_Data['z'])):
shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
plt.scatter(NII_Ha[i],NO_Ratio[i], s = markersize, marker = shape, c = Temp_Color, edgecolor = 'none')
plt.ylabel(r"log([NII] $\lambda$6584/[OII] $\lambda$3727)")
plt.xlabel(r"log ([NII] $\lambda$6584/H$\alpha$)")
plt.title("Metallicity")
plt.xlim(-2.5,0.5)
#plt.scatter(Cloudy_NII_Ha,Cloudy_NO_Ratio,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp4.set_color_cycle(grains_colors)
plt.plot(Cloudy_NII_Ha_array,Cloudy_NO_Ratio_array, lw = '2')
sp4.set_color_cycle(u_colors)
plt.plot(Cloudy_NII_Ha_transpose,Cloudy_NO_Ratio_transpose, lw = '2',linestyle = '--')
#plt.legend([plt.scatter([],[],color=Low_Temp_Color, s = markersize), plt.scatter([],[],color=Mid_Temp_Color, s = markersize), plt.scatter([],[],color=High_Temp_Color, s = markersize),plt.scatter([],[],c=Cloudy_Sim_Color, s = markersize, edgecolor = 'none')], (r"$\frac{OIII[5007]}{OIII[4363]}$<50.0",r"$50.0<\frac{OIII[5007]}{OIII[4363]}<100.0$",r"$\frac{OIII[5007]}{OIII[4363]}$>100.0","Cloudy Simulation"),scatterpoints = 1, loc = 'lower left',fontsize =8)
plt.show()
#plt.suptitle('hden = 2.4, U = -1.5, 0.5 < Z < 2.0, 0.5 < grains < 5.0')
#plt.savefig("z_0.5_2_grains_0.5_5_plots.pdf")
fig2 = plt.figure(132)
sp5 = plt.subplot(221)
for i in range(0,len(SDSS_Data['z'])):
shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
plt.scatter(NII_Ha[i],OI_Ratio[i], s = markersize, marker = shape, c = Temp_Color, edgecolor = 'none')
plt.ylabel(r"log([OI] $\lambda$6300/H$\alpha$)")
plt.xlabel(r"log ([NII] $\lambda$6584/H$\alpha$)")
plt.title("OI_6300")
plt.xlim(-2.5,0.5)
plt.ylim(-2.5,0)
#plt.scatter(Cloudy_NII_Ha,Cloudy_OI_Ratio,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp5.set_color_cycle(grains_colors)
plt.plot(Cloudy_NII_Ha_array,Cloudy_OI_Ratio_array, lw = '2')
sp5.set_color_cycle(u_colors)
plt.plot(Cloudy_NII_Ha_transpose,Cloudy_OI_Ratio_transpose, lw = '2',linestyle = '--')
plt.legend([plt.scatter([],[],color='.75', s = markersize, marker = 'x', edgecolor = 'none'),plt.scatter([],[],color='0.75', s = markersize, marker = '+', edgecolor = 'none'), plt.scatter([],[],color='.75', s = markersize, marker = 'D', edgecolor = 'none'), plt.scatter([],[],color='.75', s = markersize, marker = 's', edgecolor = 'none'), plt.scatter([],[],color='.75', s = markersize, marker = '*', edgecolor = 'none')], ("Star-Forming","Composite","AGN","LINER","Ambiguous"),scatterpoints = 1, loc = 'lower left',fontsize =8)
sp6 = plt.subplot(222)
for i in range(0,len(SDSS_Data['z'])):
shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
plt.scatter(OI_Ratio[i],OIII_Hb[i], s = markersize, marker = shape, c = Temp_Color, edgecolor = 'none')
plt.ylabel(r"log([OIII] $\lambda$5007/H$\beta$)")
plt.xlabel(r"log ([OI] $\lambda$6300/H$\alpha$)")
plt.title("OI_6300 vs. OIII_5007")
#plt.scatter(Cloudy_OI_Ratio,Cloudy_OIII_Hb,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp6.set_color_cycle(grains_colors)
plt.plot(Cloudy_OI_Ratio_array,Cloudy_OIII_Hb_array, lw = '2')
sp6.set_color_cycle(u_colors)
plt.plot(Cloudy_OI_Ratio_transpose,Cloudy_OIII_Hb_transpose, lw = '2',linestyle = '--')
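# The two boundary curves defined next appear to be the standard classification
# lines for the [OI]/Halpha BPT-style diagram: the curved line (x6, y6) matches
# the Kewley et al. maximum-starburst boundary and the straight line (x7, y7)
# matches their Seyfert/LINER division. The attribution is an inference; the
# coefficients are used exactly as given in this script.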
x6 = np.linspace(-2.5,-0.6,50)
y6 = ((.73/(x6+0.59))+1.33)
plt.plot(x6,y6,color = 'k')
x7 = np.linspace(-1.125,0.25,50)
y7 = (1.18*x7) + 1.30
plt.plot(x7,y7, color = 'b')
plt.ylim(-1,1.5)
plt.xlim(-2.5,0.5)
#plt.legend([plt.scatter([],[],color=Low_Temp_Color, s = markersize), plt.scatter([],[],color=Mid_Temp_Color, s = markersize), plt.scatter([],[],color=High_Temp_Color, s = markersize),plt.scatter([],[],c=Cloudy_Sim_Color, s = markersize, edgecolor = 'none')], (r"$\frac{OIII[5007]}{OIII[4363]}$<50.0",r"$50.0<\frac{OIII[5007]}{OIII[4363]}<100.0$",r"$\frac{OIII[5007]}{OIII[4363]}$>100.0","Cloudy Simulation"),scatterpoints = 1, loc = 'lower left',fontsize =8)
sp7 = plt.subplot(223)
for i in range(0,len(SDSS_Data['z'])):
shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
plt.scatter(OI_Ratio[i],O_Ratio[i], s = markersize, marker = shape, c = Temp_Color, edgecolor = 'none')
plt.ylabel(r"log([OIII] $\lambda$5007/[OII]$\lambda$3727)")
plt.xlabel(r"log ([OI] $\lambda$6300/H$\alpha$)")
plt.title("Groves Diagram")
#plt.scatter(Cloudy_OI_Ratio,Cloudy_O_Ratio,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp7.set_color_cycle(grains_colors)
plt.plot(Cloudy_OI_Ratio_array,Cloudy_O_Ratio_array, lw = '2')
sp7.set_color_cycle(u_colors)
plt.plot(Cloudy_OI_Ratio_transpose,Cloudy_O_Ratio_transpose, lw = '2',linestyle = '--')
x1 = np.linspace(-2.0,-.25,50)
y1 = ((-1.701*x1)-2.163)
x2 = np.linspace(-1.05998,0,50)
y2 = x2 + 0.7
plt.plot(x2,y2, color = 'k')
plt.plot(x1,y1, color = 'k')
plt.xlim(-2.5,0)
plt.ylim(-1.5,1)
#plt.legend([plt.scatter([],[],color=Low_Temp_Color, s = markersize), plt.scatter([],[],color=Mid_Temp_Color, s = markersize), plt.scatter([],[],color=High_Temp_Color, s = markersize),plt.scatter([],[],c=Cloudy_Sim_Color, s = markersize, edgecolor = 'none')], (r"$\frac{OIII[5007]}{OIII[4363]}$<50.0",r"$50.0<\frac{OIII[5007]}{OIII[4363]}<100.0$",r"$\frac{OIII[5007]}{OIII[4363]}$>100.0","Cloudy Simulation"),scatterpoints = 1, loc = 'lower left',fontsize =8)
sp8 = plt.subplot(224)
for i in range(0,len(SDSS_Data['z'])):
shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
plt.scatter(S_Ha_Ratio[i],OIII_Hb[i], s = markersize, marker = shape, c = Temp_Color, edgecolor = 'none')
plt.ylabel(r"log([OIII] $\lambda$5007/H$\beta$)")
plt.xlabel(r"log ([SII]/H$\alpha$)")
plt.title("OIII_5007 vs. SII")
plt.ylim(-1,1.5)
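# The lines defined below appear to be the corresponding classification
# boundaries for the [SII]/Halpha diagram: (x4, y4) the straight Seyfert/LINER
# division and (x5, y5) the curved maximum-starburst boundary. Again the
# attribution is inferred; the coefficients are taken as given.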
x4 = np.linspace(-0.32,0.25,50)
y4 = ((1.89*x4)+0.76)
x5 = np.linspace(-1.5,0.25,50)
y5 = ((0.72/(x5 - 0.32))+1.3)
plt.plot(x5,y5,color = 'k')
plt.plot(x4,y4,color = 'b')
#plt.scatter(Cloudy_S_Ha_Ratio,Cloudy_OIII_Hb,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp8.set_color_cycle(grains_colors)
plt.plot(Cloudy_S_Ha_Ratio_array,Cloudy_OIII_Hb_array, lw = '2')
sp8.set_color_cycle(u_colors)
plt.plot(Cloudy_S_Ha_Ratio_transpose,Cloudy_OIII_Hb_transpose, lw = '2',linestyle = '--')
#plt.suptitle('hden = 2.4, U = -1.5, 0.5 < Z < 2.0, 0.5 < grains < 5.0')
#plt.legend([plt.scatter([],[],color=Low_Temp_Color, s = markersize), plt.scatter([],[],color=Mid_Temp_Color, s = markersize), plt.scatter([],[],color=High_Temp_Color, s = markersize),plt.scatter([],[],c=Cloudy_Sim_Color, s = markersize, edgecolor = 'none')], (r"$\frac{OIII[5007]}{OIII[4363]}$<50.0",r"$50.0<\frac{OIII[5007]}{OIII[4363]}<100.0$",r"$\frac{OIII[5007]}{OIII[4363]}$>100.0","Cloudy Simulation"),scatterpoints = 1, loc = 'lower left',fontsize =8)
plt.show()
#print(agn_count)
#print(liner_count)
#print(comp_count)
#print(sf_count)
#print (amb_count) | mit |
jiajunshen/partsNet | scripts/extensionPartsWithRBM.py | 1 | 38696 | from __future__ import division, print_function,absolute_import
import pylab as plt
import amitgroup.plot as gr
import numpy as np
import amitgroup as ag
import os
import pnet
import matplotlib.pylab as plot
from pnet.cyfuncs import index_map_pooling
from Queue import Queue
"""This tutorial introduces restricted boltzmann machines (RBM) using Theano.
Boltzmann Machines (BMs) are a particular form of energy-based model which
contain hidden variables. Restricted Boltzmann Machines further restrict BMs
to those without visible-visible and hidden-hidden connections.
"""
import cPickle
import gzip
import time
import PIL.Image
import numpy
import theano
import theano.tensor as T
import os
from theano.tensor.shared_randomstreams import RandomStreams
from utils import tile_raster_images
import sklearn.cluster
class RBM(object):
"""Restricted Boltzmann Machine (RBM) """
def __init__(self, input=None, n_visible=784, n_hidden=200, \
W=None, hbias=None, vbias=None, numpy_rng=None,
theano_rng=None):
"""
RBM constructor. Defines the parameters of the model along with
basic operations for inferring hidden from visible (and vice-versa),
as well as for performing CD updates.
:param input: None for standalone RBMs or symbolic variable if RBM is
part of a larger graph.
:param n_visible: number of visible units
:param n_hidden: number of hidden units
:param W: None for standalone RBMs or symbolic variable pointing to a
shared weight matrix in case RBM is part of a DBN network; in a DBN,
the weights are shared between RBMs and layers of a MLP
:param hbias: None for standalone RBMs or symbolic variable pointing
to a shared hidden units bias vector in case RBM is part of a
different network
:param vbias: None for standalone RBMs or a symbolic variable
pointing to a shared visible units bias
"""
self.n_visible = n_visible
self.n_hidden = n_hidden
if numpy_rng is None:
# create a number generator
numpy_rng = numpy.random.RandomState(1234)
if theano_rng is None:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
if W is None:
# W is initialized with `initial_W`, which is uniformly
# sampled from -4*sqrt(6./(n_visible+n_hidden)) to
# 4*sqrt(6./(n_hidden+n_visible)); the output of uniform is
# converted using asarray to dtype theano.config.floatX so
# that the code is runnable on GPU
initial_W = numpy.asarray(numpy_rng.uniform(
low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
size=(n_visible, n_hidden)),
dtype=theano.config.floatX)
# theano shared variables for weights and biases
W = theano.shared(value=initial_W, name='W', borrow=True)
if hbias is None:
# create shared variable for hidden units bias
hbias = theano.shared(value=numpy.zeros(n_hidden,
dtype=theano.config.floatX),
name='hbias', borrow=True)
if vbias is None:
# create shared variable for visible units bias
vbias = theano.shared(value=numpy.zeros(n_visible,
dtype=theano.config.floatX),
name='vbias', borrow=True)
# initialize input layer for standalone RBM or layer0 of DBN
self.input = input
if not input:
self.input = T.matrix('input')
self.W = W
self.hbias = hbias
self.vbias = vbias
self.theano_rng = theano_rng
# **** WARNING: It is not a good idea to put things in this list
# other than shared variables created in this function.
self.params = [self.W, self.hbias, self.vbias]
def free_energy(self, v_sample):
''' Function to compute the free energy '''
wx_b = T.dot(v_sample, self.W) + self.hbias
vbias_term = T.dot(v_sample, self.vbias)
hidden_term = T.sum(T.log(1 + T.exp(wx_b)), axis=1)
return -hidden_term - vbias_term
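# In equation form (matching the code above), the free energy of a visible
# configuration v is
#   F(v) = -v . vbias - sum_j log(1 + exp((v W + hbias)_j)),
# i.e. minus the visible bias term minus a softplus over the hidden units.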
def propup(self, vis):
'''This function propagates the visible units activation upwards to
the hidden units
Note that we return also the pre-sigmoid activation of the
layer. As it will turn out later, due to how Theano deals with
optimizations, this symbolic variable will be needed to write
down a more stable computational graph (see details in the
reconstruction cost function)
'''
pre_sigmoid_activation = T.dot(vis, self.W) + self.hbias
return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
def sample_h_given_v(self, v0_sample):
''' This function infers state of hidden units given visible units '''
# compute the activation of the hidden units given a sample of
# the visibles
pre_sigmoid_h1, h1_mean = self.propup(v0_sample)
# get a sample of the hiddens given their activation
# Note that theano_rng.binomial returns a symbolic sample of dtype
# int64 by default. If we want to keep our computations in floatX
# for the GPU we need to specify to return the dtype floatX
h1_sample = self.theano_rng.binomial(size=h1_mean.shape,
n=1, p=h1_mean,
dtype=theano.config.floatX)
return [pre_sigmoid_h1, h1_mean, h1_sample]
def propdown(self, hid):
'''This function propagates the hidden units activation downwards to
the visible units
Note that we return also the pre_sigmoid_activation of the
layer. As it will turn out later, due to how Theano deals with
optimizations, this symbolic variable will be needed to write
down a more stable computational graph (see details in the
reconstruction cost function)
'''
pre_sigmoid_activation = T.dot(hid, self.W.T) + self.vbias
return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
def sample_v_given_h(self, h0_sample):
''' This function infers state of visible units given hidden units '''
# compute the activation of the visible given the hidden sample
pre_sigmoid_v1, v1_mean = self.propdown(h0_sample)
# get a sample of the visible given their activation
# Note that theano_rng.binomial returns a symbolic sample of dtype
# int64 by default. If we want to keep our computations in floatX
# for the GPU we need to specify to return the dtype floatX
v1_sample = self.theano_rng.binomial(size=v1_mean.shape,
n=1, p=v1_mean,
dtype=theano.config.floatX)
return [pre_sigmoid_v1, v1_mean, v1_sample]
def gibbs_hvh(self, h0_sample):
''' This function implements one step of Gibbs sampling,
starting from the hidden state'''
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h0_sample)
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v1_sample)
return [pre_sigmoid_v1, v1_mean, v1_sample,
pre_sigmoid_h1, h1_mean, h1_sample]
def gibbs_vhv(self, v0_sample):
''' This function implements one step of Gibbs sampling,
starting from the visible state'''
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v0_sample)
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h1_sample)
return [pre_sigmoid_h1, h1_mean, h1_sample,
pre_sigmoid_v1, v1_mean, v1_sample]
def get_cost_updates(self, lr=0.1, persistent=None, k=1):
"""This functions implements one step of CD-k or PCD-k
:param lr: learning rate used to train the RBM
:param persistent: None for CD. For PCD, shared variable
containing old state of Gibbs chain. This must be a shared
variable of size (batch size, number of hidden units).
:param k: number of Gibbs steps to do in CD-k/PCD-k
Returns a proxy for the cost and the updates dictionary. The
dictionary contains the update rules for weights and biases but
also an update of the shared variable used to store the persistent
chain, if one is used.
"""
# compute positive phase
pre_sigmoid_ph, ph_mean, ph_sample = self.sample_h_given_v(self.input)
# decide how to initialize persistent chain:
# for CD, we use the newly generate hidden sample
# for PCD, we initialize from the old state of the chain
if persistent is None:
chain_start = ph_sample
else:
chain_start = persistent
# perform actual negative phase
# in order to implement CD-k/PCD-k we need to scan over the
# function that implements one gibbs step k times.
# Read Theano tutorial on scan for more information :
# http://deeplearning.net/software/theano/library/scan.html
# the scan will return the entire Gibbs chain
[pre_sigmoid_nvs, nv_means, nv_samples,
pre_sigmoid_nhs, nh_means, nh_samples], updates = \
theano.scan(self.gibbs_hvh,
# the None are place holders, saying that
# chain_start is the initial state corresponding to the
# 6th output
outputs_info=[None, None, None, None, None, chain_start],
n_steps=k)
# determine gradients on RBM parameters
# note that we only need the sample at the end of the chain
chain_end = nv_samples[-1]
cost = T.mean(self.free_energy(self.input)) - T.mean(
self.free_energy(chain_end))
# We must not compute the gradient through the gibbs sampling
gparams = T.grad(cost, self.params, consider_constant=[chain_end])
# constructs the update dictionary
for gparam, param in zip(gparams, self.params):
# make sure that the learning rate is of the right dtype
updates[param] = param - gparam * T.cast(lr,
dtype=theano.config.floatX)
if persistent:
# Note that this works only if persistent is a shared variable
updates[persistent] = nh_samples[-1]
# pseudo-likelihood is a better proxy for PCD
monitoring_cost = self.get_pseudo_likelihood_cost(updates)
else:
# reconstruction cross-entropy is a better proxy for CD
monitoring_cost = self.get_reconstruction_cost(updates,
pre_sigmoid_nvs[-1])
return monitoring_cost, updates
def get_pseudo_likelihood_cost(self, updates):
"""Stochastic approximation to the pseudo-likelihood"""
# index of bit i in expression p(x_i | x_{\i})
bit_i_idx = theano.shared(value=0, name='bit_i_idx')
# binarize the input image by rounding to nearest integer
xi = T.round(self.input)
# calculate free energy for the given bit configuration
fe_xi = self.free_energy(xi)
# flip bit x_i of matrix xi and preserve all other bits x_{\i}
# Equivalent to xi[:,bit_i_idx] = 1-xi[:, bit_i_idx], but assigns
# the result to xi_flip, instead of working in place on xi.
xi_flip = T.set_subtensor(xi[:, bit_i_idx], 1 - xi[:, bit_i_idx])
# calculate free energy with bit flipped
fe_xi_flip = self.free_energy(xi_flip)
# equivalent to e^(-FE(x_i)) / (e^(-FE(x_i)) + e^(-FE(x_{\i})))
cost = T.mean(self.n_visible * T.log(T.nnet.sigmoid(fe_xi_flip -
fe_xi)))
# increment bit_i_idx % number as part of updates
updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible
return cost
def get_reconstruction_cost(self, updates, pre_sigmoid_nv):
"""Approximation to the reconstruction error
Note that this function requires the pre-sigmoid activation as
input. To understand why this is so you need to understand a
bit about how Theano works. Whenever you compile a Theano
function, the computational graph that you pass as input gets
optimized for speed and stability. This is done by changing
several parts of the subgraphs with others. One such
optimization expresses terms of the form log(sigmoid(x)) in
terms of softplus. We need this optimization for the
cross-entropy since the sigmoid of numbers larger than 30. (or
even less than that) turns to 1. and numbers smaller than
-30. turn to 0, which in turn will force Theano to compute
log(0) and therefore we will get either -inf or NaN as
cost. If the value is expressed in terms of softplus we do not
get this undesirable behaviour. This optimization usually
works fine, but here we have a special case. The sigmoid is
applied inside the scan op, while the log is
outside. Therefore Theano will only see log(scan(..)) instead
of log(sigmoid(..)) and will not apply the wanted
optimization. We can not go and replace the sigmoid in scan
with something else also, because this only needs to be done
on the last step. Therefore the easiest and most efficient way
is to get also the pre-sigmoid activation as an output of
scan, and apply both the log and sigmoid outside scan such
that Theano can catch and optimize the expression.
"""
cross_entropy = T.mean(
T.sum(self.input * T.log(T.nnet.sigmoid(pre_sigmoid_nv)) +
(1 - self.input) * T.log(1 - T.nnet.sigmoid(pre_sigmoid_nv)),
axis=1))
return cross_entropy
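# ---------------------------------------------------------------------------
# Illustrative sketch only (an addition of this write-up, not part of the
# original pipeline): the minimal wiring needed to train the RBM class above
# with plain CD-1 on an arbitrary design matrix `data` of shape
# (n_samples, n_visible). The names `_rbm_cd1_sketch`, `data` and `train_fn`
# are invented for this example; the training routine actually used by this
# script is test_rbm() below.
def _rbm_cd1_sketch(data, n_hidden=20, learning_rate=0.1, n_epochs=10):
    x = T.matrix('x')
    rng = numpy.random.RandomState(0)
    sketch_rbm = RBM(input=x, n_visible=data.shape[1],
                     n_hidden=n_hidden, numpy_rng=rng)
    # persistent=None selects plain CD-k; k=1 Gibbs step per parameter update
    cost, updates = sketch_rbm.get_cost_updates(lr=learning_rate,
                                                persistent=None, k=1)
    shared_data = theano.shared(numpy.asarray(data,
                                              dtype=theano.config.floatX),
                                borrow=True)
    # train on the whole matrix at once; test_rbm() below shows minibatching
    train_fn = theano.function([], cost, updates=updates,
                               givens={x: shared_data})
    for epoch in range(n_epochs):
        print('sketch epoch %d, reconstruction cost %f' % (epoch, train_fn()))
    return sketch_rbm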
def test_rbm(learning_rate=0.05, training_epochs=30,
dataset='/Users/jiajunshen/Documents/Research/partsNet/data/mnist.pkl.gz', batch_size=20,
n_chains=20, n_samples=10, output_folder='rbm_plots',
n_hidden=20):
"""
Demonstrate how to train an RBM and afterwards sample from it using Theano.
This is demonstrated on MNIST.
:param learning_rate: learning rate used for training the RBM
:param training_epochs: number of epochs used for training
:param dataset: path to the pickled dataset
:param batch_size: size of a batch used to train the RBM
:param n_chains: number of parallel Gibbs chains to be used for sampling
:param n_samples: number of samples to plot for each chain
"""
datasets = load_data(shuffledExtract,shuffledLabel)
train_set_x, train_set_y = datasets[0]
# test_set_x, test_set_y = datasets[2]
numVisible = shuffledExtract.shape[1]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
rng = numpy.random.RandomState(123)
theano_rng = RandomStreams(rng.randint(2 ** 30))
# initialize storage for the persistent chain (state = hidden
# layer of chain)
persistent_chain = theano.shared(numpy.zeros((batch_size, n_hidden),
dtype=theano.config.floatX),
borrow=True)
# construct the RBM class
rbm = RBM(input=x, n_visible= numVisible,
n_hidden=n_hidden, numpy_rng=rng, theano_rng=theano_rng)
# get the cost and the gradient corresponding to one step of CD-15
cost, updates = rbm.get_cost_updates(lr=learning_rate,
persistent=persistent_chain, k=15)
#################################
# Training the RBM #
#################################
if not os.path.isdir(output_folder):
os.makedirs(output_folder)
os.chdir(output_folder)
# it is ok for a theano function to have no output
# the purpose of train_rbm is solely to update the RBM parameters
train_rbm = theano.function([index], cost,
updates=updates,
givens={x: train_set_x[index * batch_size:
(index + 1) * batch_size]},
name='train_rbm')
plotting_time = 0.
start_time = time.clock()
# go through training epochs
for epoch in xrange(training_epochs):
# go through the training set
mean_cost = []
for batch_index in xrange(n_train_batches):
mean_cost += [train_rbm(batch_index)]
print('Training epoch %d, cost is ' % epoch, numpy.mean(mean_cost))
# Plot filters after each training epoch
plotting_start = time.clock()
# Construct image from the weight matrix
# image = PIL.Image.fromarray(tile_raster_images(
# X=rbm.W.get_value(borrow=True).T,
# img_shape=(28, 28), tile_shape=(10, 10),
# tile_spacing=(1, 1)))
# image.save('filters_at_epoch_%i.png' % epoch)
plotting_stop = time.clock()
plotting_time += (plotting_stop - plotting_start)
end_time = time.clock()
pretraining_time = (end_time - start_time) - plotting_time
print ('Training took %f minutes' % (pretraining_time / 60.))
#################################
# Sampling from the RBM #
#################################
# # find out the number of test samples
# number_of_test_samples = test_set_x.get_value(borrow=True).shape[0]
# # pick random test examples, with which to initialize the persistent chain
# test_idx = rng.randint(number_of_test_samples - n_chains)
# persistent_vis_chain = theano.shared(numpy.asarray(
# test_set_x.get_value(borrow=True)[test_idx:test_idx + n_chains],
# dtype=theano.config.floatX))
# plot_every = 1000
# # define one step of Gibbs sampling (mf = mean-field) define a
# # function that does `plot_every` steps before returning the
# # sample for plotting
# [presig_hids, hid_mfs, hid_samples, presig_vis,
# vis_mfs, vis_samples], updates = \
# theano.scan(rbm.gibbs_vhv,
# outputs_info=[None, None, None, None,
# None, persistent_vis_chain],
# n_steps=plot_every)
# # add to updates the shared variable that takes care of our persistent
# # chain :.
# updates.update({persistent_vis_chain: vis_samples[-1]})
# # construct the function that implements our persistent chain.
# # we generate the "mean field" activations for plotting and the actual
# # samples for reinitializing the state of our persistent chain
# sample_fn = theano.function([], [vis_mfs[-1], vis_samples[-1]],
# updates=updates,
# name='sample_fn')
# # create a space to store the image for plotting ( we need to leave
# # room for the tile_spacing as well)
# image_data = numpy.zeros((29 * n_samples + 1, 29 * n_chains - 1),
# dtype='uint8')
# for idx in xrange(n_samples):
# # generate `plot_every` intermediate samples that we discard,
# # because successive samples in the chain are too correlated
# vis_mf, vis_sample = sample_fn()
# print(' ... plotting sample ', idx)
# image_data[29 * idx:29 * idx + 28, :] = tile_raster_images(
# X=vis_mf,
# img_shape=(28, 28),
# tile_shape=(1, n_chains),
# tile_spacing=(1, 1))
# # construct image
# image = PIL.Image.fromarray(image_data)
# image.save('samples.png')
# os.chdir('../')
return rbm
def extract(ims,allLayers):
#print(allLayers)
curX = ims
for layer in allLayers:
#print('-------------')
#print(layer)
curX = layer.extract(curX)
#print(np.array(curX).shape)
#print('------------------')
return curX
def partsPool(originalPartsRegion, numParts):
partsGrid = np.zeros((1,1,numParts))
for i in range(originalPartsRegion.shape[0]):
for j in range(originalPartsRegion.shape[1]):
if(originalPartsRegion[i,j]!=-1):
partsGrid[0,0,originalPartsRegion[i,j]] = 1
return partsGrid
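# partsPool takes a 2-D array of coded part indices (with -1 marking positions
# where no part fired) and returns a (1, 1, numParts) binary indicator of which
# parts occur anywhere in that region, i.e. a presence/absence pooling of the
# neighbourhood.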
def test(ims,labels,net):
yhat = net.classify(ims)
return yhat == labels
def testInvestigation(ims, labels, net):
yhat = net.classify((ims,500))
return np.where(yhat!=labels), yhat
def load_data(allDataX,allDataLabel):
''' Loads the dataset
:type dataset: string
:param dataset: the path to the dataset (here MNIST)
'''
# #############
# # LOAD DATA #
# #############
# # Download the MNIST dataset if it is not present
# data_dir, data_file = os.path.split(dataset)
# if data_dir == "" and not os.path.isfile(dataset):
# # Check if dataset is in the data directory.
# new_path = os.path.join(os.path.split(__file__)[0], "..", "data", dataset)
# if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
# dataset = new_path
# if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
import urllib
#origin = 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
#print ('Downloading data from %s' % origin)
#urllib.urlretrieve(origin, dataset)
print('... loading data')
# Load the dataset
# f = gzip.open(dataset, 'rb')
# train_set, valid_set, test_set = cPickle.load(f)
# f.close()
train_set = (allDataX[:5000],allDataLabel[:5000])
# valid_set = (allDataX[4000:],allDataLabel[4000:])
#train_set, valid_set, test_set format: tuple(input, target)
#input is a numpy.ndarray of 2 dimensions (a matrix)
#whose rows correspond to examples. target is a
#numpy.ndarray of 1 dimension (a vector) that has the same length as
#the number of rows in the input. It gives the target
#to the example with the same index in the input.
def shared_dataset(data_xy, borrow=True):
""" Function that loads the dataset into shared variables
The reason we store our dataset in shared variables is to allow
Theano to copy it into the GPU memory (when code is run on GPU).
Since copying data into the GPU is slow, copying a minibatch everytime
is needed (the default behaviour if the data is not in a shared
variable) would lead to a large decrease in performance.
"""
data_x, data_y = data_xy
print(data_x.shape,data_y.shape)
shared_x = theano.shared(numpy.asarray(data_x,
dtype=theano.config.floatX),
borrow=borrow)
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX),
borrow=borrow)
# When storing data on the GPU it has to be stored as floats
# therefore we will store the labels as ``floatX`` as well
# (``shared_y`` does exactly that). But during our computations
# we need them as ints (we use labels as index, and if they are
# floats it doesn't make sense) therefore instead of returning
# ``shared_y`` we will have to cast it to int. This little hack
# lets us get around this issue
return shared_x, T.cast(shared_y, 'int32')
# test_set_x, test_set_y = shared_dataset(test_set)
# valid_set_x, valid_set_y = shared_dataset(valid_set)
train_set_x, train_set_y = shared_dataset(train_set)
rval = [(train_set_x, train_set_y)]#, (valid_set_x, valid_set_y)]#,
# (test_set_x, test_set_y)]
return rval
#def trainPOP():
if pnet.parallel.main(__name__):
#X = np.load("testMay151.npy")
#X = np.load("_3_100*6*6_1000*1*1_Jun_16_danny.npy")
X = np.load("original6*6 2.npy")
#X = np.load("sequential6*6.npy")
model = X.item()
# get num of Parts
numParts = model['layers'][1]['num_parts']
net = pnet.PartsNet.load_from_dict(model)
allLayer = net.layers
ims,labels = ag.io.load_mnist('training')
trainingDataNum = 1000
firstLayerShape = 6
extractedFeature = extract(ims[0:trainingDataNum],allLayer[0:2])[0]
print(extractedFeature.shape)
extractedFeature = extractedFeature.reshape(extractedFeature.shape[0:3])
partsPlot = np.zeros((numParts,firstLayerShape,firstLayerShape))
partsCodedNumber = np.zeros(numParts)
imgRegion= [[] for x in range(numParts)]
partsRegion = [[] for x in range(numParts)]
for i in range(trainingDataNum):
codeParts = extractedFeature[i]
for m in range(29 - firstLayerShape):
for n in range(29 - firstLayerShape):
if(codeParts[m,n]!=-1):
partsPlot[codeParts[m,n]]+=ims[i,m:m+firstLayerShape,n:n+firstLayerShape]
partsCodedNumber[codeParts[m,n]]+=1
for j in range(numParts):
partsPlot[j] = partsPlot[j]/partsCodedNumber[j]
secondLayerCodedNumber = 0
secondLayerShape = 12
frame = (secondLayerShape - firstLayerShape)/2
frame = int(frame)
totalRange = 29 - firstLayerShape
if 1:
for i in range(trainingDataNum):
codeParts = extractedFeature[i]
for m in range(totalRange)[frame:totalRange - frame]:
for n in range(totalRange)[frame:totalRange - frame]:
if(codeParts[m,n]!=-1):
imgRegion[codeParts[m,n]].append(ims[i, m - frame:m + secondLayerShape - frame,n - frame:n + secondLayerShape - frame])
secondLayerCodedNumber+=1
partsGrid = partsPool(codeParts[m-frame:m+frame + 1,n-frame:n+frame + 1],numParts)
partsRegion[codeParts[m,n]].append(partsGrid)
newPartsRegion = []
for i in range(numParts):
newPartsRegion.append(np.asarray(partsRegion[i],dtype = np.uint8))
np.save('/var/tmp/partsRegionOriginalJun29.npy',newPartsRegion)
np.save('/var/tmp/imgRegionOriginalJun29.npy',imgRegion)
##second-layer parts
numSecondLayerParts = 10
allPartsLayer = [[pnet.PartsLayer(numSecondLayerParts,(1,1),
settings=dict(outer_frame = 0,
threshold = 5,
sample_per_image = 1,
max_samples=10000,
min_prob = 0.005,
#min_llh = -40
))]
for i in range(numParts)]
allPartsLayerImg = np.zeros((numParts,numSecondLayerParts,secondLayerShape,secondLayerShape))
allPartsLayerImgNumber = np.zeros((numParts,numSecondLayerParts))
zeroParts = 0
imgRegionPool = [[] for i in range(numParts * numSecondLayerParts)]
for i in range(numParts):
if(not partsRegion[i]):
continue
allPartsLayer[i][0].train_from_samples(np.array(partsRegion[i]),None)
extractedFeaturePart = extract(np.array(partsRegion[i],dtype = np.uint8),allPartsLayer[i])[0]
print(extractedFeaturePart.shape)
for j in range(len(partsRegion[i])):
if(extractedFeaturePart[j,0,0,0]!=-1):
partIndex = extractedFeaturePart[j,0,0,0]
allPartsLayerImg[i,partIndex]+=imgRegion[i][j]
imgRegionPool[i * numSecondLayerParts + partIndex].append(imgRegion[i][j])
allPartsLayerImgNumber[i,partIndex]+=1
else:
zeroParts+=1
for i in range(numParts):
for j in range(numSecondLayerParts):
if(allPartsLayerImgNumber[i,j]):
allPartsLayerImg[i,j] = allPartsLayerImg[i,j]/allPartsLayerImgNumber[i,j]
#np.save("exPartsOriginalJun29.npy",allPartsLayer)
if 1:
"""
Visualize the SuperParts
"""
settings = {'interpolation':'nearest','cmap':plot.cm.gray,}
settings['vmin'] = 0
settings['vmax'] = 1
plotData = np.ones(((2 + secondLayerShape)*100+2,(2+secondLayerShape)*(numSecondLayerParts + 1)+2))*0.8
visualShiftParts = 0
if 0:
allPartsPlot = np.zeros((20,numSecondLayerParts + 1,12,12))
gr.images(partsPlot.reshape(numParts,6,6),zero_to_one=False,vmin = 0, vmax = 1)
allPartsPlot[:,0] = 0.5
allPartsPlot[:,0,3:9,3:9] = partsPlot[20:40]
allPartsPlot[:,1:,:,:] = allPartsLayerImg[20:40]
gr.images(allPartsPlot.reshape(20 * (numSecondLayerParts + 1),12,12),zero_to_one=False, vmin = 0, vmax =1)
elif 1:
for i in range(numSecondLayerParts + 1):
for j in range(numParts):
if i == 0:
plotData[5 + j * (2 + secondLayerShape):5+firstLayerShape + j * (2 + secondLayerShape), 5 + i * (2 + secondLayerShape): 5+firstLayerShape + i * (2 + secondLayerShape)] = partsPlot[j+visualShiftParts]
else:
plotData[2 + j * (2 + secondLayerShape):2 + secondLayerShape+ j * (2 + secondLayerShape),2 + i * (2 + secondLayerShape): 2+ secondLayerShape + i * (2 + secondLayerShape)] = allPartsLayerImg[j+visualShiftParts,i-1]
plot.figure(figsize=(10,40))
plot.axis('off')
plot.imshow(plotData, **settings)
plot.savefig('originalExParts_2.pdf',format='pdf',dpi=900)
else:
pass
"""
Train A Class-Model Layer
"""
digits = range(10)
sup_ims = []
sup_labels = []
classificationTrainingNum = 1000
for d in digits:
ims0 = ag.io.load_mnist('training', [d], selection = slice(classificationTrainingNum), return_labels = False)
sup_ims.append(ims0)
sup_labels.append(d * np.ones(len(ims0),dtype = np.int64))
sup_ims = np.concatenate(sup_ims, axis = 0)
sup_labels = np.concatenate(sup_labels,axis = 0)
#thirLevelCurx = np.load('./thirdLevelCurx.npy')
thirLevelCurx = np.load('./thirdLevelCurx_LargeMatch.npy')[:5000]
poolHelper = pnet.PoolingLayer(shape = (4,4),strides = (4,4))
thirLevelCurx = np.array(thirLevelCurx, dtype = np.int64)
pooledExtract = poolHelper.extract((thirLevelCurx[:,:,:,np.newaxis],500))
testImg_curX = np.load('./thirdLevelCurx_Test.npy')[:5000]
testImg_curX = np.array(testImg_curX, dtype = np.int64)
pooledTest = poolHelper.extract((testImg_curX[:,:,:,np.newaxis],500))
print(pooledExtract.sum(axis = 3))
print(pooledExtract.shape)
sup_labels = sup_labels[:5000]
sup_ims = sup_ims[:5000]
index = np.arange(5000)
np.random.shuffle(index)  # shuffles in place; np.random.shuffle returns None
pooledExtract = pooledExtract.reshape(5000,-1)
shuffledExtract = pooledExtract[index]
shuffledLabel = sup_labels[index]
testImg = sup_ims[index]
datasets = load_data(shuffledExtract,shuffledLabel)
train_set_x, train_set_y = datasets[0]
#testRbm = test_rbm()
#weights = testRbm.W.get_value(borrow=True)
#np.save('weights20Hidden.npy',weights)
weights = np.load('weights20Hidden.npy')
weights = weights.reshape(4,4,500,20)
newsup_labels = []
classificationTrainingNum = 100
for d in digits:
newsup_labels.append(d * np.ones(100,dtype = np.int64))
sup_labels = np.concatenate(newsup_labels,axis = 0)
trainingImg_curX_all = np.load('./thirdLevelCurx_LargeMatch.npy')
trainingImg_curX = trainingImg_curX_all[:1000]
for d in digits:
trainingImg_curX[d * 100: (d + 1)*100] = trainingImg_curX_all[d * 1000: d*1000+100]
trainingImg_curX = np.array(trainingImg_curX, dtype = np.int64)
pooledTrain = poolHelper.extract((trainingImg_curX[:,:,:,np.newaxis],500))
trainLabels = sup_labels
newPooledExtract = np.array(pooledTrain[:1000]).reshape(1000,4,4,500)
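# The block below is, in effect, a similarity-based expansion of the pooled
# features: at each of the 4x4 pooled locations the 500 part features are
# clustered (k-means, 10 clusters) using their 20-dimensional RBM hidden
# weights, the largest cluster is treated as background and skipped, and every
# active feature also switches on the nearest half of the other members of its
# cluster. This reading of the code is an interpretation, not a statement of
# the original author's intent.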
if 1:
for p in range(4):
for q in range(4):
location1 = newPooledExtract[:,p,q,:]
data = weights[p,q,:500,:]
X = np.array(data.reshape(500,20),dtype=np.double)
kmeans = sklearn.cluster.k_means(np.array(X,dtype = np.double),10)[1]
skipIndex = np.argmax(np.bincount(kmeans))
#Put in all the array of group index here
groupIndexArray = [[] for m in range(10)]
for i in range(10):
if i == skipIndex:
continue
testIndex = i
indexArray = np.where(kmeans == testIndex)[0]
groupIndexArray[testIndex].append(indexArray)
poolingIndex = [[] for m in range(500)]
for k in np.where(np.max(location1,axis=0)!=0)[0]:
if kmeans[k] == skipIndex:
continue
else:
distanceArray = np.array([np.sum((X[m,:]-X[k,:]) * (X[m,:]-X[k,:])) for m in groupIndexArray[kmeans[k]][0]])
#print(distanceArray.shape)
numPooling = (distanceArray.shape[0] + 1)//2
# print(numPooling)
finalPooling = groupIndexArray[kmeans[k]][0][np.argsort(distanceArray)[:numPooling]]
#print(k, finalPooling)
poolingIndex[k].append(finalPooling)
for r in range(1000):
print(r)
for m in range(500):
if newPooledExtract[r,p,q,m] == 1:
if len(poolingIndex[m])==0:
continue
else:
# print(poolingIndex[m][0])
newPooledExtract[r,p,q,:][poolingIndex[m][0]] = 1
#pass
if 0:
for p in range(5):
print(trainLabels[p])
gr.images(trainImg[p])
for m in range(4):
for n in range(4):
gr.images(np.array([allPartsLayerImg[(k%500)//10,k - ((k%500)//10) * 10] for k in np.where(newPooledExtract[p,m,n,:]==1)[0]]))
testImg_curX = np.load('./thirdLevelCurx_Test.npy')
testImg_curX = np.array(testImg_curX, dtype = np.int64)
pooledTest = poolHelper.extract((testImg_curX[:,:,:,np.newaxis],500))
testingNum = 1000
testImg,testLabels = ag.io.load_mnist('testing')
print(pooledTest.shape)
newPooledExtractTest = np.array(pooledTest[:testingNum]).reshape(testingNum,4,4,500)
if 1:
for p in range(4):
for q in range(4):
location1 = newPooledExtractTest[:,p,q,:]
data = weights[p,q,:500,:]
X = np.array(data.reshape(500,20),dtype=np.double)
kmeans = sklearn.cluster.k_means(np.array(X,dtype = np.double),10)[1]
skipIndex = np.argmax(np.bincount(kmeans))
#Put in all the array of group index here
groupIndexArray = [[] for m in range(10)]
for i in range(10):
if i == skipIndex:
continue
testIndex = i
indexArray = np.where(kmeans == testIndex)[0]
groupIndexArray[testIndex].append(indexArray)
poolingIndex = [[] for m in range(500)]
for k in np.where(np.max(location1,axis=0)!=0)[0]:
if kmeans[k] == skipIndex:
continue
else:
distanceArray = np.array([np.sum((X[m,:]-X[k,:]) * (X[m,:]-X[k,:])) for m in groupIndexArray[kmeans[k]][0]])
#print(distanceArray.shape)
numPooling = (distanceArray.shape[0] + 1)//2
# print(numPooling)
finalPooling = groupIndexArray[kmeans[k]][0][np.argsort(distanceArray)[:numPooling]]
#print(k, finalPooling)
poolingIndex[k].append(finalPooling)
for r in range(testingNum):
print(r)
for m in range(500):
if newPooledExtractTest[r,p,q,m] == 1:
if len(poolingIndex[m])==0:
continue
else:
# print(poolingIndex[m][0])
newPooledExtractTest[r,p,q,:][poolingIndex[m][0]] = 1
#pass
newPooledExtract = newPooledExtract.reshape(1000,-1)
newPooledExtractTest = newPooledExtractTest.reshape(testingNum,-1)
#Train a class Model#
testLabels = testLabels[:testingNum]
svmLayer = pnet.SVMClassificationLayer(C = 1.0)
svmLayer.train(newPooledExtract[:1000], trainLabels[:1000])
print("Training Success!")
testImg_Input = np.array(newPooledExtractTest, dtype = np.int64)
testImg_batches = np.array_split(newPooledExtractTest[:testingNum], 200)
print(np.mean(svmLayer.extract(testImg_Input) == testLabels))
if 0:
testLabels_batches = np.array_split(testLabels, 200)
args = [tup + (svmLayer,) for tup in zip(testImg_batches, testLabels_batches)]
corrects = 0
total = 0
def format_error_rate(pr):
return "{:.2f}%".format(100 * (1-pr))
def clustering(X,L,layer):
return L == layer.extract(X)
print("Testing Starting...")
for i, res in enumerate(pnet.parallel.starmap_unordered(clustering,args)):
if i !=0 and i % 20 ==0:
print("{0:05}/{1:05} Error rate: {2}".format(total, len(ims),format_error_rate(pr)))
corrects += res.sum()
print(res.sum())
total += res.size
pr = corrects / total
print("Final error rate:", format_error_rate(pr))
| bsd-3-clause |
Aasmi/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
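# validation_curve returns two arrays of shape (n_param_values, n_cv_folds),
# here (5, 10); averaging over axis=1 below gives one mean score per gamma.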
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
WarrenWeckesser/scipy | scipy/optimize/_shgo_lib/triangulation.py | 11 | 21463 | import numpy as np
import copy
class Complex:
def __init__(self, dim, func, func_args=(), symmetry=False, bounds=None,
g_cons=None, g_args=()):
self.dim = dim
self.bounds = bounds
self.symmetry = symmetry # TODO: Define the functions to be used
# here in init to avoid if checks
self.gen = 0
self.perm_cycle = 0
# Every cell is stored in a list of its generation,
# e.g., the initial cell is stored in self.H[0]
# 1st generation cells are stored in self.H[1], etc.
# When a cell is subgenerated it is removed from this list
self.H = [] # Storage structure of cells
# Cache of all vertices
self.V = VertexCache(func, func_args, bounds, g_cons, g_args)
# Generate n-cube here:
self.n_cube(dim, symmetry=symmetry)
# TODO: Assign functions to the complex instead
if symmetry:
self.generation_cycle = 1
# self.centroid = self.C0()[-1].x
# self.C0.centroid = self.centroid
else:
self.add_centroid()
self.H.append([])
self.H[0].append(self.C0)
self.hgr = self.C0.homology_group_rank()
self.hgrd = 0 # Complex group rank differential
# self.hgr = self.C0.hg_n
# Build initial graph
self.graph_map()
self.performance = []
self.performance.append(0)
self.performance.append(0)
def __call__(self):
return self.H
def n_cube(self, dim, symmetry=False, printout=False):
"""
Generate the simplicial triangulation of the N-D hypercube
containing 2**n vertices
"""
origin = list(np.zeros(dim, dtype=int))
self.origin = origin
supremum = list(np.ones(dim, dtype=int))
self.supremum = supremum
# tuple versions for indexing
origintuple = tuple(origin)
supremumtuple = tuple(supremum)
x_parents = [origintuple]
if symmetry:
self.C0 = Simplex(0, 0, 0, self.dim) # Initial cell object
self.C0.add_vertex(self.V[origintuple])
i_s = 0
self.perm_symmetry(i_s, x_parents, origin)
self.C0.add_vertex(self.V[supremumtuple])
else:
self.C0 = Cell(0, 0, origin, supremum) # Initial cell object
self.C0.add_vertex(self.V[origintuple])
self.C0.add_vertex(self.V[supremumtuple])
i_parents = []
self.perm(i_parents, x_parents, origin)
if printout:
print("Initial hyper cube:")
for v in self.C0():
v.print_out()
def perm(self, i_parents, x_parents, xi):
# TODO: Cut out of for if outside linear constraint cutting planes
xi_t = tuple(xi)
# Construct required iterator
iter_range = [x for x in range(self.dim) if x not in i_parents]
for i in iter_range:
i2_parents = copy.copy(i_parents)
i2_parents.append(i)
xi2 = copy.copy(xi)
xi2[i] = 1
# Make new vertex list a hashable tuple
xi2_t = tuple(xi2)
# Append to cell
self.C0.add_vertex(self.V[xi2_t])
# Connect neighbors and vice versa
# Parent point
self.V[xi2_t].connect(self.V[xi_t])
# Connect all family of simplices in parent containers
for x_ip in x_parents:
self.V[xi2_t].connect(self.V[x_ip])
x_parents2 = copy.copy(x_parents)
x_parents2.append(xi_t)
# Permutate
self.perm(i2_parents, x_parents2, xi2)
def perm_symmetry(self, i_s, x_parents, xi):
# TODO: Cut out of for if outside linear constraint cutting planes
xi_t = tuple(xi)
xi2 = copy.copy(xi)
xi2[i_s] = 1
# Make new vertex list a hashable tuple
xi2_t = tuple(xi2)
# Append to cell
self.C0.add_vertex(self.V[xi2_t])
# Connect neighbors and vice versa
# Parent point
self.V[xi2_t].connect(self.V[xi_t])
# Connect all family of simplices in parent containers
for x_ip in x_parents:
self.V[xi2_t].connect(self.V[x_ip])
x_parents2 = copy.copy(x_parents)
x_parents2.append(xi_t)
i_s += 1
if i_s == self.dim:
return
# Permutate
self.perm_symmetry(i_s, x_parents2, xi2)
def add_centroid(self):
"""Split the central edge between the origin and supremum of
a cell and add the new vertex to the complex"""
self.centroid = list(
(np.array(self.origin) + np.array(self.supremum)) / 2.0)
self.C0.add_vertex(self.V[tuple(self.centroid)])
self.C0.centroid = self.centroid
# Disconnect origin and supremum
self.V[tuple(self.origin)].disconnect(self.V[tuple(self.supremum)])
# Connect centroid to all other vertices
for v in self.C0():
self.V[tuple(self.centroid)].connect(self.V[tuple(v.x)])
self.centroid_added = True
return
# Construct incidence array:
def incidence(self):
if self.centroid_added:
self.structure = np.zeros([2 ** self.dim + 1, 2 ** self.dim + 1],
dtype=int)
else:
self.structure = np.zeros([2 ** self.dim, 2 ** self.dim],
dtype=int)
for v in self.HC.C0():
for v2 in v.nn:
self.structure[v.index, v2.index] = 1
return
# A more sparse incidence generator:
def graph_map(self):
""" Make a list of size 2**n + 1 where an entry is a vertex
incidence, each list element contains a list of indexes
corresponding to that entries neighbors"""
self.graph = [[v2.index for v2 in v.nn] for v in self.C0()]
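# self.graph[i] is simply the list of vertex indices adjacent to the i-th
# vertex of C0; because add_centroid connects the centroid to every other
# vertex, the last entry (the centroid) lists all remaining indices.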
# Graph structure method:
# 0. Capture the indices of the initial cell.
# 1. Generate new origin and supremum scalars based on current generation
# 2. Generate a new set of vertices corresponding to a new
# "origin" and "supremum"
# 3. Connected based on the indices of the previous graph structure
# 4. Disconnect the edges in the original cell
def sub_generate_cell(self, C_i, gen):
"""Subgenerate a cell `C_i` of generation `gen` and
homology group rank `hgr`."""
origin_new = tuple(C_i.centroid)
centroid_index = len(C_i()) - 1
# If not gen append
try:
self.H[gen]
except IndexError:
self.H.append([])
# Generate subcubes using every extreme vertex in C_i as a supremum
# and the centroid of C_i as the origin
H_new = [] # list storing all the new cubes split from C_i
for i, v in enumerate(C_i()[:-1]):
supremum = tuple(v.x)
H_new.append(
self.construct_hypercube(origin_new, supremum, gen, C_i.hg_n))
for i, connections in enumerate(self.graph):
# Present vertex V_new[i]; connect to all connections:
if i == centroid_index: # Break out of centroid
break
for j in connections:
C_i()[i].disconnect(C_i()[j])
# Destroy the old cell
if C_i is not self.C0: # Garbage collector does this anyway; not needed
del C_i
# TODO: Recalculate all the homology group ranks of each cell
return H_new
def split_generation(self):
"""
Run sub_generate_cell for every cell in the current complex self.gen
"""
no_splits = False # USED IN SHGO
try:
for c in self.H[self.gen]:
if self.symmetry:
# self.sub_generate_cell_symmetry(c, self.gen + 1)
self.split_simplex_symmetry(c, self.gen + 1)
else:
self.sub_generate_cell(c, self.gen + 1)
except IndexError:
no_splits = True # USED IN SHGO
self.gen += 1
return no_splits # USED IN SHGO
def construct_hypercube(self, origin, supremum, gen, hgr,
printout=False):
"""
Build a hypercube with triangulations symmetric to C0.
Parameters
----------
origin : vec
supremum : vec (tuple)
gen : generation
hgr : parent homology group rank
"""
# Initiate new cell
v_o = np.array(origin)
v_s = np.array(supremum)
C_new = Cell(gen, hgr, origin, supremum)
C_new.centroid = tuple((v_o + v_s) * .5)
# Build new indexed vertex list
V_new = []
for i, v in enumerate(self.C0()[:-1]):
v_x = np.array(v.x)
sub_cell_t1 = v_o - v_o * v_x
sub_cell_t2 = v_s * v_x
vec = sub_cell_t1 + sub_cell_t2
vec = tuple(vec)
C_new.add_vertex(self.V[vec])
V_new.append(vec)
# Add new centroid
C_new.add_vertex(self.V[C_new.centroid])
V_new.append(C_new.centroid)
# Connect new vertices #TODO: Thread into other loop; no need for V_new
for i, connections in enumerate(self.graph):
# Present vertex V_new[i]; connect to all connections:
for j in connections:
self.V[V_new[i]].connect(self.V[V_new[j]])
if printout:
print("A sub hyper cube with:")
print("origin: {}".format(origin))
print("supremum: {}".format(supremum))
for v in C_new():
v.print_out()
# Append the new cell to the to complex
self.H[gen].append(C_new)
return C_new
def split_simplex_symmetry(self, S, gen):
"""
Split a hypersimplex S into two sub simplices by building a hyperplane
which connects to a new vertex on an edge (the longest edge in
dim = {2, 3}) and every other vertex in the simplex that is not
connected to the edge being split.
This function utilizes the knowledge that the problem is specified
with symmetric constraints
The longest edge is tracked by an ordering of the
vertices in every simplex; the edge between the first and second
vertex is the longest edge to be split in the next iteration.
"""
# If not gen append
try:
self.H[gen]
except IndexError:
self.H.append([])
# Find new vertex.
# V_new_x = tuple((np.array(C()[0].x) + np.array(C()[1].x)) / 2.0)
s = S()
firstx = s[0].x
lastx = s[-1].x
V_new = self.V[tuple((np.array(firstx) + np.array(lastx)) / 2.0)]
# Disconnect old longest edge
self.V[firstx].disconnect(self.V[lastx])
# Connect new vertices to all other vertices
for v in s[:]:
v.connect(self.V[V_new.x])
# New "lower" simplex
S_new_l = Simplex(gen, S.hg_n, self.generation_cycle,
self.dim)
S_new_l.add_vertex(s[0])
S_new_l.add_vertex(V_new) # Add new vertex
for v in s[1:-1]: # Add all other vertices
S_new_l.add_vertex(v)
# New "upper" simplex
S_new_u = Simplex(gen, S.hg_n, S.generation_cycle, self.dim)
# First vertex on new long edge
S_new_u.add_vertex(s[S_new_u.generation_cycle + 1])
for v in s[1:-1]: # Remaining vertices
S_new_u.add_vertex(v)
for k, v in enumerate(s[1:-1]): # iterate through inner vertices
if k == S.generation_cycle:
S_new_u.add_vertex(V_new)
else:
S_new_u.add_vertex(v)
S_new_u.add_vertex(s[-1]) # Second vertex on new long edge
self.H[gen].append(S_new_l)
self.H[gen].append(S_new_u)
return
# Plots
def plot_complex(self):
"""
Here, C is the LIST of simplexes S in the
2- or 3-D complex
To plot a single simplex S in a set C, use e.g., [C[0]]
"""
from matplotlib import pyplot # type: ignore[import]
if self.dim == 2:
pyplot.figure()
for C in self.H:
for c in C:
for v in c():
if self.bounds is None:
x_a = np.array(v.x, dtype=float)
else:
x_a = np.array(v.x, dtype=float)
for i in range(len(self.bounds)):
x_a[i] = (x_a[i] * (self.bounds[i][1]
- self.bounds[i][0])
+ self.bounds[i][0])
# logging.info('v.x_a = {}'.format(x_a))
pyplot.plot([x_a[0]], [x_a[1]], 'o')
xlines = []
ylines = []
for vn in v.nn:
if self.bounds is None:
xn_a = np.array(vn.x, dtype=float)
else:
xn_a = np.array(vn.x, dtype=float)
for i in range(len(self.bounds)):
xn_a[i] = (xn_a[i] * (self.bounds[i][1]
- self.bounds[i][0])
+ self.bounds[i][0])
# logging.info('vn.x = {}'.format(vn.x))
xlines.append(xn_a[0])
ylines.append(xn_a[1])
xlines.append(x_a[0])
ylines.append(x_a[1])
pyplot.plot(xlines, ylines)
if self.bounds is None:
pyplot.ylim([-1e-2, 1 + 1e-2])
pyplot.xlim([-1e-2, 1 + 1e-2])
else:
pyplot.ylim(
[self.bounds[1][0] - 1e-2, self.bounds[1][1] + 1e-2])
pyplot.xlim(
[self.bounds[0][0] - 1e-2, self.bounds[0][1] + 1e-2])
pyplot.show()
elif self.dim == 3:
fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
for C in self.H:
for c in C:
for v in c():
x = []
y = []
z = []
# logging.info('v.x = {}'.format(v.x))
x.append(v.x[0])
y.append(v.x[1])
z.append(v.x[2])
for vn in v.nn:
x.append(vn.x[0])
y.append(vn.x[1])
z.append(vn.x[2])
x.append(v.x[0])
y.append(v.x[1])
z.append(v.x[2])
# logging.info('vn.x = {}'.format(vn.x))
ax.plot(x, y, z, label='simplex')
pyplot.show()
else:
print("dimension higher than 3 or wrong complex format")
return
class VertexGroup:
def __init__(self, p_gen, p_hgr):
self.p_gen = p_gen # parent generation
self.p_hgr = p_hgr # parent homology group rank
self.hg_n = None
self.hg_d = None
# Maybe add parent homology group rank total history
# This is the sum off all previously split cells
# cumulatively throughout its entire history
self.C = []
def __call__(self):
return self.C
def add_vertex(self, V):
if V not in self.C:
self.C.append(V)
def homology_group_rank(self):
"""
Returns the homology group order of the current cell
"""
if self.hg_n is None:
self.hg_n = sum(1 for v in self.C if v.minimiser())
return self.hg_n
def homology_group_differential(self):
"""
Returns the difference between the current homology group of the
cell and its parent group
"""
if self.hg_d is None:
self.hgd = self.hg_n - self.p_hgr
return self.hgd
def polytopial_sperner_lemma(self):
"""
Returns the number of stationary points theoretically contained in the
cell based information currently known about the cell
"""
pass
def print_out(self):
"""
Print the current cell to console
"""
for v in self():
v.print_out()
class Cell(VertexGroup):
"""
Contains a cell that is symmetric to the initial hypercube triangulation
"""
def __init__(self, p_gen, p_hgr, origin, supremum):
super().__init__(p_gen, p_hgr)
self.origin = origin
self.supremum = supremum
self.centroid = None # (Not always used)
# TODO: self.bounds
class Simplex(VertexGroup):
"""
Contains a simplex that is symmetric to the initial symmetry constrained
hypersimplex triangulation
"""
def __init__(self, p_gen, p_hgr, generation_cycle, dim):
super().__init__(p_gen, p_hgr)
self.generation_cycle = (generation_cycle + 1) % (dim - 1)
class Vertex:
def __init__(self, x, bounds=None, func=None, func_args=(), g_cons=None,
g_cons_args=(), nn=None, index=None):
self.x = x
self.order = sum(x)
x_a = np.array(x, dtype=float)
if bounds is not None:
for i, (lb, ub) in enumerate(bounds):
x_a[i] = x_a[i] * (ub - lb) + lb
# TODO: Make saving the array structure optional
self.x_a = x_a
# Note Vertex is only initiated once for all x so only
# evaluated once
if func is not None:
self.feasible = True
if g_cons is not None:
for g, args in zip(g_cons, g_cons_args):
if g(self.x_a, *args) < 0.0:
self.f = np.inf
self.feasible = False
break
if self.feasible:
self.f = func(x_a, *func_args)
if nn is not None:
self.nn = nn
else:
self.nn = set()
self.fval = None
self.check_min = True
# Index:
if index is not None:
self.index = index
def __hash__(self):
return hash(self.x)
def connect(self, v):
if v is not self and v not in self.nn:
self.nn.add(v)
v.nn.add(self)
if self.minimiser():
v._min = False
v.check_min = False
# TEMPORARY
self.check_min = True
v.check_min = True
def disconnect(self, v):
if v in self.nn:
self.nn.remove(v)
v.nn.remove(self)
self.check_min = True
v.check_min = True
def minimiser(self):
"""Check whether this vertex is strictly less than all its neighbors"""
if self.check_min:
self._min = all(self.f < v.f for v in self.nn)
self.check_min = False
return self._min
def print_out(self):
print("Vertex: {}".format(self.x))
constr = 'Connections: '
for vc in self.nn:
constr += '{} '.format(vc.x)
print(constr)
print('Order = {}'.format(self.order))
class VertexCache:
def __init__(self, func, func_args=(), bounds=None, g_cons=None,
g_cons_args=(), indexed=True):
self.cache = {}
self.func = func
self.g_cons = g_cons
self.g_cons_args = g_cons_args
self.func_args = func_args
self.bounds = bounds
self.nfev = 0
self.size = 0
if indexed:
self.index = -1
def __getitem__(self, x, indexed=True):
try:
return self.cache[x]
except KeyError:
if indexed:
self.index += 1
xval = Vertex(x, bounds=self.bounds,
func=self.func, func_args=self.func_args,
g_cons=self.g_cons,
g_cons_args=self.g_cons_args,
index=self.index)
else:
xval = Vertex(x, bounds=self.bounds,
func=self.func, func_args=self.func_args,
g_cons=self.g_cons,
g_cons_args=self.g_cons_args)
# logging.info("New generated vertex at x = {}".format(x))
# NOTE: Surprisingly high performance increase if logging is commented out
self.cache[x] = xval
# TODO: Check
if self.func is not None:
if self.g_cons is not None:
if xval.feasible:
self.nfev += 1
self.size += 1
else:
self.size += 1
else:
self.nfev += 1
self.size += 1
return self.cache[x]
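# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library; the function and variable
# names below are invented for this example): driving the Complex class
# directly on a toy 2-D objective over the box [-1, 1]^2 and refining the
# triangulation once.
def _triangulation_sketch():
    def sphere(x):
        # simple smooth objective evaluated at each generated vertex
        return x[0] ** 2 + x[1] ** 2

    hc = Complex(2, sphere, bounds=[(-1.0, 1.0), (-1.0, 1.0)])
    hc.split_generation()  # subdivide every cell of the current generation
    print("{} vertices cached, {} objective evaluations".format(hc.V.size,
                                                                hc.V.nfev))
    return hc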
| bsd-3-clause |